Diffstat (limited to 'drivers'): 2212 files changed, 98621 insertions, 45101 deletions
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 2da8660262e5..81dc75033f15 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -33,7 +33,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
 		      void *handler_context, void *region_context)
 {
 	int i;
-	u8 *value = (u8 *)&value64;
+	u8 *value = (u8 *)value64;
 
 	if (address > 0xff || !value64)
 		return AE_BAD_PARAMETER;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index ce06149088c5..b0ea767c8696 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -196,6 +196,17 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
 	.setup = lpss_i2c_setup,
 };
 
+static struct lpss_shared_clock bsw_pwm_clock = {
+	.name = "pwm_clk",
+	.rate = 19200000,
+};
+
+static struct lpss_device_desc bsw_pwm_dev_desc = {
+	.clk_required = true,
+	.save_ctx = true,
+	.shared_clock = &bsw_pwm_clock,
+};
+
 #else
 
 #define LPSS_ADDR(desc) (0UL)
@@ -225,6 +236,12 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	{ "INT33B2", },
 	{ "INT33FC", },
 
+	/* Braswell LPSS devices */
+	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+	{ "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
+	{ "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
+	{ "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
+
 	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
 	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
 	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
@@ -402,7 +419,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 	adev->driver_data = pdata;
 	pdev = acpi_create_platform_device(adev);
 	if (!IS_ERR_OR_NULL(pdev)) {
-		device_enable_async_suspend(&pdev->dev);
 		return 1;
 	}
 
@@ -593,7 +609,7 @@ static int acpi_lpss_suspend_late(struct device *dev)
 	return acpi_dev_suspend_late(dev);
 }
 
-static int acpi_lpss_restore_early(struct device *dev)
+static int acpi_lpss_resume_early(struct device *dev)
 {
 	int ret = acpi_dev_resume_early(dev);
 
@@ -633,15 +649,15 @@ static int acpi_lpss_runtime_resume(struct device *dev)
 static struct dev_pm_domain acpi_lpss_pm_domain = {
 	.ops = {
 #ifdef CONFIG_PM_SLEEP
-		.suspend_late = acpi_lpss_suspend_late,
-		.restore_early = acpi_lpss_restore_early,
 		.prepare = acpi_subsys_prepare,
 		.complete = acpi_subsys_complete,
 		.suspend = acpi_subsys_suspend,
-		.resume_early = acpi_subsys_resume_early,
+		.suspend_late = acpi_lpss_suspend_late,
+		.resume_early = acpi_lpss_resume_early,
 		.freeze = acpi_subsys_freeze,
 		.poweroff = acpi_subsys_suspend,
-		.poweroff_late = acpi_subsys_suspend_late,
+		.poweroff_late = acpi_lpss_suspend_late,
+		.restore_early = acpi_lpss_resume_early,
 #endif
 #ifdef CONFIG_PM_RUNTIME
 		.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 1f9aba5fb81f..2747279fbe3c 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -254,6 +254,7 @@ struct acpi_create_field_info {
 	u32 field_bit_position;
 	u32 field_bit_length;
 	u16 resource_length;
+	u16 pin_number_index;
 	u8 field_flags;
 	u8 attribute;
 	u8 field_type;
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 22fb6449d3d6..8abb393dafab 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -264,6 +264,7 @@ struct acpi_object_region_field {
 	ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
 	union acpi_operand_object *region_obj;	/* Containing op_region object */
 	u8 *resource_buffer;	/* resource_template for serial regions/fields */
+	u16 pin_number_index;	/* Index
relative to previous Connection/Template */ }; struct acpi_object_bank_field { diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 3661c8e90540..c57666196672 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, */ info->resource_buffer = NULL; info->connection_node = NULL; + info->pin_number_index = 0; /* * A Connection() is either an actual resource descriptor (buffer) @@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, } info->field_bit_position += info->field_bit_length; + info->pin_number_index++; /* Index relative to previous Connection() */ break; default: diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 9957297d1580..8eb8575e8c16 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, union acpi_operand_object *region_obj2; void *region_context = NULL; struct acpi_connection_info *context; + acpi_physical_address address; ACPI_FUNCTION_TRACE(ev_address_space_dispatch); @@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, /* We have everything we need, we can invoke the address space handler */ handler = handler_desc->address_space.handler; - - ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, - "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", - ®ion_obj->region.handler->address_space, handler, - ACPI_FORMAT_NATIVE_UINT(region_obj->region.address + - region_offset), - acpi_ut_get_region_name(region_obj->region. - space_id))); + address = (region_obj->region.address + region_offset); /* * Special handling for generic_serial_bus and general_purpose_io: * There are three extra parameters that must be passed to the * handler via the context: - * 1) Connection buffer, a resource template from Connection() op. - * 2) Length of the above buffer. - * 3) Actual access length from the access_as() op. 
+ * 1) Connection buffer, a resource template from Connection() op + * 2) Length of the above buffer + * 3) Actual access length from the access_as() op + * + * In addition, for general_purpose_io, the Address and bit_width fields + * are defined as follows: + * 1) Address is the pin number index of the field (bit offset from + * the previous Connection) + * 2) bit_width is the actual bit length of the field (number of pins) */ - if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) || - (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) && + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) && context && field_obj) { /* Get the Connection (resource_template) buffer */ @@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, context->length = field_obj->field.resource_length; context->access_length = field_obj->field.access_length; } + if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) && + context && field_obj) { + + /* Get the Connection (resource_template) buffer */ + + context->connection = field_obj->field.resource_buffer; + context->length = field_obj->field.resource_length; + context->access_length = field_obj->field.access_length; + address = field_obj->field.pin_number_index; + bit_width = field_obj->field.bit_length; + } + + ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, + "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", + ®ion_obj->region.handler->address_space, handler, + ACPI_FORMAT_NATIVE_UINT(address), + acpi_ut_get_region_name(region_obj->region. + space_id))); if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { @@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, /* Call the handler */ - status = handler(function, - (region_obj->region.address + region_offset), - bit_width, value, context, + status = handler(function, address, bit_width, value, context, region_obj2->extra.region_context); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 6907ce0c704c..b994845ed359 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -253,6 +253,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state, buffer = &buffer_desc->integer.value; } + if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_GPIO)) { + /* + * For GPIO (general_purpose_io), the Address will be the bit offset + * from the previous Connection() operator, making it effectively a + * pin number index. The bit_length is the length of the field, which + * is thus the number of pins. 
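/*
 * Editor's sketch (not part of the patch): the evregion.c/exfield.c hunks
 * above redefine what a GeneralPurposeIo opregion handler receives --
 * Address becomes the field's pin-number index counted from the preceding
 * Connection() descriptor, and bit_width becomes the number of pins in the
 * field. A minimal handler consuming those arguments might look as follows;
 * the handler name is hypothetical, only the acpi_adr_space_handler
 * prototype and ACPI_READ/ACPI_WRITE come from the ACPI headers.
 */
#include <linux/acpi.h>

static acpi_status
example_gpio_region_handler(u32 function, acpi_physical_address address,
			    u32 bits, u64 *value, void *handler_context,
			    void *region_context)
{
	u32 pin_index = (u32)address;	/* offset from the previous Connection() */
	u32 num_pins = bits;		/* field bit length == number of pins */

	pr_debug("GPIO opregion access: fn=%u pin=%u pins=%u\n",
		 function, pin_index, num_pins);

	if (function == ACPI_READ)
		*value = 0;	/* a real driver would sample 'num_pins' GPIOs here */

	/* for ACPI_WRITE, bits 0..num_pins-1 of *value carry the pin states */
	return AE_OK;
}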
+ */ + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, + "GPIO FieldRead [FROM]: Pin %u Bits %u\n", + obj_desc->field.pin_number_index, + obj_desc->field.bit_length)); + + /* Lock entire transaction if requested */ + + acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); + + /* Perform the write */ + + status = acpi_ex_access_region(obj_desc, 0, + (u64 *)buffer, ACPI_READ); + acpi_ex_release_global_lock(obj_desc->common_field.field_flags); + if (ACPI_FAILURE(status)) { + acpi_ut_remove_reference(buffer_desc); + } else { + *ret_buffer_desc = buffer_desc; + } + return_ACPI_STATUS(status); + } + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n", obj_desc, obj_desc->common.type, buffer, @@ -413,6 +444,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, *result_desc = buffer_desc; return_ACPI_STATUS(status); + } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_GPIO)) { + /* + * For GPIO (general_purpose_io), we will bypass the entire field + * mechanism and handoff the bit address and bit width directly to + * the handler. The Address will be the bit offset + * from the previous Connection() operator, making it effectively a + * pin number index. The bit_length is the length of the field, which + * is thus the number of pins. + */ + if (source_desc->common.type != ACPI_TYPE_INTEGER) { + return_ACPI_STATUS(AE_AML_OPERAND_TYPE); + } + + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, + "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X [TO]: Pin %u Bits %u\n", + acpi_ut_get_type_name(source_desc->common. + type), + source_desc->common.type, + (u32)source_desc->integer.value, + obj_desc->field.pin_number_index, + obj_desc->field.bit_length)); + + buffer = &source_desc->integer.value; + + /* Lock entire transaction if requested */ + + acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); + + /* Perform the write */ + + status = acpi_ex_access_region(obj_desc, 0, + (u64 *)buffer, ACPI_WRITE); + acpi_ex_release_global_lock(obj_desc->common_field.field_flags); + return_ACPI_STATUS(status); } /* Get a pointer to the data to be written */ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index ee3f872870bc..118e942005e5 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) obj_desc->field.resource_length = info->resource_length; } + obj_desc->field.pin_number_index = info->pin_number_index; + /* Allow full data read from EC address space */ if ((obj_desc->field.region_obj->region.space_id == diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 68f725839eb6..1b13b921dda9 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -316,6 +316,45 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, acpi_ns_check_package_list(info, package, elements, count); break; + case ACPI_PTYPE2_UUID_PAIR: + + /* The package must contain pairs of (UUID + type) */ + + if (count & 1) { + expected_count = count + 1; + goto package_too_small; + } + + while (count > 0) { + status = acpi_ns_check_object_type(info, elements, + package->ret_info. 
+ object_type1, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Validate length of the UUID buffer */ + + if ((*elements)->buffer.length != 16) { + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, + info->node_flags, + "Invalid length for UUID Buffer")); + return (AE_AML_OPERAND_VALUE); + } + + status = acpi_ns_check_object_type(info, elements + 1, + package->ret_info. + object_type2, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + + elements += 2; + count -= 2; + } + break; + default: /* Should not get here if predefined info table is correct */ diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 48bcf38a0ea8..5fdfe65fe165 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -534,20 +534,6 @@ static int acpi_battery_get_state(struct acpi_battery *battery) " invalid.\n"); } - /* - * When fully charged, some batteries wrongly report - * capacity_now = design_capacity instead of = full_charge_capacity - */ - if (battery->capacity_now > battery->full_charge_capacity - && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) { - battery->capacity_now = battery->full_charge_capacity; - if (battery->capacity_now != battery->design_capacity) - printk_once(KERN_WARNING FW_BUG - "battery: reported current charge level (%d) " - "is higher than reported maximum charge level (%d).\n", - battery->capacity_now, battery->full_charge_capacity); - } - if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) && battery->capacity_now >= 0 && battery->capacity_now <= 100) battery->capacity_now = (battery->capacity_now * diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 8581f5b84f48..8b67bd0f6bb5 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -177,16 +177,6 @@ void acpi_bus_detach_private_data(acpi_handle handle) } EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data); -void acpi_bus_no_hotplug(acpi_handle handle) -{ - struct acpi_device *adev = NULL; - - acpi_bus_get_device(handle, &adev); - if (adev) - adev->flags.no_hotplug = true; -} -EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug); - static void acpi_print_osc_error(acpi_handle handle, struct acpi_osc_context *context, char *error) { diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 76f7cff64594..c8ead9f97375 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -99,6 +99,13 @@ static void container_device_detach(struct acpi_device *adev) device_unregister(dev); } +static void container_device_online(struct acpi_device *adev) +{ + struct device *dev = acpi_driver_data(adev); + + kobject_uevent(&dev->kobj, KOBJ_ONLINE); +} + static struct acpi_scan_handler container_handler = { .ids = container_device_ids, .attach = container_device_attach, @@ -106,6 +113,7 @@ static struct acpi_scan_handler container_handler = { .hotplug = { .enabled = true, .demand_offline = true, + .notify_online = container_device_online, }, }; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a66ab658abbc..cb6066c809ea 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -197,6 +197,8 @@ static bool advance_transaction(struct acpi_ec *ec) t->rdata[t->ri++] = acpi_ec_read_data(ec); if (t->rlen == t->ri) { t->flags |= ACPI_EC_COMMAND_COMPLETE; + if (t->command == ACPI_EC_COMMAND_QUERY) + pr_debug("hardware QR_EC completion\n"); wakeup = true; } } else @@ -208,7 +210,20 @@ static bool advance_transaction(struct acpi_ec *ec) } return wakeup; } else { - if ((status & ACPI_EC_FLAG_IBF) == 0) { + /* + * There is firmware refusing to respond QR_EC when 
SCI_EVT + * is not set, for which case, we complete the QR_EC + * without issuing it to the firmware. + * https://bugzilla.kernel.org/show_bug.cgi?id=86211 + */ + if (!(status & ACPI_EC_FLAG_SCI) && + (t->command == ACPI_EC_COMMAND_QUERY)) { + t->flags |= ACPI_EC_COMMAND_POLL; + t->rdata[t->ri++] = 0x00; + t->flags |= ACPI_EC_COMMAND_COMPLETE; + pr_debug("software QR_EC completion\n"); + wakeup = true; + } else if ((status & ACPI_EC_FLAG_IBF) == 0) { acpi_ec_write_cmd(ec, t->command); t->flags |= ACPI_EC_COMMAND_POLL; } else @@ -288,11 +303,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, /* following two actions should be kept atomic */ ec->curr = t; start_transaction(ec); - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) - clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); spin_unlock_irqrestore(&ec->lock, tmp); ret = ec_poll(ec); spin_lock_irqsave(&ec->lock, tmp); + if (ec->curr->command == ACPI_EC_COMMAND_QUERY) + clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); ec->curr = NULL; spin_unlock_irqrestore(&ec->lock, tmp); return ret; @@ -1015,6 +1030,10 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL}, { + ec_flag_msi, "Clevo W350etq", { + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."), + DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL}, + { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, { diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 9c62340c2360..6e6b80eb0bba 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -481,6 +481,14 @@ void acpi_pci_irq_disable(struct pci_dev *dev) if (!pin) return; + /* Keep IOAPIC pin configuration when suspending */ + if (dev->dev.power.is_prepared) + return; +#ifdef CONFIG_PM_RUNTIME + if (dev->dev.power.runtime_status == RPM_SUSPENDING) + return; +#endif + entry = acpi_pci_irq_lookup(dev, pin); if (!entry) return; @@ -498,5 +506,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) */ dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); - acpi_unregister_gsi(gsi); + if (gsi >= 0 && dev->irq > 0) + acpi_unregister_gsi(gsi); } diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 4fcbd670415c..d9f71581b79b 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -120,6 +120,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, unsigned int cpu = (unsigned long)hcpu; struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_device *device; + action &= ~CPU_TASKS_FROZEN; /* * CPU_STARTING and CPU_DYING must not sleep. 
Return here since diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 3dca36d4ad26..17f9ec501972 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1071,9 +1071,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { - cpuidle_pause_and_lock(); /* Protect against cpu-hotplug */ get_online_cpus(); + cpuidle_pause_and_lock(); /* Disable all cpuidle devices */ for_each_online_cpu(cpu) { @@ -1100,8 +1100,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) cpuidle_enable_device(dev); } } - put_online_cpus(); cpuidle_resume_and_unlock(); + put_online_cpus(); } return 0; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 5d592e17d760..ae44d8654c82 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -130,7 +130,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias, list_for_each_entry(id, &acpi_dev->pnp.ids, list) { count = snprintf(&modalias[len], size, "%s:", id->id); if (count < 0) - return EINVAL; + return -EINVAL; if (count >= size) return -ENOMEM; len += count; @@ -353,7 +353,8 @@ static int acpi_scan_hot_remove(struct acpi_device *device) unsigned long long sta; acpi_status status; - if (device->handler->hotplug.demand_offline && !acpi_force_hot_remove) { + if (device->handler && device->handler->hotplug.demand_offline + && !acpi_force_hot_remove) { if (!acpi_scan_is_offline(device, true)) return -EBUSY; } else { @@ -666,8 +667,14 @@ static ssize_t acpi_device_sun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); + acpi_status status; + unsigned long long sun; + + status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); + if (ACPI_FAILURE(status)) + return -ENODEV; - return sprintf(buf, "%lu\n", acpi_dev->pnp.sun); + return sprintf(buf, "%llu\n", sun); } static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); @@ -689,7 +696,6 @@ static int acpi_device_setup_files(struct acpi_device *dev) { struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; - unsigned long long sun; int result = 0; /* @@ -730,14 +736,10 @@ static int acpi_device_setup_files(struct acpi_device *dev) if (dev->pnp.unique_id) result = device_create_file(&dev->dev, &dev_attr_uid); - status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun); - if (ACPI_SUCCESS(status)) { - dev->pnp.sun = (unsigned long)sun; + if (acpi_has_method(dev->handle, "_SUN")) { result = device_create_file(&dev->dev, &dev_attr_sun); if (result) goto end; - } else { - dev->pnp.sun = (unsigned long)-1; } if (acpi_has_method(dev->handle, "_STA")) { @@ -921,12 +923,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data) device->driver->ops.notify(device, event); } -static acpi_status acpi_device_notify_fixed(void *data) +static void acpi_device_notify_fixed(void *data) { struct acpi_device *device = data; /* Fixed hardware devices have no handles */ acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); +} + +static acpi_status acpi_device_fixed_event(void *data) +{ + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); return AE_OK; } @@ -937,12 +944,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device) if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_notify_fixed, + acpi_device_fixed_event, 
device); else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) status = acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_notify_fixed, + acpi_device_fixed_event, device); else status = acpi_install_notify_handler(device->handle, @@ -959,10 +966,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device) { if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_notify_fixed); + acpi_device_fixed_event); else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_notify_fixed); + acpi_device_fixed_event); else acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, acpi_device_notify); @@ -974,7 +981,7 @@ static int acpi_device_probe(struct device *dev) struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver); int ret; - if (acpi_dev->handler) + if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev)) return -EINVAL; if (!acpi_drv->ops.add) @@ -2182,6 +2189,9 @@ static void acpi_bus_attach(struct acpi_device *device) ok: list_for_each_entry(child, &device->children, node) acpi_bus_attach(child); + + if (device->handler && device->handler->hotplug.notify_online) + device->handler->hotplug.notify_online(device); } /** diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 826884392e6b..8e7e18567ae6 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -82,9 +82,9 @@ module_param(allow_duplicates, bool, 0644); * For Windows 8 systems: used to decide if video module * should skip registering backlight interface of its own. */ -static int use_native_backlight_param = 1; +static int use_native_backlight_param = -1; module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); -static bool use_native_backlight_dmi = false; +static bool use_native_backlight_dmi = true; static int register_count; static struct mutex video_list_lock; @@ -417,6 +417,12 @@ static int __init video_set_use_native_backlight(const struct dmi_system_id *d) return 0; } +static int __init video_disable_native_backlight(const struct dmi_system_id *d) +{ + use_native_backlight_dmi = false; + return 0; +} + static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -720,6 +726,49 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), }, }, + + /* + * These models have a working acpi_video backlight control, and using + * native backlight causes a regression where backlight does not work + * when userspace is not handling brightness key events. 
Disable + * native_backlight on these to fix this: + * https://bugzilla.kernel.org/show_bug.cgi?id=81691 + */ + { + .callback = video_disable_native_backlight, + .ident = "ThinkPad T420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"), + }, + }, + { + .callback = video_disable_native_backlight, + .ident = "ThinkPad T520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), + }, + }, + { + .callback = video_disable_native_backlight, + .ident = "ThinkPad X201s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), + }, + }, + + /* The native backlight controls do not work on some older machines */ + { + /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */ + .callback = video_disable_native_backlight, + .ident = "HP ENVY 15 Notebook", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), + }, + }, {} }; diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index 558a239954e8..d8961ef4d2e7 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c @@ -25,7 +25,8 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> -#include <linux/tegra-ahb.h> + +#include <soc/tegra/ahb.h> #define DRV_NAME "tegra-ahb" diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index e65d400efd44..e1b92788c225 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -16,6 +16,7 @@ menuconfig ATA depends on BLOCK depends on !(M32R || M68K || S390) || BROKEN select SCSI + select GLOB ---help--- If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or any other ATA device under Linux, say Y and make sure that you know diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index a29f8012fb08..a0cc0edafc78 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -305,6 +305,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ + { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -442,6 +450,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), + .driver_data = board_ahci_yes_fbs }, /* 88se9182 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ @@ -1329,6 +1339,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) else if (pdev->vendor == 0x1c44 && pdev->device == 
0x8000) ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; + /* + * The JMicron chip 361/363 contains one SATA controller and one + * PATA controller,for powering on these both controllers, we must + * follow the sequence one by one, otherwise one of them can not be + * powered on successfully, so here we disable the async suspend + * method for these chips. + */ + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && + (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 || + pdev->device == PCI_DEVICE_ID_JMICRON_JMB361)) + device_disable_async_suspend(&pdev->dev); + /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c index fc3df47fca35..032904402c95 100644 --- a/drivers/ata/ahci_tegra.c +++ b/drivers/ata/ahci_tegra.c @@ -18,14 +18,17 @@ */ #include <linux/ahci_platform.h> -#include <linux/reset.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> -#include <linux/tegra-powergate.h> #include <linux/regulator/consumer.h> +#include <linux/reset.h> + +#include <soc/tegra/fuse.h> +#include <soc/tegra/pmc.h> + #include "ahci.h" #define SATA_CONFIGURATION_0 0x180 @@ -180,9 +183,12 @@ static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv) /* Pad calibration */ - /* FIXME Always use calibration 0. Change this to read the calibration - * fuse once the fuse driver has landed. */ - val = 0; + ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val); + if (ret) { + dev_err(&tegra->pdev->dev, + "failed to read calibration fuse: %d\n", ret); + return ret; + } calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK]; diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index bc281115490b..f03aab187f4d 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -78,6 +78,9 @@ #define CFG_MEM_RAM_SHUTDOWN 0x00000070 #define BLOCK_MEM_RDY 0x00000074 +/* Max retry for link down */ +#define MAX_LINK_DOWN_RETRY 3 + struct xgene_ahci_context { struct ahci_host_priv *hpriv; struct device *dev; @@ -145,6 +148,14 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) return rc; } +static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx) +{ + void __iomem *diagcsr = ctx->csr_diag; + + return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 && + readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF); +} + /** * xgene_ahci_read_id - Read ID data from the specified device * @dev: device @@ -229,8 +240,11 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel) * and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will * report disparity error and etc. In addition, during COMRESET, there can * be error reported in the register PORT_SCR_ERR. For SERR_DISPARITY and - * SERR_10B_8B_ERR, the PHY receiver line must be reseted. The following - * algorithm is followed to proper configure the hardware PHY during COMRESET: + * SERR_10B_8B_ERR, the PHY receiver line must be reseted. Also during long + * reboot cycle regression, sometimes the PHY reports link down even if the + * device is present because of speed negotiation failure. so need to retry + * the COMRESET to get the link up. The following algorithm is followed to + * proper configure the hardware PHY during COMRESET: * * Alg Part 1: * 1. Start the PHY at Gen3 speed (default setting) @@ -246,9 +260,15 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel) * Alg Part 2: * 1. 
On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error * reported in the register PORT_SCR_ERR, then reset the PHY receiver line - * 2. Go to Alg Part 3 + * 2. Go to Alg Part 4 * * Alg Part 3: + * 1. Check the PORT_SCR_STAT to see whether device presence detected but PHY + * communication establishment failed and maximum link down attempts are + * less than Max attempts 3 then goto Alg Part 1. + * 2. Go to Alg Part 4. + * + * Alg Part 4: * 1. Clear any pending from register PORT_SCR_ERR. * * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition @@ -267,19 +287,27 @@ static int xgene_ahci_do_hardreset(struct ata_link *link, u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; void __iomem *port_mmio = ahci_port_base(ap); struct ata_taskfile tf; + int link_down_retry = 0; int rc; - u32 val; - - /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(link->device, &tf); - tf.command = ATA_BUSY; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - rc = sata_link_hardreset(link, timing, deadline, online, + u32 val, sstatus; + + do { + /* clear D2H reception area to properly wait for D2H FIS */ + ata_tf_init(link->device, &tf); + tf.command = ATA_BUSY; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + rc = sata_link_hardreset(link, timing, deadline, online, ahci_check_ready); + if (*online) { + val = readl(port_mmio + PORT_SCR_ERR); + if (val & (SERR_DISPARITY | SERR_10B_8B_ERR)) + dev_warn(ctx->dev, "link has error\n"); + break; + } - val = readl(port_mmio + PORT_SCR_ERR); - if (val & (SERR_DISPARITY | SERR_10B_8B_ERR)) - dev_warn(ctx->dev, "link has error\n"); + sata_scr_read(link, SCR_STATUS, &sstatus); + } while (link_down_retry++ < MAX_LINK_DOWN_RETRY && + (sstatus & 0xff) == 0x1); /* clear all errors if any pending */ val = readl(port_mmio + PORT_SCR_ERR); @@ -344,7 +372,7 @@ static struct ata_port_operations xgene_ahci_ops = { }; static const struct ata_port_info xgene_ahci_port_info = { - .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ, + .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &xgene_ahci_ops, @@ -467,6 +495,11 @@ static int xgene_ahci_probe(struct platform_device *pdev) return -ENODEV; } + if (xgene_ahci_is_memram_inited(ctx)) { + dev_info(dev, "skip clock and PHY initialization\n"); + goto skip_clk_phy; + } + /* Due to errata, HW requires full toggle transition */ rc = ahci_platform_enable_clks(hpriv); if (rc) @@ -479,8 +512,8 @@ static int xgene_ahci_probe(struct platform_device *pdev) /* Configure the host controller */ xgene_ahci_hw_init(hpriv); - - hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ; +skip_clk_phy: + hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ; rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info); if (rc) diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 893e30e9a9ef..ffbe625e6fd2 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, /* SATA Controller IDE (Coleto Creek) */ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 
0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, { } /* terminate list */ }; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 677c0c1b03bd..f3e7b9f894cd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -59,6 +59,7 @@ #include <linux/async.h> #include <linux/log2.h> #include <linux/slab.h> +#include <linux/glob.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> @@ -4227,7 +4228,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, /* * Some WD SATA-I drives spin up and down erratically when the link @@ -4250,73 +4251,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { } }; -/** - * glob_match - match a text string against a glob-style pattern - * @text: the string to be examined - * @pattern: the glob-style pattern to be matched against - * - * Either/both of text and pattern can be empty strings. - * - * Match text against a glob-style pattern, with wildcards and simple sets: - * - * ? matches any single character. - * * matches any run of characters. - * [xyz] matches a single character from the set: x, y, or z. - * [a-d] matches a single character from the range: a, b, c, or d. - * [a-d0-9] matches a single character from either range. - * - * The special characters ?, [, -, or *, can be matched using a set, eg. [*] - * Behaviour with malformed patterns is undefined, though generally reasonable. - * - * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx" - * - * This function uses one level of recursion per '*' in pattern. - * Since it calls _nothing_ else, and has _no_ explicit local variables, - * this will not cause stack problems for any reasonable use here. - * - * RETURNS: - * 0 on match, 1 otherwise. - */ -static int glob_match (const char *text, const char *pattern) -{ - do { - /* Match single character or a '?' 
wildcard */ - if (*text == *pattern || *pattern == '?') { - if (!*pattern++) - return 0; /* End of both strings: match */ - } else { - /* Match single char against a '[' bracketed ']' pattern set */ - if (!*text || *pattern != '[') - break; /* Not a pattern set */ - while (*++pattern && *pattern != ']' && *text != *pattern) { - if (*pattern == '-' && *(pattern - 1) != '[') - if (*text > *(pattern - 1) && *text < *(pattern + 1)) { - ++pattern; - break; - } - } - if (!*pattern || *pattern == ']') - return 1; /* No match */ - while (*pattern && *pattern++ != ']'); - } - } while (*++text && *pattern); - - /* Match any run of chars against a '*' wildcard */ - if (*pattern == '*') { - if (!*++pattern) - return 0; /* Match: avoid recursion at end of pattern */ - /* Loop to handle additional pattern chars after the wildcard */ - while (*text) { - if (glob_match(text, pattern) == 0) - return 0; /* Remainder matched */ - ++text; /* Absorb (match) this char and try again */ - } - } - if (!*text && !*pattern) - return 0; /* End of both strings: match */ - return 1; /* No match */ -} - static unsigned long ata_dev_blacklisted(const struct ata_device *dev) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; @@ -4327,10 +4261,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev) ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); while (ad->model_num) { - if (!glob_match(model_num, ad->model_num)) { + if (glob_match(model_num, ad->model_num)) { if (ad->model_rev == NULL) return ad->horkage; - if (!glob_match(model_rev, ad->model_rev)) + if (glob_match(model_rev, ad->model_rev)) return ad->horkage; } ad++; diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 4d1a5d2c4287..47e418b8c8ba 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -143,6 +143,18 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i }; const struct ata_port_info *ppi[] = { &info, NULL }; + /* + * The JMicron chip 361/363 contains one SATA controller and one + * PATA controller,for powering on these both controllers, we must + * follow the sequence one by one, otherwise one of them can not be + * powered on successfully, so here we disable the async suspend + * method for these chips. + */ + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && + (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 || + pdev->device == PCI_DEVICE_ID_JMICRON_JMB361)) + device_disable_async_suspend(&pdev->dev); + return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); } diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 2578fc16960a..1a24a5dc3940 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c @@ -360,7 +360,7 @@ static int pata_s3c_wait_after_reset(struct ata_link *link, /* * pata_s3c_bus_softreset - PATA device software reset */ -static unsigned int pata_s3c_bus_softreset(struct ata_port *ap, +static int pata_s3c_bus_softreset(struct ata_port *ap, unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 4e006d74bef8..7f4cb76ed9fa 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -585,7 +585,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, * Note: Original code is ata_bus_softreset(). 
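/*
 * Editor's sketch (not part of the patch): the libata-core.c hunk further
 * above drops libata's private glob_match() in favour of the generic matcher
 * enabled by "select GLOB" in drivers/ata/Kconfig. Note the inverted sense at
 * the call sites: the old helper returned 0 on match, while the library
 * routine returns a bool that is true on match. Usage sketch below, assuming
 * the lib/glob.c prototype bool glob_match(char const *pat, char const *str);
 * the model string in the comment is made up.
 */
#include <linux/glob.h>

static bool example_model_is_blacklisted(const char *model_num)
{
	/* e.g. a hypothetical "Crucial_CT256M550SSD1" would match this pattern */
	return glob_match("Crucial_CT*M550SSD*", model_num);
}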
*/ -static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, +static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; @@ -599,9 +599,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, udelay(20); out_be32(ioaddr->ctl_addr, ap->ctl); - scc_wait_after_reset(&ap->link, devmask, deadline); - - return 0; + return scc_wait_after_reset(&ap->link, devmask, deadline); } /** @@ -618,7 +616,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes, { struct ata_port *ap = link->ap; unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; - unsigned int devmask = 0, err_mask; + unsigned int devmask = 0; + int rc; u8 err; DPRINTK("ENTER\n"); @@ -634,9 +633,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes, /* issue bus reset */ DPRINTK("about to softreset, devmask=%x\n", devmask); - err_mask = scc_bus_softreset(ap, devmask, deadline); - if (err_mask) { - ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask); + rc = scc_bus_softreset(ap, devmask, deadline); + if (rc) { + ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc); return -EIO; } diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index 0e3f8f9dcd29..480fa6ffbc09 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -299,6 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb) out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); read_unlock(&vcc_sklist_lock); if (!out_vcc) { + result = -EUNATCH; atomic_inc(&vcc->stats->tx_err); goto done; } diff --git a/drivers/atm/he.c b/drivers/atm/he.c index aa6be2698669..c39702bc279d 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -533,14 +533,13 @@ static void he_init_tx_lbfp(struct he_dev *he_dev) static int he_init_tpdrq(struct he_dev *he_dev) { - he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev, - CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys); + he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev, + CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), + &he_dev->tpdrq_phys); if (he_dev->tpdrq_base == NULL) { hprintk("failed to alloc tpdrq\n"); return -ENOMEM; } - memset(he_dev->tpdrq_base, 0, - CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq)); he_dev->tpdrq_tail = he_dev->tpdrq_base; he_dev->tpdrq_head = he_dev->tpdrq_base; @@ -804,13 +803,13 @@ static int he_init_group(struct he_dev *he_dev, int group) goto out_free_rbpl_virt; } - he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev, - CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys); + he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev, + CONFIG_RBPL_SIZE * sizeof(struct he_rbp), + &he_dev->rbpl_phys); if (he_dev->rbpl_base == NULL) { hprintk("failed to alloc rbpl_base\n"); goto out_destroy_rbpl_pool; } - memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp)); INIT_LIST_HEAD(&he_dev->rbpl_outstanding); @@ -843,13 +842,13 @@ static int he_init_group(struct he_dev *he_dev, int group) /* rx buffer ready queue */ - he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev, - CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys); + he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev, + CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), + &he_dev->rbrq_phys); if (he_dev->rbrq_base == NULL) { hprintk("failed to allocate rbrq\n"); goto out_free_rbpl; } - memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq)); he_dev->rbrq_head = 
he_dev->rbrq_base; he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16)); @@ -867,13 +866,13 @@ static int he_init_group(struct he_dev *he_dev, int group) /* tx buffer ready queue */ - he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev, - CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys); + he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev, + CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), + &he_dev->tbrq_phys); if (he_dev->tbrq_base == NULL) { hprintk("failed to allocate tbrq\n"); goto out_free_rbpq_base; } - memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq)); he_dev->tbrq_head = he_dev->tbrq_base; @@ -1460,13 +1459,13 @@ static int he_start(struct atm_dev *dev) /* host status page */ - he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev, - sizeof(struct he_hsp), &he_dev->hsp_phys); + he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev, + sizeof(struct he_hsp), + &he_dev->hsp_phys); if (he_dev->hsp == NULL) { hprintk("failed to allocate host status page\n"); return -ENOMEM; } - memset(he_dev->hsp, 0, sizeof(struct he_hsp)); he_writel(he_dev, he_dev->hsp_phys, HSP_BA); /* initialize framer */ diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index b621f56a36be..2b24ed056728 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -641,13 +641,11 @@ alloc_scq(struct idt77252_dev *card, int class) scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); if (!scq) return NULL; - scq->base = pci_alloc_consistent(card->pcidev, SCQ_SIZE, - &scq->paddr); + scq->base = pci_zalloc_consistent(card->pcidev, SCQ_SIZE, &scq->paddr); if (scq->base == NULL) { kfree(scq); return NULL; } - memset(scq->base, 0, SCQ_SIZE); scq->next = scq->base; scq->last = scq->base + (SCQ_ENTRIES - 1); @@ -972,13 +970,12 @@ init_rsq(struct idt77252_dev *card) { struct rsq_entry *rsqe; - card->rsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE, - &card->rsq.paddr); + card->rsq.base = pci_zalloc_consistent(card->pcidev, RSQSIZE, + &card->rsq.paddr); if (card->rsq.base == NULL) { printk("%s: can't allocate RSQ.\n", card->name); return -1; } - memset(card->rsq.base, 0, RSQSIZE); card->rsq.last = card->rsq.base + RSQ_NUM_ENTRIES - 1; card->rsq.next = card->rsq.last; @@ -3400,14 +3397,14 @@ static int init_card(struct atm_dev *dev) writel(0, SAR_REG_GP); /* Initialize RAW Cell Handle Register */ - card->raw_cell_hnd = pci_alloc_consistent(card->pcidev, 2 * sizeof(u32), - &card->raw_cell_paddr); + card->raw_cell_hnd = pci_zalloc_consistent(card->pcidev, + 2 * sizeof(u32), + &card->raw_cell_paddr); if (!card->raw_cell_hnd) { printk("%s: memory allocation failure.\n", card->name); deinit_card(card); return -1; } - memset(card->raw_cell_hnd, 0, 2 * sizeof(u32)); writel(card->raw_cell_paddr, SAR_REG_RAWHND); IPRINTK("%s: raw cell handle is at 0x%p.\n", card->name, card->raw_cell_hnd); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 943cf0d6abaf..7652e8dc188f 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1278,6 +1278,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL); if (!card->dma_bounce) { dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n"); + err = -ENOMEM; /* Fallback to MMIO doesn't work */ goto out_unmap_both; } diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 88500fed3c7a..4e7f0ff83ae7 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -289,16 +289,6 @@ config CMA_ALIGNMENT 
If unsure, leave the default value "8". -config CMA_AREAS - int "Maximum count of the CMA device-private areas" - default 7 - help - CMA allows to create CMA areas for particular devices. This parameter - sets the maximum number of such device private CMA areas in the - system. - - If unsure, leave the default value "7". - endif endmenu diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 6467c919c509..6606abdf880c 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -24,23 +24,9 @@ #include <linux/memblock.h> #include <linux/err.h> -#include <linux/mm.h> -#include <linux/mutex.h> -#include <linux/page-isolation.h> #include <linux/sizes.h> -#include <linux/slab.h> -#include <linux/swap.h> -#include <linux/mm_types.h> #include <linux/dma-contiguous.h> - -struct cma { - unsigned long base_pfn; - unsigned long count; - unsigned long *bitmap; - struct mutex lock; -}; - -struct cma *dma_contiguous_default_area; +#include <linux/cma.h> #ifdef CONFIG_CMA_SIZE_MBYTES #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES @@ -48,6 +34,8 @@ struct cma *dma_contiguous_default_area; #define CMA_SIZE_MBYTES 0 #endif +struct cma *dma_contiguous_default_area; + /* * Default global CMA area size can be defined in kernel's .config. * This is useful mainly for distro maintainers to create a kernel @@ -154,65 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit) } } -static DEFINE_MUTEX(cma_mutex); - -static int __init cma_activate_area(struct cma *cma) -{ - int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long); - unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; - unsigned i = cma->count >> pageblock_order; - struct zone *zone; - - cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - - if (!cma->bitmap) - return -ENOMEM; - - WARN_ON_ONCE(!pfn_valid(pfn)); - zone = page_zone(pfn_to_page(pfn)); - - do { - unsigned j; - base_pfn = pfn; - for (j = pageblock_nr_pages; j; --j, pfn++) { - WARN_ON_ONCE(!pfn_valid(pfn)); - /* - * alloc_contig_range requires the pfn range - * specified to be in the same zone. Make this - * simple by forcing the entire CMA resv range - * to be in the same zone. 
- */ - if (page_zone(pfn_to_page(pfn)) != zone) - goto err; - } - init_cma_reserved_pageblock(pfn_to_page(base_pfn)); - } while (--i); - - mutex_init(&cma->lock); - return 0; - -err: - kfree(cma->bitmap); - return -EINVAL; -} - -static struct cma cma_areas[MAX_CMA_AREAS]; -static unsigned cma_area_count; - -static int __init cma_init_reserved_areas(void) -{ - int i; - - for (i = 0; i < cma_area_count; i++) { - int ret = cma_activate_area(&cma_areas[i]); - if (ret) - return ret; - } - - return 0; -} -core_initcall(cma_init_reserved_areas); - /** * dma_contiguous_reserve_area() - reserve custom contiguous area * @size: Size of the reserved area (in bytes), @@ -234,72 +163,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, phys_addr_t limit, struct cma **res_cma, bool fixed) { - struct cma *cma = &cma_areas[cma_area_count]; - phys_addr_t alignment; - int ret = 0; - - pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__, - (unsigned long)size, (unsigned long)base, - (unsigned long)limit); - - /* Sanity checks */ - if (cma_area_count == ARRAY_SIZE(cma_areas)) { - pr_err("Not enough slots for CMA reserved regions!\n"); - return -ENOSPC; - } - - if (!size) - return -EINVAL; - - /* Sanitise input arguments */ - alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); - base = ALIGN(base, alignment); - size = ALIGN(size, alignment); - limit &= ~(alignment - 1); - - /* Reserve memory */ - if (base && fixed) { - if (memblock_is_region_reserved(base, size) || - memblock_reserve(base, size) < 0) { - ret = -EBUSY; - goto err; - } - } else { - phys_addr_t addr = memblock_alloc_range(size, alignment, base, - limit); - if (!addr) { - ret = -ENOMEM; - goto err; - } else { - base = addr; - } - } - - /* - * Each reserved area must be initialised later, when more kernel - * subsystems (like slab allocator) are available. - */ - cma->base_pfn = PFN_DOWN(base); - cma->count = size >> PAGE_SHIFT; - *res_cma = cma; - cma_area_count++; + int ret; - pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M, - (unsigned long)base); + ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma); + if (ret) + return ret; /* Architecture specific contiguous memory fixup. */ - dma_contiguous_early_fixup(base, size); - return 0; -err: - pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); - return ret; -} + dma_contiguous_early_fixup(cma_get_base(*res_cma), + cma_get_size(*res_cma)); -static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count) -{ - mutex_lock(&cma->lock); - bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); - mutex_unlock(&cma->lock); + return 0; } /** @@ -316,62 +190,10 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count) struct page *dma_alloc_from_contiguous(struct device *dev, int count, unsigned int align) { - unsigned long mask, pfn, pageno, start = 0; - struct cma *cma = dev_get_cma_area(dev); - struct page *page = NULL; - int ret; - - if (!cma || !cma->count) - return NULL; - if (align > CONFIG_CMA_ALIGNMENT) align = CONFIG_CMA_ALIGNMENT; - pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, - count, align); - - if (!count) - return NULL; - - mask = (1 << align) - 1; - - - for (;;) { - mutex_lock(&cma->lock); - pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, - start, count, mask); - if (pageno >= cma->count) { - mutex_unlock(&cma->lock); - break; - } - bitmap_set(cma->bitmap, pageno, count); - /* - * It's safe to drop the lock here. 
We've marked this region for - * our exclusive use. If the migration fails we will take the - * lock again and unmark it. - */ - mutex_unlock(&cma->lock); - - pfn = cma->base_pfn + pageno; - mutex_lock(&cma_mutex); - ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); - mutex_unlock(&cma_mutex); - if (ret == 0) { - page = pfn_to_page(pfn); - break; - } else if (ret != -EBUSY) { - clear_cma_bitmap(cma, pfn, count); - break; - } - clear_cma_bitmap(cma, pfn, count); - pr_debug("%s(): memory range at %p is busy, retrying\n", - __func__, pfn_to_page(pfn)); - /* try again with a bit different memory target */ - start = pageno + mask + 1; - } - - pr_debug("%s(): returned %p\n", __func__, page); - return page; + return cma_alloc(dev_get_cma_area(dev), count, align); } /** @@ -387,23 +209,5 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count, bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count) { - struct cma *cma = dev_get_cma_area(dev); - unsigned long pfn; - - if (!cma || !pages) - return false; - - pr_debug("%s(page %p)\n", __func__, (void *)pages); - - pfn = page_to_pfn(pages); - - if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) - return false; - - VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); - - free_contig_range(pfn, count); - clear_cma_bitmap(cma, pfn, count); - - return true; + return cma_release(dev_get_cma_area(dev), pages, count); } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 89f752dd8465..a2e13e250bba 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -284,7 +284,7 @@ static int memory_subsys_online(struct device *dev) * attribute and need to set the online_type. */ if (mem->online_type < 0) - mem->online_type = ONLINE_KEEP; + mem->online_type = MMOP_ONLINE_KEEP; ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); @@ -315,23 +315,23 @@ store_mem_state(struct device *dev, if (ret) return ret; - if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) - online_type = ONLINE_KERNEL; - else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) - online_type = ONLINE_MOVABLE; - else if (!strncmp(buf, "online", min_t(int, count, 6))) - online_type = ONLINE_KEEP; - else if (!strncmp(buf, "offline", min_t(int, count, 7))) - online_type = -1; + if (sysfs_streq(buf, "online_kernel")) + online_type = MMOP_ONLINE_KERNEL; + else if (sysfs_streq(buf, "online_movable")) + online_type = MMOP_ONLINE_MOVABLE; + else if (sysfs_streq(buf, "online")) + online_type = MMOP_ONLINE_KEEP; + else if (sysfs_streq(buf, "offline")) + online_type = MMOP_OFFLINE; else { ret = -EINVAL; goto err; } switch (online_type) { - case ONLINE_KERNEL: - case ONLINE_MOVABLE: - case ONLINE_KEEP: + case MMOP_ONLINE_KERNEL: + case MMOP_ONLINE_MOVABLE: + case MMOP_ONLINE_KEEP: /* * mem->online_type is not protected so there can be a * race here. 
However, when racing online, the first @@ -342,7 +342,7 @@ store_mem_state(struct device *dev, mem->online_type = online_type; ret = device_online(&mem->dev); break; - case -1: + case MMOP_OFFLINE: ret = device_offline(&mem->dev); break; default: @@ -406,7 +406,9 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, int i, ret; unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block; - phys_addr = simple_strtoull(buf, NULL, 0); + ret = kstrtoull(buf, 0, &phys_addr); + if (ret) + return ret; if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1)) return -EINVAL; diff --git a/drivers/base/node.c b/drivers/base/node.c index 8f7ed9933a7c..c6d3ae05f1ca 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -126,7 +126,7 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(nid, NR_FILE_PAGES)), nid, K(node_page_state(nid, NR_FILE_MAPPED)), nid, K(node_page_state(nid, NR_ANON_PAGES)), - nid, K(node_page_state(nid, NR_SHMEM)), + nid, K(i.sharedram), nid, node_page_state(nid, NR_KERNEL_STACK) * THREAD_SIZE / 1024, nid, K(node_page_state(nid, NR_PAGETABLE)), diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 4251570610c9..8a3f51f7b1b9 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -11,12 +11,15 @@ config REGMAP config REGMAP_I2C tristate + depends on I2C config REGMAP_SPI tristate + depends on SPI config REGMAP_SPMI tristate + depends on SPMI config REGMAP_MMIO tristate diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 7d1326985bee..0da5865df5b1 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -49,8 +49,10 @@ struct regmap_async { }; struct regmap { - struct mutex mutex; - spinlock_t spinlock; + union { + struct mutex mutex; + spinlock_t spinlock; + }; unsigned long spinlock_flags; regmap_lock lock; regmap_unlock unlock; @@ -146,6 +148,9 @@ struct regcache_ops { enum regcache_type type; int (*init)(struct regmap *map); int (*exit)(struct regmap *map); +#ifdef CONFIG_DEBUG_FS + void (*debugfs_init)(struct regmap *map); +#endif int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); int (*write)(struct regmap *map, unsigned int reg, unsigned int value); int (*sync)(struct regmap *map, unsigned int min, unsigned int max); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 6a7e4fa12854..f3e8fe0cc650 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -194,10 +194,6 @@ static void rbtree_debugfs_init(struct regmap *map) { debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); } -#else -static void rbtree_debugfs_init(struct regmap *map) -{ -} #endif static int regcache_rbtree_init(struct regmap *map) @@ -222,8 +218,6 @@ static int regcache_rbtree_init(struct regmap *map) goto err; } - rbtree_debugfs_init(map); - return 0; err: @@ -532,6 +526,9 @@ struct regcache_ops regcache_rbtree_ops = { .name = "rbtree", .init = regcache_rbtree_init, .exit = regcache_rbtree_exit, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = rbtree_debugfs_init, +#endif .read = regcache_rbtree_read, .write = regcache_rbtree_write, .sync = regcache_rbtree_sync, diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 29b4128da0b0..f1280dc356d0 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -269,8 +269,11 @@ static int regcache_default_sync(struct regmap *map, unsigned 
int min, map->cache_bypass = 1; ret = _regmap_write(map, reg, val); map->cache_bypass = 0; - if (ret) + if (ret) { + dev_err(map->dev, "Unable to sync register %#x. %d\n", + reg, ret); return ret; + } dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); } @@ -615,8 +618,11 @@ static int regcache_sync_block_single(struct regmap *map, void *block, ret = _regmap_write(map, regtmp, val); map->cache_bypass = 0; - if (ret != 0) + if (ret != 0) { + dev_err(map->dev, "Unable to sync register %#x. %d\n", + regtmp, ret); return ret; + } dev_dbg(map->dev, "Synced register %#x, value %#x\n", regtmp, val); } @@ -641,6 +647,9 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, map->cache_bypass = 1; ret = _regmap_raw_write(map, base, *data, count * val_bytes); + if (ret) + dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n", + base, cur - map->reg_stride, ret); map->cache_bypass = 0; @@ -698,7 +707,7 @@ int regcache_sync_block(struct regmap *map, void *block, unsigned int block_base, unsigned int start, unsigned int end) { - if (regmap_can_raw_write(map)) + if (regmap_can_raw_write(map) && !map->use_single_rw) return regcache_sync_block_raw(map, block, cache_present, block_base, start, end); else diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 45d812c0ea77..5799a0b9e6cc 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -473,6 +473,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name) { struct rb_node *next; struct regmap_range_node *range_node; + const char *devname = "dummy"; /* If we don't have the debugfs root yet, postpone init */ if (!regmap_debugfs_root) { @@ -491,12 +492,15 @@ void regmap_debugfs_init(struct regmap *map, const char *name) INIT_LIST_HEAD(&map->debugfs_off_cache); mutex_init(&map->cache_lock); + if (map->dev) + devname = dev_name(map->dev); + if (name) { map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", - dev_name(map->dev), name); + devname, name); name = map->debugfs_name; } else { - name = dev_name(map->dev); + name = devname; } map->debugfs = debugfs_create_dir(name, regmap_debugfs_root); @@ -512,7 +516,14 @@ void regmap_debugfs_init(struct regmap *map, const char *name) map, ®map_reg_ranges_fops); if (map->max_register || regmap_readable(map, 0)) { - debugfs_create_file("registers", 0400, map->debugfs, + umode_t registers_mode; + + if (IS_ENABLED(REGMAP_ALLOW_WRITE_DEBUGFS)) + registers_mode = 0600; + else + registers_mode = 0400; + + debugfs_create_file("registers", registers_mode, map->debugfs, map, ®map_map_fops); debugfs_create_file("access", 0400, map->debugfs, map, ®map_access_fops); @@ -538,6 +549,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name) next = rb_next(&range_node->node); } + + if (map->cache_ops && map->cache_ops->debugfs_init) + map->cache_ops->debugfs_init(map); } void regmap_debugfs_exit(struct regmap *map) diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index ca193d1ef47c..053150a7f9f2 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -168,6 +168,8 @@ static struct regmap_bus regmap_i2c = { .write = regmap_i2c_write, .gather_write = regmap_i2c_gather_write, .read = regmap_i2c_read, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, }; static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, diff --git a/drivers/base/regmap/regmap-spi.c 
b/drivers/base/regmap/regmap-spi.c index 0eb3097c0d76..53d1148e80a0 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -109,6 +109,8 @@ static struct regmap_bus regmap_spi = { .async_alloc = regmap_spi_async_alloc, .read = regmap_spi_read, .read_flag_mask = 0x80, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, }; /** diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 78f43fb2fe84..d2f8a818d200 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -15,6 +15,7 @@ #include <linux/export.h> #include <linux/mutex.h> #include <linux/err.h> +#include <linux/of.h> #include <linux/rbtree.h> #include <linux/sched.h> @@ -109,7 +110,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg) bool regmap_volatile(struct regmap *map, unsigned int reg) { - if (!regmap_readable(map, reg)) + if (!map->format.format_write && !regmap_readable(map, reg)) return false; if (map->volatile_reg) @@ -448,6 +449,71 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, } EXPORT_SYMBOL_GPL(regmap_attach_dev); +static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, + const struct regmap_config *config) +{ + enum regmap_endian endian; + + /* Retrieve the endianness specification from the regmap config */ + endian = config->reg_format_endian; + + /* If the regmap config specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* Retrieve the endianness specification from the bus config */ + if (bus && bus->reg_format_endian_default) + endian = bus->reg_format_endian_default; + + /* If the bus specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* Use this if no other value was found */ + return REGMAP_ENDIAN_BIG; +} + +static enum regmap_endian regmap_get_val_endian(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config) +{ + struct device_node *np; + enum regmap_endian endian; + + /* Retrieve the endianness specification from the regmap config */ + endian = config->val_format_endian; + + /* If the regmap config specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* If the dev and dev->of_node exist try to get endianness from DT */ + if (dev && dev->of_node) { + np = dev->of_node; + + /* Parse the device's DT node for an endianness specification */ + if (of_property_read_bool(np, "big-endian")) + endian = REGMAP_ENDIAN_BIG; + else if (of_property_read_bool(np, "little-endian")) + endian = REGMAP_ENDIAN_LITTLE; + + /* If the endianness was specified in DT, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + } + + /* Retrieve the endianness specification from the bus config */ + if (bus && bus->val_format_endian_default) + endian = bus->val_format_endian_default; + + /* If the bus specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* Use this if no other value was found */ + return REGMAP_ENDIAN_BIG; +} + /** * regmap_init(): Initialise register map * @@ -551,17 +617,8 @@ struct regmap *regmap_init(struct device *dev, map->reg_read = _regmap_bus_read; } - reg_endian = config->reg_format_endian; - if (reg_endian == REGMAP_ENDIAN_DEFAULT) - reg_endian = bus->reg_format_endian_default; - if (reg_endian == REGMAP_ENDIAN_DEFAULT) - reg_endian = REGMAP_ENDIAN_BIG; - - val_endian = 
config->val_format_endian; - if (val_endian == REGMAP_ENDIAN_DEFAULT) - val_endian = bus->val_format_endian_default; - if (val_endian == REGMAP_ENDIAN_DEFAULT) - val_endian = REGMAP_ENDIAN_BIG; + reg_endian = regmap_get_reg_endian(bus, config); + val_endian = regmap_get_val_endian(dev, bus, config); switch (config->reg_bits + map->reg_shift) { case 2: @@ -1408,7 +1465,7 @@ int _regmap_write(struct regmap *map, unsigned int reg, } #ifdef LOG_DEVICE - if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) + if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) dev_info(map->dev, "%x <= %x\n", reg, val); #endif @@ -1659,6 +1716,9 @@ out: } else { void *wval; + if (!val_count) + return -EINVAL; + wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); if (!wval) { dev_err(map->dev, "Error in memory allocation\n"); @@ -2058,7 +2118,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, ret = map->reg_read(context, reg, val); if (ret == 0) { #ifdef LOG_DEVICE - if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) + if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) dev_info(map->dev, "%x => %x\n", reg, *val); #endif diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 294a7dd25190..f032ed6dd459 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -282,6 +282,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xA8DB */ { 0, }, }; MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 125d84505738..811e11c82f32 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -6741,11 +6741,11 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request, ErrorCode = -ENOMEM; if (DataTransferLength > 0) { - DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice, - DataTransferLength, &DataTransferBufferDMA); + DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice, + DataTransferLength, + &DataTransferBufferDMA); if (DataTransferBuffer == NULL) break; - memset(DataTransferBuffer, 0, DataTransferLength); } else if (DataTransferLength < 0) { @@ -6877,11 +6877,11 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request, ErrorCode = -ENOMEM; if (DataTransferLength > 0) { - DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice, - DataTransferLength, &DataTransferBufferDMA); + DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice, + DataTransferLength, + &DataTransferBufferDMA); if (DataTransferBuffer == NULL) break; - memset(DataTransferBuffer, 0, DataTransferLength); } else if (DataTransferLength < 0) { @@ -6899,14 +6899,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request, RequestSenseLength = UserCommand.RequestSenseLength; if (RequestSenseLength > 0) { - RequestSenseBuffer = pci_alloc_consistent(Controller->PCIDevice, - RequestSenseLength, &RequestSenseBufferDMA); + RequestSenseBuffer = pci_zalloc_consistent(Controller->PCIDevice, + RequestSenseLength, + &RequestSenseBufferDMA); if (RequestSenseBuffer == NULL) { ErrorCode = -ENOMEM; goto Failure2; } - memset(RequestSenseBuffer, 0, RequestSenseLength); } spin_lock_irqsave(&Controller->queue_lock, flags); while ((Command = DAC960_AllocateCommand(Controller)) == NULL) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 
c7d138eca731..3598110d2cef 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -442,12 +442,15 @@ static int rd_nr; int rd_size = CONFIG_BLK_DEV_RAM_SIZE; static int max_part; static int part_shift; +static int part_show = 0; module_param(rd_nr, int, S_IRUGO); MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); module_param(rd_size, int, S_IRUGO); MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); module_param(max_part, int, S_IRUGO); MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); +module_param(part_show, int, S_IRUGO); +MODULE_PARM_DESC(part_show, "Control RAM disk visibility in /proc/partitions"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); MODULE_ALIAS("rd"); @@ -501,7 +504,8 @@ static struct brd_device *brd_alloc(int i) disk->fops = &brd_fops; disk->private_data = brd; disk->queue = brd->brd_queue; - disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; + if (!part_show) + disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; sprintf(disk->disk_name, "ram%d", i); set_capacity(disk, rd_size * 2); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 4595c22f33f7..ff20f192b0f6 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1014,24 +1014,21 @@ static CommandList_struct *cmd_special_alloc(ctlr_info_t *h) u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; - c = (CommandList_struct *) pci_alloc_consistent(h->pdev, - sizeof(CommandList_struct), &cmd_dma_handle); + c = pci_zalloc_consistent(h->pdev, sizeof(CommandList_struct), + &cmd_dma_handle); if (c == NULL) return NULL; - memset(c, 0, sizeof(CommandList_struct)); c->cmdindex = -1; - c->err_info = (ErrorInfo_struct *) - pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct), - &err_dma_handle); + c->err_info = pci_zalloc_consistent(h->pdev, sizeof(ErrorInfo_struct), + &err_dma_handle); if (c->err_info == NULL) { pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, cmd_dma_handle); return NULL; } - memset(c->err_info, 0, sizeof(ErrorInfo_struct)); INIT_LIST_HEAD(&c->list); c->busaddr = (__u32) cmd_dma_handle; diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile index 8b450338075e..4464e353c1e8 100644 --- a/drivers/block/drbd/Makefile +++ b/drivers/block/drbd/Makefile @@ -3,5 +3,6 @@ drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o drbd-y += drbd_main.o drbd_strings.o drbd_nl.o drbd-y += drbd_interval.o drbd_state.o drbd-y += drbd_nla.o +drbd-$(CONFIG_DEBUG_FS) += drbd_debugfs.o obj-$(CONFIG_BLK_DEV_DRBD) += drbd.o diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 05a1780ffa85..d26a3fa63688 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -92,34 +92,26 @@ struct __packed al_transaction_on_disk { __be32 context[AL_CONTEXT_PER_TRANSACTION]; }; -struct update_odbm_work { - struct drbd_work w; - struct drbd_device *device; - unsigned int enr; -}; - -struct update_al_work { - struct drbd_work w; - struct drbd_device *device; - struct completion event; - int err; -}; - - -void *drbd_md_get_buffer(struct drbd_device *device) +void *drbd_md_get_buffer(struct drbd_device *device, const char *intent) { int r; wait_event(device->misc_wait, - (r = atomic_cmpxchg(&device->md_io_in_use, 0, 1)) == 0 || + (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 || device->state.disk <= D_FAILED); - return r ? 
NULL : page_address(device->md_io_page); + if (r) + return NULL; + + device->md_io.current_use = intent; + device->md_io.start_jif = jiffies; + device->md_io.submit_jif = device->md_io.start_jif - 1; + return page_address(device->md_io.page); } void drbd_md_put_buffer(struct drbd_device *device) { - if (atomic_dec_and_test(&device->md_io_in_use)) + if (atomic_dec_and_test(&device->md_io.in_use)) wake_up(&device->misc_wait); } @@ -145,10 +137,11 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b static int _drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, - struct page *page, sector_t sector, - int rw, int size) + sector_t sector, int rw) { struct bio *bio; + /* we do all our meta data IO in aligned 4k blocks. */ + const int size = 4096; int err; device->md_io.done = 0; @@ -156,15 +149,15 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags)) rw |= REQ_FUA | REQ_FLUSH; - rw |= REQ_SYNC; + rw |= REQ_SYNC | REQ_NOIDLE; bio = bio_alloc_drbd(GFP_NOIO); bio->bi_bdev = bdev->md_bdev; bio->bi_iter.bi_sector = sector; err = -EIO; - if (bio_add_page(bio, page, size, 0) != size) + if (bio_add_page(bio, device->md_io.page, size, 0) != size) goto out; - bio->bi_private = &device->md_io; + bio->bi_private = device; bio->bi_end_io = drbd_md_io_complete; bio->bi_rw = rw; @@ -179,7 +172,8 @@ static int _drbd_md_sync_page_io(struct drbd_device *device, } bio_get(bio); /* one bio_put() is in the completion handler */ - atomic_inc(&device->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */ + atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */ + device->md_io.submit_jif = jiffies; if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) bio_endio(bio, -EIO); else @@ -197,9 +191,7 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd sector_t sector, int rw) { int err; - struct page *iop = device->md_io_page; - - D_ASSERT(device, atomic_read(&device->md_io_in_use) == 1); + D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1); BUG_ON(!bdev->md_bdev); @@ -214,8 +206,7 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd current->comm, current->pid, __func__, (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ"); - /* we do all our meta data IO in aligned 4k blocks. */ - err = _drbd_md_sync_page_io(device, bdev, iop, sector, rw, 4096); + err = _drbd_md_sync_page_io(device, bdev, sector, rw); if (err) { drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err); @@ -297,26 +288,12 @@ bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval * return need_transaction; } -static int al_write_transaction(struct drbd_device *device, bool delegate); - -/* When called through generic_make_request(), we must delegate - * activity log I/O to the worker thread: a further request - * submitted via generic_make_request() within the same task - * would be queued on current->bio_list, and would only start - * after this function returns (see generic_make_request()). - * - * However, if we *are* the worker, we must not delegate to ourselves. 
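The delegation that the removed comment above describes followed a common pattern: hand the I/O to a worker thread and block on a completion until it has run. A generic sketch of that pattern using plain workqueues (not drbd's sender_work, names invented for illustration):

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/errno.h>

struct delegated_io {
	struct work_struct w;
	struct completion done;
	int err;
};

static void delegated_io_fn(struct work_struct *w)
{
	struct delegated_io *dio = container_of(w, struct delegated_io, w);

	dio->err = 0;		/* a real callback would issue the transaction here */
	complete(&dio->done);
}

static int delegate_and_wait(void)
{
	struct delegated_io dio = { .err = -EIO };

	init_completion(&dio.done);
	INIT_WORK_ONSTACK(&dio.w, delegated_io_fn);
	schedule_work(&dio.w);
	wait_for_completion(&dio.done);
	destroy_work_on_stack(&dio.w);
	return dio.err;
}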
- */ +static int al_write_transaction(struct drbd_device *device); -/* - * @delegate: delegate activity log I/O to the worker thread - */ -void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate) +void drbd_al_begin_io_commit(struct drbd_device *device) { bool locked = false; - BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task); - /* Serialize multiple transactions. * This uses test_and_set_bit, memory barrier is implicit. */ @@ -335,7 +312,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate) rcu_read_unlock(); if (write_al_updates) - al_write_transaction(device, delegate); + al_write_transaction(device); spin_lock_irq(&device->al_lock); /* FIXME if (err) @@ -352,12 +329,10 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate) /* * @delegate: delegate activity log I/O to the worker thread */ -void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate) +void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i) { - BUG_ON(delegate && current == first_peer_device(device)->connection->worker.task); - if (drbd_al_begin_io_prepare(device, i)) - drbd_al_begin_io_commit(device, delegate); + drbd_al_begin_io_commit(device); } int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i) @@ -380,8 +355,19 @@ int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval * /* We want all necessary updates for a given request within the same transaction * We could first check how many updates are *actually* needed, * and use that instead of the worst-case nr_al_extents */ - if (available_update_slots < nr_al_extents) - return -EWOULDBLOCK; + if (available_update_slots < nr_al_extents) { + /* Too many activity log extents are currently "hot". + * + * If we have accumulated pending changes already, + * we made progress. + * + * If we cannot get even a single pending change through, + * stop the fast path until we made some progress, + * or requests to "cold" extents could be starved. */ + if (!al->pending_changes) + __set_bit(__LC_STARVING, &device->act_log->flags); + return -ENOBUFS; + } /* Is resync active in this area? */ for (enr = first; enr <= last; enr++) { @@ -452,15 +438,6 @@ static unsigned int al_extent_to_bm_page(unsigned int al_enr) (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)); } -static unsigned int rs_extent_to_bm_page(unsigned int rs_enr) -{ - return rs_enr >> - /* bit to page */ - ((PAGE_SHIFT + 3) - - /* resync extent number to bit */ - (BM_EXT_SHIFT - BM_BLOCK_SHIFT)); -} - static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device) { const unsigned int stripes = device->ldev->md.al_stripes; @@ -479,8 +456,7 @@ static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device) return device->ldev->md.md_offset + device->ldev->md.al_offset + t; } -static int -_al_write_transaction(struct drbd_device *device) +int al_write_transaction(struct drbd_device *device) { struct al_transaction_on_disk *buffer; struct lc_element *e; @@ -505,7 +481,8 @@ _al_write_transaction(struct drbd_device *device) return -EIO; } - buffer = drbd_md_get_buffer(device); /* protects md_io_buffer, al_tr_cycle, ... */ + /* protects md_io_buffer, al_tr_cycle, ... 
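drbd_md_get_buffer() above now records start_jif and primes submit_jif to start_jif - 1, so "submitted before started" can later be read as "not submitted yet" (the debugfs code further down in this patch prints "-" in that case). A small userspace sketch of the wrap-safe jiffies comparison this relies on, the same arithmetic as the kernel's time_before():

#include <stdio.h>

static int before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;	/* wrap-safe, like time_before(a, b) */
}

int main(void)
{
	unsigned long start_jif = (unsigned long)-5;	/* just before the counter wraps */
	unsigned long submit_jif = start_jif - 1;	/* "not submitted yet" marker */

	printf("submitted? %s\n", before(submit_jif, start_jif) ? "no" : "yes");
	return 0;
}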
*/ + buffer = drbd_md_get_buffer(device, __func__); if (!buffer) { drbd_err(device, "disk failed while waiting for md_io buffer\n"); put_ldev(device); @@ -590,38 +567,6 @@ _al_write_transaction(struct drbd_device *device) return err; } - -static int w_al_write_transaction(struct drbd_work *w, int unused) -{ - struct update_al_work *aw = container_of(w, struct update_al_work, w); - struct drbd_device *device = aw->device; - int err; - - err = _al_write_transaction(device); - aw->err = err; - complete(&aw->event); - - return err != -EIO ? err : 0; -} - -/* Calls from worker context (see w_restart_disk_io()) need to write the - transaction directly. Others came through generic_make_request(), - those need to delegate it to the worker. */ -static int al_write_transaction(struct drbd_device *device, bool delegate) -{ - if (delegate) { - struct update_al_work al_work; - init_completion(&al_work.event); - al_work.w.cb = w_al_write_transaction; - al_work.device = device; - drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, - &al_work.w); - wait_for_completion(&al_work.event); - return al_work.err; - } else - return _al_write_transaction(device); -} - static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext) { int rv; @@ -682,72 +627,56 @@ int drbd_initialize_al(struct drbd_device *device, void *buffer) return 0; } -static int w_update_odbm(struct drbd_work *w, int unused) -{ - struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w); - struct drbd_device *device = udw->device; - struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, }; - - if (!get_ldev(device)) { - if (__ratelimit(&drbd_ratelimit_state)) - drbd_warn(device, "Can not update on disk bitmap, local IO disabled.\n"); - kfree(udw); - return 0; - } - - drbd_bm_write_page(device, rs_extent_to_bm_page(udw->enr)); - put_ldev(device); - - kfree(udw); - - if (drbd_bm_total_weight(device) <= device->rs_failed) { - switch (device->state.conn) { - case C_SYNC_SOURCE: case C_SYNC_TARGET: - case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T: - drbd_resync_finished(device); - default: - /* nothing to do */ - break; - } - } - drbd_bcast_event(device, &sib); - - return 0; -} - +static const char *drbd_change_sync_fname[] = { + [RECORD_RS_FAILED] = "drbd_rs_failed_io", + [SET_IN_SYNC] = "drbd_set_in_sync", + [SET_OUT_OF_SYNC] = "drbd_set_out_of_sync" +}; /* ATTENTION. The AL's extents are 4MB each, while the extents in the * resync LRU-cache are 16MB each. * The caller of this function has to hold an get_ldev() reference. * + * Adjusts the caching members ->rs_left (success) or ->rs_failed (!success), + * potentially pulling in (and recounting the corresponding bits) + * this resync extent into the resync extent lru cache. + * + * Returns whether all bits have been cleared for this resync extent, + * precisely: (rs_left <= rs_failed) + * * TODO will be obsoleted once we have a caching lru of the on disk bitmap */ -static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t sector, - int count, int success) +static bool update_rs_extent(struct drbd_device *device, + unsigned int enr, int count, + enum update_sync_bits_mode mode) { struct lc_element *e; - struct update_odbm_work *udw; - - unsigned int enr; D_ASSERT(device, atomic_read(&device->local_cnt)); - /* I simply assume that a sector/size pair never crosses - * a 16 MB extent border. (Currently this is true...) 
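The 16 MB figure in the removed comment above is the resync extent size mentioned a little earlier in this file. A tiny standalone check of whether a sector/size pair stays within one such extent, assuming 512-byte sectors and illustrative constants rather than drbd's exported macros:

#include <stdio.h>

#define SECTORS_PER_RS_EXT (1UL << 15)	/* 16 MiB / 512 B per sector */

int main(void)
{
	unsigned long sector = 70000, nr_sectors = 8;	/* a 4 KiB request */
	unsigned long first = sector / SECTORS_PER_RS_EXT;
	unsigned long last = (sector + nr_sectors - 1) / SECTORS_PER_RS_EXT;

	printf("resync extent(s) %lu..%lu%s\n", first, last,
	       first == last ? "" : " (crosses an extent border)");
	return 0;
}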
*/ - enr = BM_SECT_TO_EXT(sector); - - e = lc_get(device->resync, enr); + /* When setting out-of-sync bits, + * we don't need it cached (lc_find). + * But if it is present in the cache, + * we should update the cached bit count. + * Otherwise, that extent should be in the resync extent lru cache + * already -- or we want to pull it in if necessary -- (lc_get), + * then update and check rs_left and rs_failed. */ + if (mode == SET_OUT_OF_SYNC) + e = lc_find(device->resync, enr); + else + e = lc_get(device->resync, enr); if (e) { struct bm_extent *ext = lc_entry(e, struct bm_extent, lce); if (ext->lce.lc_number == enr) { - if (success) + if (mode == SET_IN_SYNC) ext->rs_left -= count; + else if (mode == SET_OUT_OF_SYNC) + ext->rs_left += count; else ext->rs_failed += count; if (ext->rs_left < ext->rs_failed) { - drbd_warn(device, "BAD! sector=%llus enr=%u rs_left=%d " + drbd_warn(device, "BAD! enr=%u rs_left=%d " "rs_failed=%d count=%d cstate=%s\n", - (unsigned long long)sector, ext->lce.lc_number, ext->rs_left, ext->rs_failed, count, drbd_conn_str(device->state.conn)); @@ -781,34 +710,27 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto ext->lce.lc_number, ext->rs_failed); } ext->rs_left = rs_left; - ext->rs_failed = success ? 0 : count; + ext->rs_failed = (mode == RECORD_RS_FAILED) ? count : 0; /* we don't keep a persistent log of the resync lru, * we can commit any change right away. */ lc_committed(device->resync); } - lc_put(device->resync, &ext->lce); + if (mode != SET_OUT_OF_SYNC) + lc_put(device->resync, &ext->lce); /* no race, we are within the al_lock! */ - if (ext->rs_left == ext->rs_failed) { + if (ext->rs_left <= ext->rs_failed) { ext->rs_failed = 0; - - udw = kmalloc(sizeof(*udw), GFP_ATOMIC); - if (udw) { - udw->enr = ext->lce.lc_number; - udw->w.cb = w_update_odbm; - udw->device = device; - drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, - &udw->w); - } else { - drbd_warn(device, "Could not kmalloc an udw\n"); - } + return true; } - } else { + } else if (mode != SET_OUT_OF_SYNC) { + /* be quiet if lc_find() did not find it. */ drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n", device->resync_locked, device->resync->nr_elements, device->resync->flags); } + return false; } void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go) @@ -827,105 +749,105 @@ void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go } } -/* clear the bit corresponding to the piece of storage in question: - * size byte of data starting from sector. Only clear a bits of the affected - * one ore more _aligned_ BM_BLOCK_SIZE blocks. - * - * called by worker on C_SYNC_TARGET and receiver on SyncSource. - * - */ -void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size, - const char *file, const unsigned int line) +/* It is called lazy update, so don't do write-out too often. 
*/ +static bool lazy_bitmap_update_due(struct drbd_device *device) { - /* Is called from worker and receiver context _only_ */ - unsigned long sbnr, ebnr, lbnr; - unsigned long count = 0; - sector_t esector, nr_sectors; - int wake_up = 0; - unsigned long flags; + return time_after(jiffies, device->rs_last_bcast + 2*HZ); +} - if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { - drbd_err(device, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", - (unsigned long long)sector, size); +static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done) +{ + if (rs_done) + set_bit(RS_DONE, &device->flags); + /* and also set RS_PROGRESS below */ + else if (!lazy_bitmap_update_due(device)) return; - } - - if (!get_ldev(device)) - return; /* no disk, no metadata, no bitmap to clear bits in */ - - nr_sectors = drbd_get_capacity(device->this_bdev); - esector = sector + (size >> 9) - 1; - - if (!expect(sector < nr_sectors)) - goto out; - if (!expect(esector < nr_sectors)) - esector = nr_sectors - 1; - - lbnr = BM_SECT_TO_BIT(nr_sectors-1); - - /* we clear it (in sync). - * round up start sector, round down end sector. we make sure we only - * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */ - if (unlikely(esector < BM_SECT_PER_BIT-1)) - goto out; - if (unlikely(esector == (nr_sectors-1))) - ebnr = lbnr; - else - ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1)); - sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1); - if (sbnr > ebnr) - goto out; + drbd_device_post_work(device, RS_PROGRESS); +} +static int update_sync_bits(struct drbd_device *device, + unsigned long sbnr, unsigned long ebnr, + enum update_sync_bits_mode mode) +{ /* - * ok, (capacity & 7) != 0 sometimes, but who cares... - * we count rs_{total,left} in bits, not sectors. + * We keep a count of set bits per resync-extent in the ->rs_left + * caching member, so we need to loop and work within the resync extent + * alignment. Typically this loop will execute exactly once. */ - count = drbd_bm_clear_bits(device, sbnr, ebnr); - if (count) { - drbd_advance_rs_marks(device, drbd_bm_total_weight(device)); - spin_lock_irqsave(&device->al_lock, flags); - drbd_try_clear_on_disk_bm(device, sector, count, true); - spin_unlock_irqrestore(&device->al_lock, flags); - - /* just wake_up unconditional now, various lc_chaged(), - * lc_put() in drbd_try_clear_on_disk_bm(). */ - wake_up = 1; + unsigned long flags; + unsigned long count = 0; + unsigned int cleared = 0; + while (sbnr <= ebnr) { + /* set temporary boundary bit number to last bit number within + * the resync extent of the current start bit number, + * but cap at provided end bit number */ + unsigned long tbnr = min(ebnr, sbnr | BM_BLOCKS_PER_BM_EXT_MASK); + unsigned long c; + + if (mode == RECORD_RS_FAILED) + /* Only called from drbd_rs_failed_io(), bits + * supposedly still set. Recount, maybe some + * of the bits have been successfully cleared + * by application IO meanwhile. 
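The while loop added in update_sync_bits() above walks an arbitrary bit range one resync extent at a time, using `sbnr | BM_BLOCKS_PER_BM_EXT_MASK` as the last bit of the current extent. A standalone sketch of that chunking idiom, with a made-up mask value purely for illustration:

#include <stdio.h>

#define BITS_PER_EXT_MASK 0xfffUL	/* pretend 4096 bitmap bits per extent */

static void walk_by_extent(unsigned long sbnr, unsigned long ebnr)
{
	while (sbnr <= ebnr) {
		/* last bit of the current extent, capped at the end of the range */
		unsigned long ext_end = sbnr | BITS_PER_EXT_MASK;
		unsigned long tbnr = ext_end < ebnr ? ext_end : ebnr;

		printf("process bits %lu..%lu\n", sbnr, tbnr);
		sbnr = tbnr + 1;
	}
}

int main(void)
{
	walk_by_extent(4000, 10000);	/* spans three 4096-bit extents */
	return 0;
}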
+ */ + c = drbd_bm_count_bits(device, sbnr, tbnr); + else if (mode == SET_IN_SYNC) + c = drbd_bm_clear_bits(device, sbnr, tbnr); + else /* if (mode == SET_OUT_OF_SYNC) */ + c = drbd_bm_set_bits(device, sbnr, tbnr); + + if (c) { + spin_lock_irqsave(&device->al_lock, flags); + cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode); + spin_unlock_irqrestore(&device->al_lock, flags); + count += c; + } + sbnr = tbnr + 1; } -out: - put_ldev(device); - if (wake_up) + if (count) { + if (mode == SET_IN_SYNC) { + unsigned long still_to_go = drbd_bm_total_weight(device); + bool rs_is_done = (still_to_go <= device->rs_failed); + drbd_advance_rs_marks(device, still_to_go); + if (cleared || rs_is_done) + maybe_schedule_on_disk_bitmap_update(device, rs_is_done); + } else if (mode == RECORD_RS_FAILED) + device->rs_failed += count; wake_up(&device->al_wait); + } + return count; } -/* - * this is intended to set one request worth of data out of sync. - * affects at least 1 bit, - * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits. +/* clear the bit corresponding to the piece of storage in question: + * size byte of data starting from sector. Only clear a bits of the affected + * one ore more _aligned_ BM_BLOCK_SIZE blocks. + * + * called by worker on C_SYNC_TARGET and receiver on SyncSource. * - * called by tl_clear and drbd_send_dblock (==drbd_make_request). - * so this can be _any_ process. */ -int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size, - const char *file, const unsigned int line) +int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, + enum update_sync_bits_mode mode, + const char *file, const unsigned int line) { - unsigned long sbnr, ebnr, flags; + /* Is called from worker and receiver context _only_ */ + unsigned long sbnr, ebnr, lbnr; + unsigned long count = 0; sector_t esector, nr_sectors; - unsigned int enr, count = 0; - struct lc_element *e; - /* this should be an empty REQ_FLUSH */ - if (size == 0) + /* This would be an empty REQ_FLUSH, be silent. */ + if ((mode == SET_OUT_OF_SYNC) && size == 0) return 0; - if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { - drbd_err(device, "sector: %llus, size: %d\n", - (unsigned long long)sector, size); + if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { + drbd_err(device, "%s: sector=%llus size=%d nonsense!\n", + drbd_change_sync_fname[mode], + (unsigned long long)sector, size); return 0; } if (!get_ldev(device)) - return 0; /* no disk, no metadata, no bitmap to set bits in */ + return 0; /* no disk, no metadata, no bitmap to manipulate bits in */ nr_sectors = drbd_get_capacity(device->this_bdev); esector = sector + (size >> 9) - 1; @@ -935,25 +857,28 @@ int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size if (!expect(esector < nr_sectors)) esector = nr_sectors - 1; - /* we set it out of sync, - * we do not need to round anything here */ - sbnr = BM_SECT_TO_BIT(sector); - ebnr = BM_SECT_TO_BIT(esector); - - /* ok, (capacity & 7) != 0 sometimes, but who cares... - * we count rs_{total,left} in bits, not sectors. */ - spin_lock_irqsave(&device->al_lock, flags); - count = drbd_bm_set_bits(device, sbnr, ebnr); + lbnr = BM_SECT_TO_BIT(nr_sectors-1); - enr = BM_SECT_TO_EXT(sector); - e = lc_find(device->resync, enr); - if (e) - lc_entry(e, struct bm_extent, lce)->rs_left += count; - spin_unlock_irqrestore(&device->al_lock, flags); + if (mode == SET_IN_SYNC) { + /* Round up start sector, round down end sector. 
We make sure + * we only clear full, aligned, BM_BLOCK_SIZE blocks. */ + if (unlikely(esector < BM_SECT_PER_BIT-1)) + goto out; + if (unlikely(esector == (nr_sectors-1))) + ebnr = lbnr; + else + ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1)); + sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1); + } else { + /* We set it out of sync, or record resync failure. + * Should not round anything here. */ + sbnr = BM_SECT_TO_BIT(sector); + ebnr = BM_SECT_TO_BIT(esector); + } + count = update_sync_bits(device, sbnr, ebnr, mode); out: put_ldev(device); - return count; } @@ -1075,6 +1000,15 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector) struct lc_element *e; struct bm_extent *bm_ext; int i; + bool throttle = drbd_rs_should_slow_down(device, sector, true); + + /* If we need to throttle, a half-locked (only marked BME_NO_WRITES, + * not yet BME_LOCKED) extent needs to be kicked out explicitly if we + * need to throttle. There is at most one such half-locked extent, + * which is remembered in resync_wenr. */ + + if (throttle && device->resync_wenr != enr) + return -EAGAIN; spin_lock_irq(&device->al_lock); if (device->resync_wenr != LC_FREE && device->resync_wenr != enr) { @@ -1098,8 +1032,10 @@ int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector) D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags)); clear_bit(BME_NO_WRITES, &bm_ext->flags); device->resync_wenr = LC_FREE; - if (lc_put(device->resync, &bm_ext->lce) == 0) + if (lc_put(device->resync, &bm_ext->lce) == 0) { + bm_ext->flags = 0; device->resync_locked--; + } wake_up(&device->al_wait); } else { drbd_alert(device, "LOGIC BUG\n"); @@ -1161,8 +1097,20 @@ proceed: return 0; try_again: - if (bm_ext) - device->resync_wenr = enr; + if (bm_ext) { + if (throttle) { + D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags)); + D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags)); + clear_bit(BME_NO_WRITES, &bm_ext->flags); + device->resync_wenr = LC_FREE; + if (lc_put(device->resync, &bm_ext->lce) == 0) { + bm_ext->flags = 0; + device->resync_locked--; + } + wake_up(&device->al_wait); + } else + device->resync_wenr = enr; + } spin_unlock_irq(&device->al_lock); return -EAGAIN; } @@ -1270,69 +1218,3 @@ int drbd_rs_del_all(struct drbd_device *device) return 0; } - -/** - * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks - * @device: DRBD device. - * @sector: The sector number. - * @size: Size of failed IO operation, in byte. - */ -void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size) -{ - /* Is called from worker and receiver context _only_ */ - unsigned long sbnr, ebnr, lbnr; - unsigned long count; - sector_t esector, nr_sectors; - int wake_up = 0; - - if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { - drbd_err(device, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", - (unsigned long long)sector, size); - return; - } - nr_sectors = drbd_get_capacity(device->this_bdev); - esector = sector + (size >> 9) - 1; - - if (!expect(sector < nr_sectors)) - return; - if (!expect(esector < nr_sectors)) - esector = nr_sectors - 1; - - lbnr = BM_SECT_TO_BIT(nr_sectors-1); - - /* - * round up start sector, round down end sector. 
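The SET_IN_SYNC branch above only clears whole 4 KiB bitmap blocks, so the start sector is rounded up and the end sector rounded down to bit boundaries. A standalone sketch of that rounding, assuming 8 sectors of 512 bytes per bitmap bit:

#include <stdio.h>

#define SECT_PER_BIT 8UL	/* 4096 B block / 512 B sector */

static int range_to_full_bits(unsigned long sector, unsigned long esector,
			      unsigned long *sbnr, unsigned long *ebnr)
{
	if (esector < SECT_PER_BIT - 1)
		return -1;				/* too small for even one full block */
	*sbnr = (sector + SECT_PER_BIT - 1) / SECT_PER_BIT;	/* round up   */
	*ebnr = (esector - (SECT_PER_BIT - 1)) / SECT_PER_BIT;	/* round down */
	return *sbnr > *ebnr ? -1 : 0;
}

int main(void)
{
	unsigned long sbnr, ebnr;

	if (range_to_full_bits(9, 40, &sbnr, &ebnr) == 0)
		printf("fully covered bits: %lu..%lu\n", sbnr, ebnr);
	return 0;
}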
we make sure we only - * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */ - if (unlikely(esector < BM_SECT_PER_BIT-1)) - return; - if (unlikely(esector == (nr_sectors-1))) - ebnr = lbnr; - else - ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1)); - sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1); - - if (sbnr > ebnr) - return; - - /* - * ok, (capacity & 7) != 0 sometimes, but who cares... - * we count rs_{total,left} in bits, not sectors. - */ - spin_lock_irq(&device->al_lock); - count = drbd_bm_count_bits(device, sbnr, ebnr); - if (count) { - device->rs_failed += count; - - if (get_ldev(device)) { - drbd_try_clear_on_disk_bm(device, sector, count, false); - put_ldev(device); - } - - /* just wake_up unconditional now, various lc_chaged(), - * lc_put() in drbd_try_clear_on_disk_bm(). */ - wake_up = 1; - } - spin_unlock_irq(&device->al_lock); - if (wake_up) - wake_up(&device->al_wait); -} diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 1aa29f8fdfe1..426c97aef900 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -22,6 +22,8 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/bitops.h> #include <linux/vmalloc.h> #include <linux/string.h> @@ -353,9 +355,8 @@ static void bm_free_pages(struct page **pages, unsigned long number) for (i = 0; i < number; i++) { if (!pages[i]) { - printk(KERN_ALERT "drbd: bm_free_pages tried to free " - "a NULL pointer; i=%lu n=%lu\n", - i, number); + pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n", + i, number); continue; } __free_page(pages[i]); @@ -592,7 +593,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) end = offset + len; if (end > b->bm_words) { - printk(KERN_ALERT "drbd: bm_memset end > bm_words\n"); + pr_alert("bm_memset end > bm_words\n"); return; } @@ -602,7 +603,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) p_addr = bm_map_pidx(b, idx); bm = p_addr + MLPP(offset); if (bm+do_now > p_addr + LWPP) { - printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", + pr_alert("BUG BUG BUG! 
p_addr:%p bm:%p do_now:%d\n", p_addr, bm, (int)do_now); } else memset(bm, c, do_now * sizeof(long)); @@ -927,22 +928,14 @@ void drbd_bm_clear_all(struct drbd_device *device) spin_unlock_irq(&b->bm_lock); } -struct bm_aio_ctx { - struct drbd_device *device; - atomic_t in_flight; - unsigned int done; - unsigned flags; -#define BM_AIO_COPY_PAGES 1 -#define BM_AIO_WRITE_HINTED 2 -#define BM_WRITE_ALL_PAGES 4 - int error; - struct kref kref; -}; - -static void bm_aio_ctx_destroy(struct kref *kref) +static void drbd_bm_aio_ctx_destroy(struct kref *kref) { - struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref); + struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref); + unsigned long flags; + spin_lock_irqsave(&ctx->device->resource->req_lock, flags); + list_del(&ctx->list); + spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags); put_ldev(ctx->device); kfree(ctx); } @@ -950,7 +943,7 @@ static void bm_aio_ctx_destroy(struct kref *kref) /* bv_page may be a copy, or may be the original */ static void bm_async_io_complete(struct bio *bio, int error) { - struct bm_aio_ctx *ctx = bio->bi_private; + struct drbd_bm_aio_ctx *ctx = bio->bi_private; struct drbd_device *device = ctx->device; struct drbd_bitmap *b = device->bitmap; unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); @@ -993,17 +986,18 @@ static void bm_async_io_complete(struct bio *bio, int error) if (atomic_dec_and_test(&ctx->in_flight)) { ctx->done = 1; wake_up(&device->misc_wait); - kref_put(&ctx->kref, &bm_aio_ctx_destroy); + kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); } } -static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) +static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local) { struct bio *bio = bio_alloc_drbd(GFP_NOIO); struct drbd_device *device = ctx->device; struct drbd_bitmap *b = device->bitmap; struct page *page; unsigned int len; + unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE; sector_t on_disk_sector = device->ldev->md.md_offset + device->ldev->md.bm_offset; @@ -1049,9 +1043,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must /* * bm_rw: read/write the whole bitmap from/to its on disk location. */ -static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local) +static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local) { - struct bm_aio_ctx *ctx; + struct drbd_bm_aio_ctx *ctx; struct drbd_bitmap *b = device->bitmap; int num_pages, i, count = 0; unsigned long now; @@ -1067,12 +1061,13 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la * as we submit copies of pages anyways. 
*/ - ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO); + ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO); if (!ctx) return -ENOMEM; - *ctx = (struct bm_aio_ctx) { + *ctx = (struct drbd_bm_aio_ctx) { .device = device, + .start_jif = jiffies, .in_flight = ATOMIC_INIT(1), .done = 0, .flags = flags, @@ -1080,15 +1075,21 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la .kref = { ATOMIC_INIT(2) }, }; - if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */ + if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in drbd_bm_aio_ctx_destroy() */ drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n"); kfree(ctx); return -ENODEV; } + /* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from + drbd_adm_attach(), after device->ldev was assigned. */ - if (!ctx->flags) + if (0 == (ctx->flags & ~BM_AIO_READ)) WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); + spin_lock_irq(&device->resource->req_lock); + list_add_tail(&ctx->list, &device->pending_bitmap_io); + spin_unlock_irq(&device->resource->req_lock); + num_pages = b->bm_number_of_pages; now = jiffies; @@ -1098,13 +1099,13 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la /* ignore completely unchanged pages */ if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) break; - if (rw & WRITE) { + if (!(flags & BM_AIO_READ)) { if ((flags & BM_AIO_WRITE_HINTED) && !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT, &page_private(b->bm_pages[i]))) continue; - if (!(flags & BM_WRITE_ALL_PAGES) && + if (!(flags & BM_AIO_WRITE_ALL_PAGES) && bm_test_page_unchanged(b->bm_pages[i])) { dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i); continue; @@ -1118,7 +1119,7 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la } } atomic_inc(&ctx->in_flight); - bm_page_io_async(ctx, i, rw); + bm_page_io_async(ctx, i); ++count; cond_resched(); } @@ -1134,12 +1135,12 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la if (!atomic_dec_and_test(&ctx->in_flight)) wait_until_done_or_force_detached(device, device->ldev, &ctx->done); else - kref_put(&ctx->kref, &bm_aio_ctx_destroy); + kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); /* summary for global bitmap IO */ if (flags == 0) drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n", - rw == WRITE ? "WRITE" : "READ", + (flags & BM_AIO_READ) ? "READ" : "WRITE", count, jiffies - now); if (ctx->error) { @@ -1152,20 +1153,18 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la err = -EIO; /* Disk timeout/force-detach during IO... 
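The drbd_bm_aio_ctx above starts life with a reference count of two: one reference belongs to the submitter in bm_rw(), the other to the completion path, and whoever drops the last one frees the context. A generic sketch of that kref idiom (names invented, not drbd's code):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct io_ctx {
	struct kref kref;
	/* per-I/O state would live here */
};

static void io_ctx_free(struct kref *kref)
{
	kfree(container_of(kref, struct io_ctx, kref));
}

static struct io_ctx *io_ctx_alloc(void)
{
	struct io_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOIO);

	if (!ctx)
		return NULL;
	kref_init(&ctx->kref);	/* reference held by the submitter */
	kref_get(&ctx->kref);	/* reference dropped by the completion path */
	return ctx;
}

/* both the submitter and the completion handler finish with: */
static void io_ctx_put(struct io_ctx *ctx)
{
	kref_put(&ctx->kref, io_ctx_free);
}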
*/ now = jiffies; - if (rw == WRITE) { - drbd_md_flush(device); - } else /* rw == READ */ { + if (flags & BM_AIO_READ) { b->bm_set = bm_count_bits(b); drbd_info(device, "recounting of set bits took additional %lu jiffies\n", jiffies - now); } now = b->bm_set; - if (flags == 0) + if ((flags & ~BM_AIO_READ) == 0) drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n", ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now); - kref_put(&ctx->kref, &bm_aio_ctx_destroy); + kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy); return err; } @@ -1175,7 +1174,7 @@ static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned la */ int drbd_bm_read(struct drbd_device *device) __must_hold(local) { - return bm_rw(device, READ, 0, 0); + return bm_rw(device, BM_AIO_READ, 0); } /** @@ -1186,7 +1185,7 @@ int drbd_bm_read(struct drbd_device *device) __must_hold(local) */ int drbd_bm_write(struct drbd_device *device) __must_hold(local) { - return bm_rw(device, WRITE, 0, 0); + return bm_rw(device, 0, 0); } /** @@ -1197,7 +1196,17 @@ int drbd_bm_write(struct drbd_device *device) __must_hold(local) */ int drbd_bm_write_all(struct drbd_device *device) __must_hold(local) { - return bm_rw(device, WRITE, BM_WRITE_ALL_PAGES, 0); + return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0); +} + +/** + * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed. + * @device: DRBD device. + * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages + */ +int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local) +{ + return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx); } /** @@ -1213,7 +1222,7 @@ int drbd_bm_write_all(struct drbd_device *device) __must_hold(local) */ int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local) { - return bm_rw(device, WRITE, BM_AIO_COPY_PAGES, 0); + return bm_rw(device, BM_AIO_COPY_PAGES, 0); } /** @@ -1222,62 +1231,7 @@ int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local) */ int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local) { - return bm_rw(device, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0); -} - -/** - * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap - * @device: DRBD device. - * @idx: bitmap page index - * - * We don't want to special case on logical_block_size of the backend device, - * so we submit PAGE_SIZE aligned pieces. - * Note that on "most" systems, PAGE_SIZE is 4k. - * - * In case this becomes an issue on systems with larger PAGE_SIZE, - * we may want to change this again to write 4k aligned 4k pieces. 
- */ -int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local) -{ - struct bm_aio_ctx *ctx; - int err; - - if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) { - dynamic_drbd_dbg(device, "skipped bm page write for idx %u\n", idx); - return 0; - } - - ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO); - if (!ctx) - return -ENOMEM; - - *ctx = (struct bm_aio_ctx) { - .device = device, - .in_flight = ATOMIC_INIT(1), - .done = 0, - .flags = BM_AIO_COPY_PAGES, - .error = 0, - .kref = { ATOMIC_INIT(2) }, - }; - - if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */ - drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n"); - kfree(ctx); - return -ENODEV; - } - - bm_page_io_async(ctx, idx, WRITE_SYNC); - wait_until_done_or_force_detached(device, device->ldev, &ctx->done); - - if (ctx->error) - drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); - /* that causes us to detach, so the in memory bitmap will be - * gone in a moment as well. */ - - device->bm_writ_cnt++; - err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error; - kref_put(&ctx->kref, &bm_aio_ctx_destroy); - return err; + return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0); } /* NOTE diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c new file mode 100644 index 000000000000..5c20b18540b8 --- /dev/null +++ b/drivers/block/drbd/drbd_debugfs.c @@ -0,0 +1,958 @@ +#define pr_fmt(fmt) "drbd debugfs: " fmt +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/stat.h> +#include <linux/jiffies.h> +#include <linux/list.h> + +#include "drbd_int.h" +#include "drbd_req.h" +#include "drbd_debugfs.h" + + +/********************************************************************** + * Whenever you change the file format, remember to bump the version. * + **********************************************************************/ + +static struct dentry *drbd_debugfs_root; +static struct dentry *drbd_debugfs_version; +static struct dentry *drbd_debugfs_resources; +static struct dentry *drbd_debugfs_minors; + +static void seq_print_age_or_dash(struct seq_file *m, bool valid, unsigned long dt) +{ + if (valid) + seq_printf(m, "\t%d", jiffies_to_msecs(dt)); + else + seq_printf(m, "\t-"); +} + +static void __seq_print_rq_state_bit(struct seq_file *m, + bool is_set, char *sep, const char *set_name, const char *unset_name) +{ + if (is_set && set_name) { + seq_putc(m, *sep); + seq_puts(m, set_name); + *sep = '|'; + } else if (!is_set && unset_name) { + seq_putc(m, *sep); + seq_puts(m, unset_name); + *sep = '|'; + } +} + +static void seq_print_rq_state_bit(struct seq_file *m, + bool is_set, char *sep, const char *set_name) +{ + __seq_print_rq_state_bit(m, is_set, sep, set_name, NULL); +} + +/* pretty print enum drbd_req_state_bits req->rq_state */ +static void seq_print_request_state(struct seq_file *m, struct drbd_request *req) +{ + unsigned int s = req->rq_state; + char sep = ' '; + seq_printf(m, "\t0x%08x", s); + seq_printf(m, "\tmaster: %s", req->master_bio ? 
"pending" : "completed"); + + /* RQ_WRITE ignored, already reported */ + seq_puts(m, "\tlocal:"); + seq_print_rq_state_bit(m, s & RQ_IN_ACT_LOG, &sep, "in-AL"); + seq_print_rq_state_bit(m, s & RQ_POSTPONED, &sep, "postponed"); + seq_print_rq_state_bit(m, s & RQ_COMPLETION_SUSP, &sep, "suspended"); + sep = ' '; + seq_print_rq_state_bit(m, s & RQ_LOCAL_PENDING, &sep, "pending"); + seq_print_rq_state_bit(m, s & RQ_LOCAL_COMPLETED, &sep, "completed"); + seq_print_rq_state_bit(m, s & RQ_LOCAL_ABORTED, &sep, "aborted"); + seq_print_rq_state_bit(m, s & RQ_LOCAL_OK, &sep, "ok"); + if (sep == ' ') + seq_puts(m, " -"); + + /* for_each_connection ... */ + seq_printf(m, "\tnet:"); + sep = ' '; + seq_print_rq_state_bit(m, s & RQ_NET_PENDING, &sep, "pending"); + seq_print_rq_state_bit(m, s & RQ_NET_QUEUED, &sep, "queued"); + seq_print_rq_state_bit(m, s & RQ_NET_SENT, &sep, "sent"); + seq_print_rq_state_bit(m, s & RQ_NET_DONE, &sep, "done"); + seq_print_rq_state_bit(m, s & RQ_NET_SIS, &sep, "sis"); + seq_print_rq_state_bit(m, s & RQ_NET_OK, &sep, "ok"); + if (sep == ' ') + seq_puts(m, " -"); + + seq_printf(m, " :"); + sep = ' '; + seq_print_rq_state_bit(m, s & RQ_EXP_RECEIVE_ACK, &sep, "B"); + seq_print_rq_state_bit(m, s & RQ_EXP_WRITE_ACK, &sep, "C"); + seq_print_rq_state_bit(m, s & RQ_EXP_BARR_ACK, &sep, "barr"); + if (sep == ' ') + seq_puts(m, " -"); + seq_printf(m, "\n"); +} + +static void seq_print_one_request(struct seq_file *m, struct drbd_request *req, unsigned long now) +{ + /* change anything here, fixup header below! */ + unsigned int s = req->rq_state; + +#define RQ_HDR_1 "epoch\tsector\tsize\trw" + seq_printf(m, "0x%x\t%llu\t%u\t%s", + req->epoch, + (unsigned long long)req->i.sector, req->i.size >> 9, + (s & RQ_WRITE) ? "W" : "R"); + +#define RQ_HDR_2 "\tstart\tin AL\tsubmit" + seq_printf(m, "\t%d", jiffies_to_msecs(now - req->start_jif)); + seq_print_age_or_dash(m, s & RQ_IN_ACT_LOG, now - req->in_actlog_jif); + seq_print_age_or_dash(m, s & RQ_LOCAL_PENDING, now - req->pre_submit_jif); + +#define RQ_HDR_3 "\tsent\tacked\tdone" + seq_print_age_or_dash(m, s & RQ_NET_SENT, now - req->pre_send_jif); + seq_print_age_or_dash(m, (s & RQ_NET_SENT) && !(s & RQ_NET_PENDING), now - req->acked_jif); + seq_print_age_or_dash(m, s & RQ_NET_DONE, now - req->net_done_jif); + +#define RQ_HDR_4 "\tstate\n" + seq_print_request_state(m, req); +} +#define RQ_HDR RQ_HDR_1 RQ_HDR_2 RQ_HDR_3 RQ_HDR_4 + +static void seq_print_minor_vnr_req(struct seq_file *m, struct drbd_request *req, unsigned long now) +{ + seq_printf(m, "%u\t%u\t", req->device->minor, req->device->vnr); + seq_print_one_request(m, req, now); +} + +static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now) +{ + struct drbd_device *device; + unsigned int i; + + seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n"); + rcu_read_lock(); + idr_for_each_entry(&resource->devices, device, i) { + struct drbd_md_io tmp; + /* In theory this is racy, + * in the sense that there could have been a + * drbd_md_put_buffer(); drbd_md_get_buffer(); + * between accessing these members here. 
*/ + tmp = device->md_io; + if (atomic_read(&tmp.in_use)) { + seq_printf(m, "%u\t%u\t%d\t", + device->minor, device->vnr, + jiffies_to_msecs(now - tmp.start_jif)); + if (time_before(tmp.submit_jif, tmp.start_jif)) + seq_puts(m, "-\t"); + else + seq_printf(m, "%d\t", jiffies_to_msecs(now - tmp.submit_jif)); + seq_printf(m, "%s\n", tmp.current_use); + } + } + rcu_read_unlock(); +} + +static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now) +{ + struct drbd_device *device; + unsigned int i; + + seq_puts(m, "minor\tvnr\tage\t#waiting\n"); + rcu_read_lock(); + idr_for_each_entry(&resource->devices, device, i) { + unsigned long jif; + struct drbd_request *req; + int n = atomic_read(&device->ap_actlog_cnt); + if (n) { + spin_lock_irq(&device->resource->req_lock); + req = list_first_entry_or_null(&device->pending_master_completion[1], + struct drbd_request, req_pending_master_completion); + /* if the oldest request does not wait for the activity log + * it is not interesting for us here */ + if (req && !(req->rq_state & RQ_IN_ACT_LOG)) + jif = req->start_jif; + else + req = NULL; + spin_unlock_irq(&device->resource->req_lock); + } + if (n) { + seq_printf(m, "%u\t%u\t", device->minor, device->vnr); + if (req) + seq_printf(m, "%u\t", jiffies_to_msecs(now - jif)); + else + seq_puts(m, "-\t"); + seq_printf(m, "%u\n", n); + } + } + rcu_read_unlock(); +} + +static void seq_print_device_bitmap_io(struct seq_file *m, struct drbd_device *device, unsigned long now) +{ + struct drbd_bm_aio_ctx *ctx; + unsigned long start_jif; + unsigned int in_flight; + unsigned int flags; + spin_lock_irq(&device->resource->req_lock); + ctx = list_first_entry_or_null(&device->pending_bitmap_io, struct drbd_bm_aio_ctx, list); + if (ctx && ctx->done) + ctx = NULL; + if (ctx) { + start_jif = ctx->start_jif; + in_flight = atomic_read(&ctx->in_flight); + flags = ctx->flags; + } + spin_unlock_irq(&device->resource->req_lock); + if (ctx) { + seq_printf(m, "%u\t%u\t%c\t%u\t%u\n", + device->minor, device->vnr, + (flags & BM_AIO_READ) ? 
'R' : 'W', + jiffies_to_msecs(now - start_jif), + in_flight); + } +} + +static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now) +{ + struct drbd_device *device; + unsigned int i; + + seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n"); + rcu_read_lock(); + idr_for_each_entry(&resource->devices, device, i) { + seq_print_device_bitmap_io(m, device, now); + } + rcu_read_unlock(); +} + +/* pretty print enum peer_req->flags */ +static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_request *peer_req) +{ + unsigned long f = peer_req->flags; + char sep = ' '; + + __seq_print_rq_state_bit(m, f & EE_SUBMITTED, &sep, "submitted", "preparing"); + __seq_print_rq_state_bit(m, f & EE_APPLICATION, &sep, "application", "internal"); + seq_print_rq_state_bit(m, f & EE_CALL_AL_COMPLETE_IO, &sep, "in-AL"); + seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C"); + seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync"); + + if (f & EE_IS_TRIM) { + seq_putc(m, sep); + sep = '|'; + if (f & EE_IS_TRIM_USE_ZEROOUT) + seq_puts(m, "zero-out"); + else + seq_puts(m, "trim"); + } + seq_putc(m, '\n'); +} + +static void seq_print_peer_request(struct seq_file *m, + struct drbd_device *device, struct list_head *lh, + unsigned long now) +{ + bool reported_preparing = false; + struct drbd_peer_request *peer_req; + list_for_each_entry(peer_req, lh, w.list) { + if (reported_preparing && !(peer_req->flags & EE_SUBMITTED)) + continue; + + if (device) + seq_printf(m, "%u\t%u\t", device->minor, device->vnr); + + seq_printf(m, "%llu\t%u\t%c\t%u\t", + (unsigned long long)peer_req->i.sector, peer_req->i.size >> 9, + (peer_req->flags & EE_WRITE) ? 'W' : 'R', + jiffies_to_msecs(now - peer_req->submit_jif)); + seq_print_peer_request_flags(m, peer_req); + if (peer_req->flags & EE_SUBMITTED) + break; + else + reported_preparing = true; + } +} + +static void seq_print_device_peer_requests(struct seq_file *m, + struct drbd_device *device, unsigned long now) +{ + seq_puts(m, "minor\tvnr\tsector\tsize\trw\tage\tflags\n"); + spin_lock_irq(&device->resource->req_lock); + seq_print_peer_request(m, device, &device->active_ee, now); + seq_print_peer_request(m, device, &device->read_ee, now); + seq_print_peer_request(m, device, &device->sync_ee, now); + spin_unlock_irq(&device->resource->req_lock); + if (test_bit(FLUSH_PENDING, &device->flags)) { + seq_printf(m, "%u\t%u\t-\t-\tF\t%u\tflush\n", + device->minor, device->vnr, + jiffies_to_msecs(now - device->flush_jif)); + } +} + +static void seq_print_resource_pending_peer_requests(struct seq_file *m, + struct drbd_resource *resource, unsigned long now) +{ + struct drbd_device *device; + unsigned int i; + + rcu_read_lock(); + idr_for_each_entry(&resource->devices, device, i) { + seq_print_device_peer_requests(m, device, now); + } + rcu_read_unlock(); +} + +static void seq_print_resource_transfer_log_summary(struct seq_file *m, + struct drbd_resource *resource, + struct drbd_connection *connection, + unsigned long now) +{ + struct drbd_request *req; + unsigned int count = 0; + unsigned int show_state = 0; + + seq_puts(m, "n\tdevice\tvnr\t" RQ_HDR); + spin_lock_irq(&resource->req_lock); + list_for_each_entry(req, &connection->transfer_log, tl_requests) { + unsigned int tmp = 0; + unsigned int s; + ++count; + + /* don't disable irq "forever" */ + if (!(count & 0x1ff)) { + struct drbd_request *req_next; + kref_get(&req->kref); + spin_unlock_irq(&resource->req_lock); + cond_resched(); + 
spin_lock_irq(&resource->req_lock); + req_next = list_next_entry(req, tl_requests); + if (kref_put(&req->kref, drbd_req_destroy)) + req = req_next; + if (&req->tl_requests == &connection->transfer_log) + break; + } + + s = req->rq_state; + + /* This is meant to summarize timing issues, to be able to tell + * local disk problems from network problems. + * Skip requests, if we have shown an even older request with + * similar aspects already. */ + if (req->master_bio == NULL) + tmp |= 1; + if ((s & RQ_LOCAL_MASK) && (s & RQ_LOCAL_PENDING)) + tmp |= 2; + if (s & RQ_NET_MASK) { + if (!(s & RQ_NET_SENT)) + tmp |= 4; + if (s & RQ_NET_PENDING) + tmp |= 8; + if (!(s & RQ_NET_DONE)) + tmp |= 16; + } + if ((tmp & show_state) == tmp) + continue; + show_state |= tmp; + seq_printf(m, "%u\t", count); + seq_print_minor_vnr_req(m, req, now); + if (show_state == 0x1f) + break; + } + spin_unlock_irq(&resource->req_lock); +} + +/* TODO: transfer_log and friends should be moved to resource */ +static int in_flight_summary_show(struct seq_file *m, void *pos) +{ + struct drbd_resource *resource = m->private; + struct drbd_connection *connection; + unsigned long jif = jiffies; + + connection = first_connection(resource); + /* This does not happen, actually. + * But be robust and prepare for future code changes. */ + if (!connection || !kref_get_unless_zero(&connection->kref)) + return -ESTALE; + + /* BUMP me if you change the file format/content/presentation */ + seq_printf(m, "v: %u\n\n", 0); + + seq_puts(m, "oldest bitmap IO\n"); + seq_print_resource_pending_bitmap_io(m, resource, jif); + seq_putc(m, '\n'); + + seq_puts(m, "meta data IO\n"); + seq_print_resource_pending_meta_io(m, resource, jif); + seq_putc(m, '\n'); + + seq_puts(m, "socket buffer stats\n"); + /* for each connection ... once we have more than one */ + rcu_read_lock(); + if (connection->data.socket) { + /* open coded SIOCINQ, the "relevant" part */ + struct tcp_sock *tp = tcp_sk(connection->data.socket->sk); + int answ = tp->rcv_nxt - tp->copied_seq; + seq_printf(m, "unread receive buffer: %u Byte\n", answ); + /* open coded SIOCOUTQ, the "relevant" part */ + answ = tp->write_seq - tp->snd_una; + seq_printf(m, "unacked send buffer: %u Byte\n", answ); + } + rcu_read_unlock(); + seq_putc(m, '\n'); + + seq_puts(m, "oldest peer requests\n"); + seq_print_resource_pending_peer_requests(m, resource, jif); + seq_putc(m, '\n'); + + seq_puts(m, "application requests waiting for activity log\n"); + seq_print_waiting_for_AL(m, resource, jif); + seq_putc(m, '\n'); + + seq_puts(m, "oldest application requests\n"); + seq_print_resource_transfer_log_summary(m, resource, connection, jif); + seq_putc(m, '\n'); + + jif = jiffies - jif; + if (jif) + seq_printf(m, "generated in %d ms\n", jiffies_to_msecs(jif)); + kref_put(&connection->kref, drbd_destroy_connection); + return 0; +} + +/* simple_positive(file->f_dentry) respectively debugfs_positive(), + * but neither is "reachable" from here. + * So we have our own inline version of it above. :-( */ +static inline int debugfs_positive(struct dentry *dentry) +{ + return dentry->d_inode && !d_unhashed(dentry); +} + +/* make sure at *open* time that the respective object won't go away. */ +static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *), + void *data, struct kref *kref, + void (*release)(struct kref *)) +{ + struct dentry *parent; + int ret = -ESTALE; + + /* Are we still linked, + * or has debugfs_remove() already been called? 
*/ + parent = file->f_dentry->d_parent; + /* not sure if this can happen: */ + if (!parent || !parent->d_inode) + goto out; + /* serialize with d_delete() */ + mutex_lock(&parent->d_inode->i_mutex); + /* Make sure the object is still alive */ + if (debugfs_positive(file->f_dentry) + && kref_get_unless_zero(kref)) + ret = 0; + mutex_unlock(&parent->d_inode->i_mutex); + if (!ret) { + ret = single_open(file, show, data); + if (ret) + kref_put(kref, release); + } +out: + return ret; +} + +static int in_flight_summary_open(struct inode *inode, struct file *file) +{ + struct drbd_resource *resource = inode->i_private; + return drbd_single_open(file, in_flight_summary_show, resource, + &resource->kref, drbd_destroy_resource); +} + +static int in_flight_summary_release(struct inode *inode, struct file *file) +{ + struct drbd_resource *resource = inode->i_private; + kref_put(&resource->kref, drbd_destroy_resource); + return single_release(inode, file); +} + +static const struct file_operations in_flight_summary_fops = { + .owner = THIS_MODULE, + .open = in_flight_summary_open, + .read = seq_read, + .llseek = seq_lseek, + .release = in_flight_summary_release, +}; + +void drbd_debugfs_resource_add(struct drbd_resource *resource) +{ + struct dentry *dentry; + if (!drbd_debugfs_resources) + return; + + dentry = debugfs_create_dir(resource->name, drbd_debugfs_resources); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + resource->debugfs_res = dentry; + + dentry = debugfs_create_dir("volumes", resource->debugfs_res); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + resource->debugfs_res_volumes = dentry; + + dentry = debugfs_create_dir("connections", resource->debugfs_res); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + resource->debugfs_res_connections = dentry; + + dentry = debugfs_create_file("in_flight_summary", S_IRUSR|S_IRGRP, + resource->debugfs_res, resource, + &in_flight_summary_fops); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + resource->debugfs_res_in_flight_summary = dentry; + return; + +fail: + drbd_debugfs_resource_cleanup(resource); + drbd_err(resource, "failed to create debugfs dentry\n"); +} + +static void drbd_debugfs_remove(struct dentry **dp) +{ + debugfs_remove(*dp); + *dp = NULL; +} + +void drbd_debugfs_resource_cleanup(struct drbd_resource *resource) +{ + /* it is ok to call debugfs_remove(NULL) */ + drbd_debugfs_remove(&resource->debugfs_res_in_flight_summary); + drbd_debugfs_remove(&resource->debugfs_res_connections); + drbd_debugfs_remove(&resource->debugfs_res_volumes); + drbd_debugfs_remove(&resource->debugfs_res); +} + +static void seq_print_one_timing_detail(struct seq_file *m, + const struct drbd_thread_timing_details *tdp, + unsigned long now) +{ + struct drbd_thread_timing_details td; + /* No locking... + * use temporary assignment to get at consistent data. */ + do { + td = *tdp; + } while (td.cb_nr != tdp->cb_nr); + if (!td.cb_addr) + return; + seq_printf(m, "%u\t%d\t%s:%u\t%ps\n", + td.cb_nr, + jiffies_to_msecs(now - td.start_jif), + td.caller_fn, td.line, + td.cb_addr); +} + +static void seq_print_timing_details(struct seq_file *m, + const char *title, + unsigned int cb_nr, struct drbd_thread_timing_details *tdp, unsigned long now) +{ + unsigned int start_idx; + unsigned int i; + + seq_printf(m, "%s\n", title); + /* If not much is going on, this will result in natural ordering. + * If it is very busy, we will possibly skip events, or even see wrap + * arounds, which could only be avoided with locking. 
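
[Editorial note on the open/release pattern just shown: drbd_single_open() takes a reference on the backing object before calling single_open(), and the matching *_release() drops it after single_release(), so the seq_file callbacks can dereference the object without extra locking. The real helper additionally serializes against debugfs_remove() via the parent directory's i_mutex, which this minimal sketch omits; my_obj, my_obj_destroy, my_show, my_open and my_release below are made-up names for illustration only, not part of the patch.]

	#include <linux/fs.h>
	#include <linux/kref.h>
	#include <linux/seq_file.h>
	#include <linux/slab.h>

	struct my_obj {
		struct kref kref;
		/* ... payload ... */
	};

	static void my_obj_destroy(struct kref *kref)
	{
		kfree(container_of(kref, struct my_obj, kref));
	}

	static int my_show(struct seq_file *m, void *ignored)
	{
		struct my_obj *obj = m->private;

		/* safe: the open path below guarantees we hold a reference */
		seq_printf(m, "obj at %p\n", obj);
		return 0;
	}

	static int my_open(struct inode *inode, struct file *file)
	{
		struct my_obj *obj = inode->i_private;
		int ret;

		/* refuse to open if the object is already being torn down */
		if (!kref_get_unless_zero(&obj->kref))
			return -ESTALE;

		ret = single_open(file, my_show, obj);
		if (ret)	/* don't leak the reference on failure */
			kref_put(&obj->kref, my_obj_destroy);
		return ret;
	}

	static int my_release(struct inode *inode, struct file *file)
	{
		struct my_obj *obj = inode->i_private;

		kref_put(&obj->kref, my_obj_destroy);
		return single_release(inode, file);
	}
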
+ */ + start_idx = cb_nr % DRBD_THREAD_DETAILS_HIST; + for (i = start_idx; i < DRBD_THREAD_DETAILS_HIST; i++) + seq_print_one_timing_detail(m, tdp+i, now); + for (i = 0; i < start_idx; i++) + seq_print_one_timing_detail(m, tdp+i, now); +} + +static int callback_history_show(struct seq_file *m, void *ignored) +{ + struct drbd_connection *connection = m->private; + unsigned long jif = jiffies; + + /* BUMP me if you change the file format/content/presentation */ + seq_printf(m, "v: %u\n\n", 0); + + seq_puts(m, "n\tage\tcallsite\tfn\n"); + seq_print_timing_details(m, "worker", connection->w_cb_nr, connection->w_timing_details, jif); + seq_print_timing_details(m, "receiver", connection->r_cb_nr, connection->r_timing_details, jif); + return 0; +} + +static int callback_history_open(struct inode *inode, struct file *file) +{ + struct drbd_connection *connection = inode->i_private; + return drbd_single_open(file, callback_history_show, connection, + &connection->kref, drbd_destroy_connection); +} + +static int callback_history_release(struct inode *inode, struct file *file) +{ + struct drbd_connection *connection = inode->i_private; + kref_put(&connection->kref, drbd_destroy_connection); + return single_release(inode, file); +} + +static const struct file_operations connection_callback_history_fops = { + .owner = THIS_MODULE, + .open = callback_history_open, + .read = seq_read, + .llseek = seq_lseek, + .release = callback_history_release, +}; + +static int connection_oldest_requests_show(struct seq_file *m, void *ignored) +{ + struct drbd_connection *connection = m->private; + unsigned long now = jiffies; + struct drbd_request *r1, *r2; + + /* BUMP me if you change the file format/content/presentation */ + seq_printf(m, "v: %u\n\n", 0); + + spin_lock_irq(&connection->resource->req_lock); + r1 = connection->req_next; + if (r1) + seq_print_minor_vnr_req(m, r1, now); + r2 = connection->req_ack_pending; + if (r2 && r2 != r1) { + r1 = r2; + seq_print_minor_vnr_req(m, r1, now); + } + r2 = connection->req_not_net_done; + if (r2 && r2 != r1) + seq_print_minor_vnr_req(m, r2, now); + spin_unlock_irq(&connection->resource->req_lock); + return 0; +} + +static int connection_oldest_requests_open(struct inode *inode, struct file *file) +{ + struct drbd_connection *connection = inode->i_private; + return drbd_single_open(file, connection_oldest_requests_show, connection, + &connection->kref, drbd_destroy_connection); +} + +static int connection_oldest_requests_release(struct inode *inode, struct file *file) +{ + struct drbd_connection *connection = inode->i_private; + kref_put(&connection->kref, drbd_destroy_connection); + return single_release(inode, file); +} + +static const struct file_operations connection_oldest_requests_fops = { + .owner = THIS_MODULE, + .open = connection_oldest_requests_open, + .read = seq_read, + .llseek = seq_lseek, + .release = connection_oldest_requests_release, +}; + +void drbd_debugfs_connection_add(struct drbd_connection *connection) +{ + struct dentry *conns_dir = connection->resource->debugfs_res_connections; + struct dentry *dentry; + if (!conns_dir) + return; + + /* Once we enable mutliple peers, + * these connections will have descriptive names. + * For now, it is just the one connection to the (only) "peer". 
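
[Editorial note: the torn-read loop in seq_print_one_timing_detail() above (copy *tdp until cb_nr is stable) only works if the writer publishes cb_nr last and keeps the counter moving. The writer itself is not in this hunk; the sketch below is an assumption about what a matching __update_timing_details() could look like, using struct drbd_thread_timing_details and DRBD_THREAD_DETAILS_HIST as declared further down in drbd_int.h, and it need not be identical to what lands in drbd_worker.c.]

	void __update_timing_details(struct drbd_thread_timing_details *tdp,
				     unsigned int *cb_nr, void *cb,
				     const char *fn, const unsigned int line)
	{
		unsigned int i = *cb_nr % DRBD_THREAD_DETAILS_HIST;
		struct drbd_thread_timing_details *td = tdp + i;

		/* fill the current slot; cb_nr is written last */
		td->start_jif = jiffies;
		td->cb_addr = cb;
		td->caller_fn = fn;
		td->line = line;
		td->cb_nr = *cb_nr;

		/* pre-clear the next slot: cb_addr == NULL makes a concurrent
		 * reader skip it until it is filled in on the next call */
		i = (i + 1) % DRBD_THREAD_DETAILS_HIST;
		td = tdp + i;
		memset(td, 0, sizeof(*td));
		td->cb_nr = ++(*cb_nr);
	}
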
*/ + dentry = debugfs_create_dir("peer", conns_dir); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + connection->debugfs_conn = dentry; + + dentry = debugfs_create_file("callback_history", S_IRUSR|S_IRGRP, + connection->debugfs_conn, connection, + &connection_callback_history_fops); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + connection->debugfs_conn_callback_history = dentry; + + dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP, + connection->debugfs_conn, connection, + &connection_oldest_requests_fops); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + connection->debugfs_conn_oldest_requests = dentry; + return; + +fail: + drbd_debugfs_connection_cleanup(connection); + drbd_err(connection, "failed to create debugfs dentry\n"); +} + +void drbd_debugfs_connection_cleanup(struct drbd_connection *connection) +{ + drbd_debugfs_remove(&connection->debugfs_conn_callback_history); + drbd_debugfs_remove(&connection->debugfs_conn_oldest_requests); + drbd_debugfs_remove(&connection->debugfs_conn); +} + +static void resync_dump_detail(struct seq_file *m, struct lc_element *e) +{ + struct bm_extent *bme = lc_entry(e, struct bm_extent, lce); + + seq_printf(m, "%5d %s %s %s\n", bme->rs_left, + test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------", + test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------", + test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------" + ); +} + +static int device_resync_extents_show(struct seq_file *m, void *ignored) +{ + struct drbd_device *device = m->private; + + /* BUMP me if you change the file format/content/presentation */ + seq_printf(m, "v: %u\n\n", 0); + + if (get_ldev_if_state(device, D_FAILED)) { + lc_seq_printf_stats(m, device->resync); + lc_seq_dump_details(m, device->resync, "rs_left flags", resync_dump_detail); + put_ldev(device); + } + return 0; +} + +static int device_act_log_extents_show(struct seq_file *m, void *ignored) +{ + struct drbd_device *device = m->private; + + /* BUMP me if you change the file format/content/presentation */ + seq_printf(m, "v: %u\n\n", 0); + + if (get_ldev_if_state(device, D_FAILED)) { + lc_seq_printf_stats(m, device->act_log); + lc_seq_dump_details(m, device->act_log, "", NULL); + put_ldev(device); + } + return 0; +} + +static int device_oldest_requests_show(struct seq_file *m, void *ignored) +{ + struct drbd_device *device = m->private; + struct drbd_resource *resource = device->resource; + unsigned long now = jiffies; + struct drbd_request *r1, *r2; + int i; + + /* BUMP me if you change the file format/content/presentation */ + seq_printf(m, "v: %u\n\n", 0); + + seq_puts(m, RQ_HDR); + spin_lock_irq(&resource->req_lock); + /* WRITE, then READ */ + for (i = 1; i >= 0; --i) { + r1 = list_first_entry_or_null(&device->pending_master_completion[i], + struct drbd_request, req_pending_master_completion); + r2 = list_first_entry_or_null(&device->pending_completion[i], + struct drbd_request, req_pending_local); + if (r1) + seq_print_one_request(m, r1, now); + if (r2 && r2 != r1) + seq_print_one_request(m, r2, now); + } + spin_unlock_irq(&resource->req_lock); + return 0; +} + +static int device_data_gen_id_show(struct seq_file *m, void *ignored) +{ + struct drbd_device *device = m->private; + struct drbd_md *md; + enum drbd_uuid_index idx; + + if (!get_ldev_if_state(device, D_FAILED)) + return -ENODEV; + + md = &device->ldev->md; + spin_lock_irq(&md->uuid_lock); + for (idx = UI_CURRENT; idx <= UI_HISTORY_END; idx++) { + seq_printf(m, "0x%016llX\n", md->uuid[idx]); + } + spin_unlock_irq(&md->uuid_lock); + 
put_ldev(device); + return 0; +} + +#define drbd_debugfs_device_attr(name) \ +static int device_ ## name ## _open(struct inode *inode, struct file *file) \ +{ \ + struct drbd_device *device = inode->i_private; \ + return drbd_single_open(file, device_ ## name ## _show, device, \ + &device->kref, drbd_destroy_device); \ +} \ +static int device_ ## name ## _release(struct inode *inode, struct file *file) \ +{ \ + struct drbd_device *device = inode->i_private; \ + kref_put(&device->kref, drbd_destroy_device); \ + return single_release(inode, file); \ +} \ +static const struct file_operations device_ ## name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = device_ ## name ## _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = device_ ## name ## _release, \ +}; + +drbd_debugfs_device_attr(oldest_requests) +drbd_debugfs_device_attr(act_log_extents) +drbd_debugfs_device_attr(resync_extents) +drbd_debugfs_device_attr(data_gen_id) + +void drbd_debugfs_device_add(struct drbd_device *device) +{ + struct dentry *vols_dir = device->resource->debugfs_res_volumes; + char minor_buf[8]; /* MINORMASK, MINORBITS == 20; */ + char vnr_buf[8]; /* volume number vnr is even 16 bit only; */ + char *slink_name = NULL; + + struct dentry *dentry; + if (!vols_dir || !drbd_debugfs_minors) + return; + + snprintf(vnr_buf, sizeof(vnr_buf), "%u", device->vnr); + dentry = debugfs_create_dir(vnr_buf, vols_dir); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + device->debugfs_vol = dentry; + + snprintf(minor_buf, sizeof(minor_buf), "%u", device->minor); + slink_name = kasprintf(GFP_KERNEL, "../resources/%s/volumes/%u", + device->resource->name, device->vnr); + if (!slink_name) + goto fail; + dentry = debugfs_create_symlink(minor_buf, drbd_debugfs_minors, slink_name); + kfree(slink_name); + slink_name = NULL; + if (IS_ERR_OR_NULL(dentry)) + goto fail; + device->debugfs_minor = dentry; + +#define DCF(name) do { \ + dentry = debugfs_create_file(#name, S_IRUSR|S_IRGRP, \ + device->debugfs_vol, device, \ + &device_ ## name ## _fops); \ + if (IS_ERR_OR_NULL(dentry)) \ + goto fail; \ + device->debugfs_vol_ ## name = dentry; \ + } while (0) + + DCF(oldest_requests); + DCF(act_log_extents); + DCF(resync_extents); + DCF(data_gen_id); +#undef DCF + return; + +fail: + drbd_debugfs_device_cleanup(device); + drbd_err(device, "failed to create debugfs entries\n"); +} + +void drbd_debugfs_device_cleanup(struct drbd_device *device) +{ + drbd_debugfs_remove(&device->debugfs_minor); + drbd_debugfs_remove(&device->debugfs_vol_oldest_requests); + drbd_debugfs_remove(&device->debugfs_vol_act_log_extents); + drbd_debugfs_remove(&device->debugfs_vol_resync_extents); + drbd_debugfs_remove(&device->debugfs_vol_data_gen_id); + drbd_debugfs_remove(&device->debugfs_vol); +} + +void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device) +{ + struct dentry *conn_dir = peer_device->connection->debugfs_conn; + struct dentry *dentry; + char vnr_buf[8]; + + if (!conn_dir) + return; + + snprintf(vnr_buf, sizeof(vnr_buf), "%u", peer_device->device->vnr); + dentry = debugfs_create_dir(vnr_buf, conn_dir); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + peer_device->debugfs_peer_dev = dentry; + return; + +fail: + drbd_debugfs_peer_device_cleanup(peer_device); + drbd_err(peer_device, "failed to create debugfs entries\n"); +} + +void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device) +{ + drbd_debugfs_remove(&peer_device->debugfs_peer_dev); +} + +static int drbd_version_show(struct seq_file *m, void *ignored) +{ + 
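
[Editorial note: the DCF() helper above is pure shorthand. DCF(oldest_requests), for example, expands to the same create-check-store sequence that drbd_debugfs_resource_add() spells out by hand:]

	do {
		dentry = debugfs_create_file("oldest_requests", S_IRUSR|S_IRGRP,
					     device->debugfs_vol, device,
					     &device_oldest_requests_fops);
		if (IS_ERR_OR_NULL(dentry))
			goto fail;
		device->debugfs_vol_oldest_requests = dentry;
	} while (0);
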
seq_printf(m, "# %s\n", drbd_buildtag()); + seq_printf(m, "VERSION=%s\n", REL_VERSION); + seq_printf(m, "API_VERSION=%u\n", API_VERSION); + seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN); + seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX); + return 0; +} + +static int drbd_version_open(struct inode *inode, struct file *file) +{ + return single_open(file, drbd_version_show, NULL); +} + +static struct file_operations drbd_version_fops = { + .owner = THIS_MODULE, + .open = drbd_version_open, + .llseek = seq_lseek, + .read = seq_read, + .release = single_release, +}; + +/* not __exit, may be indirectly called + * from the module-load-failure path as well. */ +void drbd_debugfs_cleanup(void) +{ + drbd_debugfs_remove(&drbd_debugfs_resources); + drbd_debugfs_remove(&drbd_debugfs_minors); + drbd_debugfs_remove(&drbd_debugfs_version); + drbd_debugfs_remove(&drbd_debugfs_root); +} + +int __init drbd_debugfs_init(void) +{ + struct dentry *dentry; + + dentry = debugfs_create_dir("drbd", NULL); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + drbd_debugfs_root = dentry; + + dentry = debugfs_create_file("version", 0444, drbd_debugfs_root, NULL, &drbd_version_fops); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + drbd_debugfs_version = dentry; + + dentry = debugfs_create_dir("resources", drbd_debugfs_root); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + drbd_debugfs_resources = dentry; + + dentry = debugfs_create_dir("minors", drbd_debugfs_root); + if (IS_ERR_OR_NULL(dentry)) + goto fail; + drbd_debugfs_minors = dentry; + return 0; + +fail: + drbd_debugfs_cleanup(); + if (dentry) + return PTR_ERR(dentry); + else + return -EINVAL; +} diff --git a/drivers/block/drbd/drbd_debugfs.h b/drivers/block/drbd/drbd_debugfs.h new file mode 100644 index 000000000000..8bee21340dce --- /dev/null +++ b/drivers/block/drbd/drbd_debugfs.h @@ -0,0 +1,39 @@ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/debugfs.h> + +#include "drbd_int.h" + +#ifdef CONFIG_DEBUG_FS +int __init drbd_debugfs_init(void); +void drbd_debugfs_cleanup(void); + +void drbd_debugfs_resource_add(struct drbd_resource *resource); +void drbd_debugfs_resource_cleanup(struct drbd_resource *resource); + +void drbd_debugfs_connection_add(struct drbd_connection *connection); +void drbd_debugfs_connection_cleanup(struct drbd_connection *connection); + +void drbd_debugfs_device_add(struct drbd_device *device); +void drbd_debugfs_device_cleanup(struct drbd_device *device); + +void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device); +void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device); +#else + +static inline int __init drbd_debugfs_init(void) { return -ENODEV; } +static inline void drbd_debugfs_cleanup(void) { } + +static inline void drbd_debugfs_resource_add(struct drbd_resource *resource) { } +static inline void drbd_debugfs_resource_cleanup(struct drbd_resource *resource) { } + +static inline void drbd_debugfs_connection_add(struct drbd_connection *connection) { } +static inline void drbd_debugfs_connection_cleanup(struct drbd_connection *connection) { } + +static inline void drbd_debugfs_device_add(struct drbd_device *device) { } +static inline void drbd_debugfs_device_cleanup(struct drbd_device *device) { } + +static inline void drbd_debugfs_peer_device_add(struct drbd_peer_device *peer_device) { } +static inline void drbd_debugfs_peer_device_cleanup(struct drbd_peer_device *peer_device) { } + +#endif diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 
a76ceb344d64..1a000016ccdf 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -317,7 +317,63 @@ struct drbd_request { struct list_head tl_requests; /* ring list in the transfer log */ struct bio *master_bio; /* master bio pointer */ - unsigned long start_time; + + /* see struct drbd_device */ + struct list_head req_pending_master_completion; + struct list_head req_pending_local; + + /* for generic IO accounting */ + unsigned long start_jif; + + /* for DRBD internal statistics */ + + /* Minimal set of time stamps to determine if we wait for activity log + * transactions, local disk or peer. 32 bit "jiffies" are good enough, + * we don't expect a DRBD request to be stalled for several month. + */ + + /* before actual request processing */ + unsigned long in_actlog_jif; + + /* local disk */ + unsigned long pre_submit_jif; + + /* per connection */ + unsigned long pre_send_jif; + unsigned long acked_jif; + unsigned long net_done_jif; + + /* Possibly even more detail to track each phase: + * master_completion_jif + * how long did it take to complete the master bio + * (application visible latency) + * allocated_jif + * how long the master bio was blocked until we finally allocated + * a tracking struct + * in_actlog_jif + * how long did we wait for activity log transactions + * + * net_queued_jif + * when did we finally queue it for sending + * pre_send_jif + * when did we start sending it + * post_send_jif + * how long did we block in the network stack trying to send it + * acked_jif + * when did we receive (or fake, in protocol A) a remote ACK + * net_done_jif + * when did we receive final acknowledgement (P_BARRIER_ACK), + * or decide, e.g. on connection loss, that we do no longer expect + * anything from this peer for this request. + * + * pre_submit_jif + * post_sub_jif + * when did we start submiting to the lower level device, + * and how long did we block in that submit function + * local_completion_jif + * how long did it take the lower level device to complete this request + */ + /* once it hits 0, we may complete the master_bio */ atomic_t completion_ref; @@ -366,6 +422,7 @@ struct drbd_peer_request { struct drbd_interval i; /* see comments on ee flag bits below */ unsigned long flags; + unsigned long submit_jif; union { u64 block_id; struct digest_info *digest; @@ -408,6 +465,17 @@ enum { /* Is set when net_conf had two_primaries set while creating this peer_req */ __EE_IN_INTERVAL_TREE, + + /* for debugfs: */ + /* has this been submitted, or does it still wait for something else? 
*/ + __EE_SUBMITTED, + + /* this is/was a write request */ + __EE_WRITE, + + /* this originates from application on peer + * (not some resync or verify or other DRBD internal request) */ + __EE_APPLICATION, }; #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) @@ -419,6 +487,9 @@ enum { #define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS) #define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK) #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE) +#define EE_SUBMITTED (1<<__EE_SUBMITTED) +#define EE_WRITE (1<<__EE_WRITE) +#define EE_APPLICATION (1<<__EE_APPLICATION) /* flag bits per device */ enum { @@ -433,11 +504,11 @@ enum { CONSIDER_RESYNC, MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ + SUSPEND_IO, /* suspend application io */ BITMAP_IO, /* suspend application io; once no more io in flight, start bitmap io */ BITMAP_IO_QUEUED, /* Started bitmap IO */ - GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ WAS_IO_ERROR, /* Local disk failed, returned IO error */ WAS_READ_ERROR, /* Local disk READ failed (set additionally to the above) */ FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */ @@ -450,6 +521,20 @@ enum { B_RS_H_DONE, /* Before resync handler done (already executed) */ DISCARD_MY_DATA, /* discard_my_data flag per volume */ READ_BALANCE_RR, + + FLUSH_PENDING, /* if set, device->flush_jif is when we submitted that flush + * from drbd_flush_after_epoch() */ + + /* cleared only after backing device related structures have been destroyed. */ + GOING_DISKLESS, /* Disk is being detached, because of io-error, or admin request. */ + + /* to be used in drbd_device_post_work() */ + GO_DISKLESS, /* tell worker to schedule cleanup before detach */ + DESTROY_DISK, /* tell worker to close backing devices and destroy related structures. */ + MD_SYNC, /* tell worker to call drbd_md_sync() */ + RS_START, /* tell worker to start resync/OV */ + RS_PROGRESS, /* tell worker that resync made significant progress */ + RS_DONE, /* tell worker that resync is done */ }; struct drbd_bitmap; /* opaque for drbd_device */ @@ -531,6 +616,11 @@ struct drbd_backing_dev { }; struct drbd_md_io { + struct page *page; + unsigned long start_jif; /* last call to drbd_md_get_buffer */ + unsigned long submit_jif; /* last _drbd_md_sync_page_io() submit */ + const char *current_use; + atomic_t in_use; unsigned int done; int error; }; @@ -577,10 +667,18 @@ enum { * and potentially deadlock on, this drbd worker. 
*/ DISCONNECT_SENT, + + DEVICE_WORK_PENDING, /* tell worker that some device has pending work */ }; struct drbd_resource { char *name; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_res; + struct dentry *debugfs_res_volumes; + struct dentry *debugfs_res_connections; + struct dentry *debugfs_res_in_flight_summary; +#endif struct kref kref; struct idr devices; /* volume number to device mapping */ struct list_head connections; @@ -594,12 +692,28 @@ struct drbd_resource { unsigned susp_nod:1; /* IO suspended because no data */ unsigned susp_fen:1; /* IO suspended because fence peer handler runs */ + enum write_ordering_e write_ordering; + cpumask_var_t cpu_mask; }; +struct drbd_thread_timing_details +{ + unsigned long start_jif; + void *cb_addr; + const char *caller_fn; + unsigned int line; + unsigned int cb_nr; +}; + struct drbd_connection { struct list_head connections; struct drbd_resource *resource; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_conn; + struct dentry *debugfs_conn_callback_history; + struct dentry *debugfs_conn_oldest_requests; +#endif struct kref kref; struct idr peer_devices; /* volume number to peer device mapping */ enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */ @@ -636,7 +750,6 @@ struct drbd_connection { struct drbd_epoch *current_epoch; spinlock_t epoch_lock; unsigned int epochs; - enum write_ordering_e write_ordering; atomic_t current_tle_nr; /* transfer log epoch number */ unsigned current_tle_writes; /* writes seen within this tl epoch */ @@ -645,9 +758,22 @@ struct drbd_connection { struct drbd_thread worker; struct drbd_thread asender; + /* cached pointers, + * so we can look up the oldest pending requests more quickly. + * protected by resource->req_lock */ + struct drbd_request *req_next; /* DRBD 9: todo.req_next */ + struct drbd_request *req_ack_pending; + struct drbd_request *req_not_net_done; + /* sender side */ struct drbd_work_queue sender_work; +#define DRBD_THREAD_DETAILS_HIST 16 + unsigned int w_cb_nr; /* keeps counting up */ + unsigned int r_cb_nr; /* keeps counting up */ + struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST]; + struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST]; + struct { /* whether this sender thread * has processed a single write yet. 
*/ @@ -663,11 +789,22 @@ struct drbd_connection { } send; }; +void __update_timing_details( + struct drbd_thread_timing_details *tdp, + unsigned int *cb_nr, + void *cb, + const char *fn, const unsigned int line); + +#define update_worker_timing_details(c, cb) \ + __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ ) +#define update_receiver_timing_details(c, cb) \ + __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ ) + struct submit_worker { struct workqueue_struct *wq; struct work_struct worker; - spinlock_t lock; + /* protected by ..->resource->req_lock */ struct list_head writes; }; @@ -675,12 +812,29 @@ struct drbd_peer_device { struct list_head peer_devices; struct drbd_device *device; struct drbd_connection *connection; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_peer_dev; +#endif }; struct drbd_device { struct drbd_resource *resource; struct list_head peer_devices; - int vnr; /* volume number within the connection */ + struct list_head pending_bitmap_io; + + unsigned long flush_jif; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_minor; + struct dentry *debugfs_vol; + struct dentry *debugfs_vol_oldest_requests; + struct dentry *debugfs_vol_act_log_extents; + struct dentry *debugfs_vol_resync_extents; + struct dentry *debugfs_vol_data_gen_id; +#endif + + unsigned int vnr; /* volume number within the connection */ + unsigned int minor; /* device minor number */ + struct kref kref; /* things that are stored as / read from meta data on disk */ @@ -697,19 +851,10 @@ struct drbd_device { unsigned long last_reattach_jif; struct drbd_work resync_work; struct drbd_work unplug_work; - struct drbd_work go_diskless; - struct drbd_work md_sync_work; - struct drbd_work start_resync_work; struct timer_list resync_timer; struct timer_list md_sync_timer; struct timer_list start_resync_timer; struct timer_list request_timer; -#ifdef DRBD_DEBUG_MD_SYNC - struct { - unsigned int line; - const char* func; - } last_md_mark_dirty; -#endif /* Used after attach while negotiating new disk state. 
*/ union drbd_state new_state_tmp; @@ -724,6 +869,7 @@ struct drbd_device { unsigned int al_writ_cnt; unsigned int bm_writ_cnt; atomic_t ap_bio_cnt; /* Requests we need to complete */ + atomic_t ap_actlog_cnt; /* Requests waiting for activity log */ atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */ atomic_t rs_pending_cnt; /* RS request/data packets on the wire */ atomic_t unacked_cnt; /* Need to send replies for */ @@ -733,6 +879,13 @@ struct drbd_device { struct rb_root read_requests; struct rb_root write_requests; + /* for statistics and timeouts */ + /* [0] read, [1] write */ + struct list_head pending_master_completion[2]; + struct list_head pending_completion[2]; + + /* use checksums for *this* resync */ + bool use_csums; /* blocks to resync in this run [unit BM_BLOCK_SIZE] */ unsigned long rs_total; /* number of resync blocks that failed in this run */ @@ -788,9 +941,7 @@ struct drbd_device { atomic_t pp_in_use; /* allocated from page pool */ atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */ wait_queue_head_t ee_wait; - struct page *md_io_page; /* one page buffer for md_io */ struct drbd_md_io md_io; - atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */ spinlock_t al_lock; wait_queue_head_t al_wait; struct lru_cache *act_log; /* activity log */ @@ -800,7 +951,6 @@ struct drbd_device { atomic_t packet_seq; unsigned int peer_seq; spinlock_t peer_seq_lock; - unsigned int minor; unsigned long comm_bm_set; /* communicated number of set bits. */ struct bm_io_work bm_io_work; u64 ed_uuid; /* UUID of the exposed data */ @@ -824,6 +974,21 @@ struct drbd_device { struct submit_worker submit; }; +struct drbd_bm_aio_ctx { + struct drbd_device *device; + struct list_head list; /* on device->pending_bitmap_io */; + unsigned long start_jif; + atomic_t in_flight; + unsigned int done; + unsigned flags; +#define BM_AIO_COPY_PAGES 1 +#define BM_AIO_WRITE_HINTED 2 +#define BM_AIO_WRITE_ALL_PAGES 4 +#define BM_AIO_READ 8 + int error; + struct kref kref; +}; + struct drbd_config_context { /* assigned from drbd_genlmsghdr */ unsigned int minor; @@ -949,7 +1114,7 @@ extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int extern int drbd_send_bitmap(struct drbd_device *device); extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode); extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode); -extern void drbd_free_bc(struct drbd_backing_dev *ldev); +extern void drbd_free_ldev(struct drbd_backing_dev *ldev); extern void drbd_device_cleanup(struct drbd_device *device); void drbd_print_uuids(struct drbd_device *device, const char *text); @@ -966,13 +1131,7 @@ extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local); extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local); extern int drbd_md_test_flag(struct drbd_backing_dev *, int); -#ifndef DRBD_DEBUG_MD_SYNC extern void drbd_md_mark_dirty(struct drbd_device *device); -#else -#define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ ) -extern void drbd_md_mark_dirty_(struct drbd_device *device, - unsigned int line, const char *func); -#endif extern void drbd_queue_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *), void (*done)(struct drbd_device *, int), @@ -983,9 +1142,8 @@ extern int drbd_bitmap_io(struct drbd_device *device, 
extern int drbd_bitmap_io_from_worker(struct drbd_device *device, int (*io_fn)(struct drbd_device *), char *why, enum bm_flag flags); -extern int drbd_bmio_set_n_write(struct drbd_device *device); -extern int drbd_bmio_clear_n_write(struct drbd_device *device); -extern void drbd_ldev_destroy(struct drbd_device *device); +extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local); +extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local); /* Meta data layout * @@ -1105,17 +1263,21 @@ struct bm_extent { /* in which _bitmap_ extent (resp. sector) the bit for a certain * _storage_ sector is located in */ #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9)) +#define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT)) -/* how much _storage_ sectors we have per bitmap sector */ +/* first storage sector a bitmap extent corresponds to */ #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9)) +/* how much _storage_ sectors we have per bitmap extent */ #define BM_SECT_PER_EXT BM_EXT_TO_SECT(1) +/* how many bits are covered by one bitmap extent (resync extent) */ +#define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT)) + +#define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1) + /* in one sector of the bitmap, we have this many activity_log extents. */ #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT)) -#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT) -#define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1) - /* the extent in "PER_EXTENT" below is an activity log extent * we need that many (long words/bytes) to store the bitmap * of one AL_EXTENT_SIZE chunk of storage. @@ -1195,11 +1357,11 @@ extern void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e); extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr); extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr); -extern int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local); extern int drbd_bm_read(struct drbd_device *device) __must_hold(local); extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr); extern int drbd_bm_write(struct drbd_device *device) __must_hold(local); extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local); +extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local); extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local); extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local); extern size_t drbd_bm_words(struct drbd_device *device); @@ -1213,7 +1375,6 @@ extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned lon extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo); extern unsigned long _drbd_bm_total_weight(struct drbd_device *device); extern unsigned long drbd_bm_total_weight(struct drbd_device *device); -extern int drbd_bm_rs_done(struct drbd_device *device); /* for receive_bitmap */ extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number, unsigned long *buffer); @@ -1312,7 +1473,7 @@ enum determine_dev_size { extern enum determine_dev_size drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local); extern void resync_after_online_grow(struct drbd_device *); -extern void drbd_reconsider_max_bio_size(struct drbd_device *device); +extern 
void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev); extern enum drbd_state_rv drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force); @@ -1333,7 +1494,7 @@ extern void resume_next_sg(struct drbd_device *device); extern void suspend_other_sg(struct drbd_device *device); extern int drbd_resync_finished(struct drbd_device *device); /* maybe rather drbd_main.c ? */ -extern void *drbd_md_get_buffer(struct drbd_device *device); +extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent); extern void drbd_md_put_buffer(struct drbd_device *device); extern int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, sector_t sector, int rw); @@ -1380,7 +1541,8 @@ extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req); extern int drbd_receiver(struct drbd_thread *thi); extern int drbd_asender(struct drbd_thread *thi); extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device); -extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector); +extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, + bool throttle_if_app_is_waiting); extern int drbd_submit_peer_request(struct drbd_device *, struct drbd_peer_request *, const unsigned, const int); @@ -1464,10 +1626,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device, { __release(local); if (!bio->bi_bdev) { - printk(KERN_ERR "drbd%d: drbd_generic_make_request: " - "bio->bi_bdev == NULL\n", - device_to_minor(device)); - dump_stack(); + drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); bio_endio(bio, -ENODEV); return; } @@ -1478,7 +1637,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device, generic_make_request(bio); } -void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo); +void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, + enum write_ordering_e wo); /* drbd_proc.c */ extern struct proc_dir_entry *drbd_proc; @@ -1489,9 +1649,9 @@ extern const char *drbd_role_str(enum drbd_role s); /* drbd_actlog.c */ extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i); extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i); -extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate); +extern void drbd_al_begin_io_commit(struct drbd_device *device); extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i); -extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate); +extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i); extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i); extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector); extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector); @@ -1501,14 +1661,17 @@ extern int drbd_rs_del_all(struct drbd_device *device); extern void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size); extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go); -extern void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, - int size, const char *file, const unsigned int line); + +enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC }; +extern int __drbd_change_sync(struct 
drbd_device *device, sector_t sector, int size, + enum update_sync_bits_mode mode, + const char *file, const unsigned int line); #define drbd_set_in_sync(device, sector, size) \ - __drbd_set_in_sync(device, sector, size, __FILE__, __LINE__) -extern int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, - int size, const char *file, const unsigned int line); + __drbd_change_sync(device, sector, size, SET_IN_SYNC, __FILE__, __LINE__) #define drbd_set_out_of_sync(device, sector, size) \ - __drbd_set_out_of_sync(device, sector, size, __FILE__, __LINE__) + __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC, __FILE__, __LINE__) +#define drbd_rs_failed_io(device, sector, size) \ + __drbd_change_sync(device, sector, size, RECORD_RS_FAILED, __FILE__, __LINE__) extern void drbd_al_shrink(struct drbd_device *device); extern int drbd_initialize_al(struct drbd_device *, void *); @@ -1764,25 +1927,38 @@ static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev) } static inline void -drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w) +drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) { unsigned long flags; spin_lock_irqsave(&q->q_lock, flags); - list_add(&w->list, &q->q); + list_add_tail(&w->list, &q->q); spin_unlock_irqrestore(&q->q_lock, flags); wake_up(&q->q_wait); } static inline void -drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) +drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w) { unsigned long flags; spin_lock_irqsave(&q->q_lock, flags); - list_add_tail(&w->list, &q->q); + if (list_empty_careful(&w->list)) + list_add_tail(&w->list, &q->q); spin_unlock_irqrestore(&q->q_lock, flags); wake_up(&q->q_wait); } +static inline void +drbd_device_post_work(struct drbd_device *device, int work_bit) +{ + if (!test_and_set_bit(work_bit, &device->flags)) { + struct drbd_connection *connection = + first_peer_device(device)->connection; + struct drbd_work_queue *q = &connection->sender_work; + if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags)) + wake_up(&q->q_wait); + } +} + extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue); static inline void wake_asender(struct drbd_connection *connection) @@ -1859,7 +2035,7 @@ static inline void inc_ap_pending(struct drbd_device *device) func, line, \ atomic_read(&device->which)) -#define dec_ap_pending(device) _dec_ap_pending(device, __FUNCTION__, __LINE__) +#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__) static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line) { if (atomic_dec_and_test(&device->ap_pending_cnt)) @@ -1878,7 +2054,7 @@ static inline void inc_rs_pending(struct drbd_device *device) atomic_inc(&device->rs_pending_cnt); } -#define dec_rs_pending(device) _dec_rs_pending(device, __FUNCTION__, __LINE__) +#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__) static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line) { atomic_dec(&device->rs_pending_cnt); @@ -1899,20 +2075,29 @@ static inline void inc_unacked(struct drbd_device *device) atomic_inc(&device->unacked_cnt); } -#define dec_unacked(device) _dec_unacked(device, __FUNCTION__, __LINE__) +#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__) static inline void _dec_unacked(struct drbd_device *device, const char *func, int line) { atomic_dec(&device->unacked_cnt); ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line); } -#define sub_unacked(device, n) 
_sub_unacked(device, n, __FUNCTION__, __LINE__) +#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__) static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line) { atomic_sub(n, &device->unacked_cnt); ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line); } +static inline bool is_sync_state(enum drbd_conns connection_state) +{ + return + (connection_state == C_SYNC_SOURCE + || connection_state == C_SYNC_TARGET + || connection_state == C_PAUSED_SYNC_S + || connection_state == C_PAUSED_SYNC_T); +} + /** * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev * @M: DRBD device. @@ -1924,6 +2109,11 @@ static inline void _sub_unacked(struct drbd_device *device, int n, const char *f static inline void put_ldev(struct drbd_device *device) { + enum drbd_disk_state ds = device->state.disk; + /* We must check the state *before* the atomic_dec becomes visible, + * or we have a theoretical race where someone hitting zero, + * while state still D_FAILED, will then see D_DISKLESS in the + * condition below and calling into destroy, where he must not, yet. */ int i = atomic_dec_return(&device->local_cnt); /* This may be called from some endio handler, @@ -1932,15 +2122,13 @@ static inline void put_ldev(struct drbd_device *device) __release(local); D_ASSERT(device, i >= 0); if (i == 0) { - if (device->state.disk == D_DISKLESS) + if (ds == D_DISKLESS) /* even internal references gone, safe to destroy */ - drbd_ldev_destroy(device); - if (device->state.disk == D_FAILED) { + drbd_device_post_work(device, DESTROY_DISK); + if (ds == D_FAILED) /* all application IO references gone. */ - if (!test_and_set_bit(GO_DISKLESS, &device->flags)) - drbd_queue_work(&first_peer_device(device)->connection->sender_work, - &device->go_diskless); - } + if (!test_and_set_bit(GOING_DISKLESS, &device->flags)) + drbd_device_post_work(device, GO_DISKLESS); wake_up(&device->misc_wait); } } @@ -1964,54 +2152,6 @@ static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_ extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins); #endif -/* you must have an "get_ldev" reference */ -static inline void drbd_get_syncer_progress(struct drbd_device *device, - unsigned long *bits_left, unsigned int *per_mil_done) -{ - /* this is to break it at compile time when we change that, in case we - * want to support more than (1<<32) bits on a 32bit arch. */ - typecheck(unsigned long, device->rs_total); - - /* note: both rs_total and rs_left are in bits, i.e. in - * units of BM_BLOCK_SIZE. - * for the percentage, we don't care. */ - - if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) - *bits_left = device->ov_left; - else - *bits_left = drbd_bm_total_weight(device) - device->rs_failed; - /* >> 10 to prevent overflow, - * +1 to prevent division by zero */ - if (*bits_left > device->rs_total) { - /* doh. maybe a logic bug somewhere. - * may also be just a race condition - * between this and a disconnect during sync. - * for now, just prevent in-kernel buffer overflow. - */ - smp_rmb(); - drbd_warn(device, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n", - drbd_conn_str(device->state.conn), - *bits_left, device->rs_total, device->rs_failed); - *per_mil_done = 0; - } else { - /* Make sure the division happens in long context. - * We allow up to one petabyte storage right now, - * at a granularity of 4k per bit that is 2**38 bits. 
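
[Editorial note: put_ldev() above shows the new post-work pattern in action: instead of queueing a dedicated struct drbd_work per event, callers set a per-device work bit and poke the worker through drbd_device_post_work(). As an illustration only, and as an assumption about how other call sites in this patch use it, a caller such as the md-sync timer reduces to a one-liner; the body below is a hypothetical sketch, only the md_sync_timer_fn name and the MD_SYNC bit come from the patch itself.]

	/* hypothetical caller: ask the worker to run drbd_md_sync()
	 * instead of doing meta-data IO from timer context */
	static void md_sync_timer_fn(unsigned long data)
	{
		struct drbd_device *device = (struct drbd_device *) data;

		drbd_device_post_work(device, MD_SYNC);
	}
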
- * After shift right and multiplication by 1000, - * this should still fit easily into a 32bit long, - * so we don't need a 64bit division on 32bit arch. - * Note: currently we don't support such large bitmaps on 32bit - * arch anyways, but no harm done to be prepared for it here. - */ - unsigned int shift = device->rs_total > UINT_MAX ? 16 : 10; - unsigned long left = *bits_left >> shift; - unsigned long total = 1UL + (device->rs_total >> shift); - unsigned long tmp = 1000UL - left * 1000UL/total; - *per_mil_done = tmp; - } -} - - /* this throttles on-the-fly application requests * according to max_buffers settings; * maybe re-implement using semaphores? */ @@ -2201,25 +2341,6 @@ static inline int drbd_queue_order_type(struct drbd_device *device) return QUEUE_ORDERED_NONE; } -static inline void drbd_md_flush(struct drbd_device *device) -{ - int r; - - if (device->ldev == NULL) { - drbd_warn(device, "device->ldev == NULL in drbd_md_flush\n"); - return; - } - - if (test_bit(MD_NO_FUA, &device->flags)) - return; - - r = blkdev_issue_flush(device->ldev->md_bdev, GFP_NOIO, NULL); - if (r) { - set_bit(MD_NO_FUA, &device->flags); - drbd_err(device, "meta data flush failed with status %d, disabling md-flushes\n", r); - } -} - static inline struct drbd_connection *first_connection(struct drbd_resource *resource) { return list_first_entry_or_null(&resource->connections, diff --git a/drivers/block/drbd/drbd_interval.h b/drivers/block/drbd/drbd_interval.h index f38fcb00c10d..f210543f05f4 100644 --- a/drivers/block/drbd/drbd_interval.h +++ b/drivers/block/drbd/drbd_interval.h @@ -10,7 +10,9 @@ struct drbd_interval { unsigned int size; /* size in bytes */ sector_t end; /* highest interval end in subtree */ int local:1 /* local or remote request? */; - int waiting:1; + int waiting:1; /* someone is waiting for this to complete */ + int completed:1; /* this has been completed already; + * ignore for conflict detection */ }; static inline void drbd_clear_interval(struct drbd_interval *i) diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 960645c26e6f..9b465bb68487 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -26,7 +26,10 @@ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> +#include <linux/jiffies.h> #include <linux/drbd.h> #include <asm/uaccess.h> #include <asm/types.h> @@ -54,16 +57,14 @@ #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */ - #include "drbd_vli.h" +#include "drbd_debugfs.h" static DEFINE_MUTEX(drbd_main_mutex); static int drbd_open(struct block_device *bdev, fmode_t mode); static void drbd_release(struct gendisk *gd, fmode_t mode); -static int w_md_sync(struct drbd_work *w, int unused); static void md_sync_timer_fn(unsigned long data); static int w_bitmap_io(struct drbd_work *w, int unused); -static int w_go_diskless(struct drbd_work *w, int unused); MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " "Lars Ellenberg <lars@linbit.com>"); @@ -264,7 +265,7 @@ bail: /** * _tl_restart() - Walks the transfer log, and applies an action to all requests - * @device: DRBD device. + * @connection: DRBD connection to operate on. 
* @what: The action/event to perform with all request objects * * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO, @@ -662,6 +663,11 @@ static int __send_command(struct drbd_connection *connection, int vnr, msg_flags); if (data && !err) err = drbd_send_all(connection, sock->socket, data, size, 0); + /* DRBD protocol "pings" are latency critical. + * This is supposed to trigger tcp_push_pending_frames() */ + if (!err && (cmd == P_PING || cmd == P_PING_ACK)) + drbd_tcp_nodelay(sock->socket); + return err; } @@ -1636,7 +1642,10 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * if (peer_device->connection->agreed_pro_version >= 100) { if (req->rq_state & RQ_EXP_RECEIVE_ACK) dp_flags |= DP_SEND_RECEIVE_ACK; - if (req->rq_state & RQ_EXP_WRITE_ACK) + /* During resync, request an explicit write ack, + * even in protocol != C */ + if (req->rq_state & RQ_EXP_WRITE_ACK + || (dp_flags & DP_MAY_SET_IN_SYNC)) dp_flags |= DP_SEND_WRITE_ACK; } p->dp_flags = cpu_to_be32(dp_flags); @@ -1900,6 +1909,7 @@ void drbd_init_set_defaults(struct drbd_device *device) drbd_set_defaults(device); atomic_set(&device->ap_bio_cnt, 0); + atomic_set(&device->ap_actlog_cnt, 0); atomic_set(&device->ap_pending_cnt, 0); atomic_set(&device->rs_pending_cnt, 0); atomic_set(&device->unacked_cnt, 0); @@ -1908,7 +1918,7 @@ void drbd_init_set_defaults(struct drbd_device *device) atomic_set(&device->rs_sect_in, 0); atomic_set(&device->rs_sect_ev, 0); atomic_set(&device->ap_in_flight, 0); - atomic_set(&device->md_io_in_use, 0); + atomic_set(&device->md_io.in_use, 0); mutex_init(&device->own_state_mutex); device->state_mutex = &device->own_state_mutex; @@ -1924,17 +1934,15 @@ void drbd_init_set_defaults(struct drbd_device *device) INIT_LIST_HEAD(&device->resync_reads); INIT_LIST_HEAD(&device->resync_work.list); INIT_LIST_HEAD(&device->unplug_work.list); - INIT_LIST_HEAD(&device->go_diskless.list); - INIT_LIST_HEAD(&device->md_sync_work.list); - INIT_LIST_HEAD(&device->start_resync_work.list); INIT_LIST_HEAD(&device->bm_io_work.w.list); + INIT_LIST_HEAD(&device->pending_master_completion[0]); + INIT_LIST_HEAD(&device->pending_master_completion[1]); + INIT_LIST_HEAD(&device->pending_completion[0]); + INIT_LIST_HEAD(&device->pending_completion[1]); device->resync_work.cb = w_resync_timer; device->unplug_work.cb = w_send_write_hint; - device->go_diskless.cb = w_go_diskless; - device->md_sync_work.cb = w_md_sync; device->bm_io_work.w.cb = w_bitmap_io; - device->start_resync_work.cb = w_start_resync; init_timer(&device->resync_timer); init_timer(&device->md_sync_timer); @@ -1992,7 +2000,7 @@ void drbd_device_cleanup(struct drbd_device *device) drbd_bm_cleanup(device); } - drbd_free_bc(device->ldev); + drbd_free_ldev(device->ldev); device->ldev = NULL; clear_bit(AL_SUSPENDED, &device->flags); @@ -2006,7 +2014,6 @@ void drbd_device_cleanup(struct drbd_device *device) D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); D_ASSERT(device, list_empty(&device->resync_work.list)); D_ASSERT(device, list_empty(&device->unplug_work.list)); - D_ASSERT(device, list_empty(&device->go_diskless.list)); drbd_set_defaults(device); } @@ -2129,20 +2136,6 @@ Enomem: return -ENOMEM; } -static int drbd_notify_sys(struct notifier_block *this, unsigned long code, - void *unused) -{ - /* just so we have it. you never know what interesting things we - * might want to do here some day... 
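
[Editorial note on the ping change in __send_command() above: drbd_tcp_nodelay() is a pre-existing drbd_int.h helper, not part of this hunk. Calling it right after queueing a P_PING or P_PING_ACK matters because toggling TCP_NODELAY pushes any pending small frames out immediately, which keeps ping latency low. The sketch below is an assumed shape of such a helper (example_tcp_nodelay is a made-up name); the real DRBD implementation may differ in detail.]

	#include <linux/net.h>
	#include <net/tcp.h>

	/* assumed shape of a TCP_NODELAY toggle */
	static void example_tcp_nodelay(struct socket *sock)
	{
		int val = 1;

		(void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
					 (char *)&val, sizeof(val));
	}
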
- */ - - return NOTIFY_DONE; -} - -static struct notifier_block drbd_notifier = { - .notifier_call = drbd_notify_sys, -}; - static void drbd_release_all_peer_reqs(struct drbd_device *device) { int rr; @@ -2173,7 +2166,7 @@ void drbd_destroy_device(struct kref *kref) { struct drbd_device *device = container_of(kref, struct drbd_device, kref); struct drbd_resource *resource = device->resource; - struct drbd_connection *connection; + struct drbd_peer_device *peer_device, *tmp_peer_device; del_timer_sync(&device->request_timer); @@ -2187,7 +2180,7 @@ void drbd_destroy_device(struct kref *kref) if (device->this_bdev) bdput(device->this_bdev); - drbd_free_bc(device->ldev); + drbd_free_ldev(device->ldev); device->ldev = NULL; drbd_release_all_peer_reqs(device); @@ -2200,15 +2193,20 @@ void drbd_destroy_device(struct kref *kref) if (device->bitmap) /* should no longer be there. */ drbd_bm_cleanup(device); - __free_page(device->md_io_page); + __free_page(device->md_io.page); put_disk(device->vdisk); blk_cleanup_queue(device->rq_queue); kfree(device->rs_plan_s); - kfree(first_peer_device(device)); - kfree(device); - for_each_connection(connection, resource) - kref_put(&connection->kref, drbd_destroy_connection); + /* not for_each_connection(connection, resource): + * those may have been cleaned up and disassociated already. + */ + for_each_peer_device_safe(peer_device, tmp_peer_device, device) { + kref_put(&peer_device->connection->kref, drbd_destroy_connection); + kfree(peer_device); + } + memset(device, 0xfd, sizeof(*device)); + kfree(device); kref_put(&resource->kref, drbd_destroy_resource); } @@ -2236,7 +2234,7 @@ static void do_retry(struct work_struct *ws) list_for_each_entry_safe(req, tmp, &writes, tl_requests) { struct drbd_device *device = req->device; struct bio *bio = req->master_bio; - unsigned long start_time = req->start_time; + unsigned long start_jif = req->start_jif; bool expected; expected = @@ -2271,10 +2269,12 @@ static void do_retry(struct work_struct *ws) /* We are not just doing generic_make_request(), * as we want to keep the start_time information. */ inc_ap_bio(device); - __drbd_make_request(device, bio, start_time); + __drbd_make_request(device, bio, start_jif); } } +/* called via drbd_req_put_completion_ref(), + * holds resource->req_lock */ void drbd_restart_request(struct drbd_request *req) { unsigned long flags; @@ -2298,6 +2298,7 @@ void drbd_destroy_resource(struct kref *kref) idr_destroy(&resource->devices); free_cpumask_var(resource->cpu_mask); kfree(resource->name); + memset(resource, 0xf2, sizeof(*resource)); kfree(resource); } @@ -2307,8 +2308,10 @@ void drbd_free_resource(struct drbd_resource *resource) for_each_connection_safe(connection, tmp, resource) { list_del(&connection->connections); + drbd_debugfs_connection_cleanup(connection); kref_put(&connection->kref, drbd_destroy_connection); } + drbd_debugfs_resource_cleanup(resource); kref_put(&resource->kref, drbd_destroy_resource); } @@ -2318,8 +2321,6 @@ static void drbd_cleanup(void) struct drbd_device *device; struct drbd_resource *resource, *tmp; - unregister_reboot_notifier(&drbd_notifier); - /* first remove proc, * drbdsetup uses it's presence to detect * whether DRBD is loaded. 
@@ -2335,6 +2336,7 @@ static void drbd_cleanup(void) destroy_workqueue(retry.wq); drbd_genl_unregister(); + drbd_debugfs_cleanup(); idr_for_each_entry(&drbd_devices, device, i) drbd_delete_device(device); @@ -2350,7 +2352,7 @@ static void drbd_cleanup(void) idr_destroy(&drbd_devices); - printk(KERN_INFO "drbd: module cleanup done.\n"); + pr_info("module cleanup done.\n"); } /** @@ -2539,6 +2541,20 @@ int set_resource_options(struct drbd_resource *resource, struct res_opts *res_op if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE, cpumask_bits(new_cpu_mask), nr_cpu_ids); + if (err == -EOVERFLOW) { + /* So what. mask it out. */ + cpumask_var_t tmp_cpu_mask; + if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) { + cpumask_setall(tmp_cpu_mask); + cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask); + drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n", + res_opts->cpu_mask, + strlen(res_opts->cpu_mask) > 12 ? "..." : "", + nr_cpu_ids); + free_cpumask_var(tmp_cpu_mask); + err = 0; + } + } if (err) { drbd_warn(resource, "bitmap_parse() failed with %d\n", err); /* retcode = ERR_CPU_MASK_PARSE; */ @@ -2579,10 +2595,12 @@ struct drbd_resource *drbd_create_resource(const char *name) kref_init(&resource->kref); idr_init(&resource->devices); INIT_LIST_HEAD(&resource->connections); + resource->write_ordering = WO_bdev_flush; list_add_tail_rcu(&resource->resources, &drbd_resources); mutex_init(&resource->conf_update); mutex_init(&resource->adm_mutex); spin_lock_init(&resource->req_lock); + drbd_debugfs_resource_add(resource); return resource; fail_free_name: @@ -2593,7 +2611,7 @@ fail: return NULL; } -/* caller must be under genl_lock() */ +/* caller must be under adm_mutex */ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts) { struct drbd_resource *resource; @@ -2617,7 +2635,6 @@ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts) INIT_LIST_HEAD(&connection->current_epoch->list); connection->epochs = 1; spin_lock_init(&connection->epoch_lock); - connection->write_ordering = WO_bdev_flush; connection->send.seen_any_write_yet = false; connection->send.current_epoch_nr = 0; @@ -2652,6 +2669,7 @@ struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts) kref_get(&resource->kref); list_add_tail_rcu(&connection->connections, &resource->connections); + drbd_debugfs_connection_add(connection); return connection; fail_resource: @@ -2680,6 +2698,7 @@ void drbd_destroy_connection(struct kref *kref) drbd_free_socket(&connection->data); kfree(connection->int_dig_in); kfree(connection->int_dig_vv); + memset(connection, 0xfc, sizeof(*connection)); kfree(connection); kref_put(&resource->kref, drbd_destroy_resource); } @@ -2694,7 +2713,6 @@ static int init_submitter(struct drbd_device *device) return -ENOMEM; INIT_WORK(&device->submit.worker, do_submit); - spin_lock_init(&device->submit.lock); INIT_LIST_HEAD(&device->submit.writes); return 0; } @@ -2764,8 +2782,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig blk_queue_merge_bvec(q, drbd_merge_bvec); q->queue_lock = &resource->req_lock; - device->md_io_page = alloc_page(GFP_KERNEL); - if (!device->md_io_page) + device->md_io.page = alloc_page(GFP_KERNEL); + if (!device->md_io.page) goto out_no_io_page; if (drbd_bm_init(device)) @@ -2794,6 +2812,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig kref_get(&device->kref); 
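
The -EOVERFLOW branch added to set_resource_options() above chooses to truncate an over-long CPU mask and warn, rather than reject the whole configuration. Roughly the same policy in a self-contained userspace sketch, with an invented parse_cpu_mask() limited to 64 CPUs (the kernel version works on arbitrary-width cpumasks via bitmap_parse()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a hex CPU mask; if it names CPUs beyond n_cpus, mask the excess
 * bits out and warn instead of failing. */
static int parse_cpu_mask(const char *s, unsigned int n_cpus,
                          unsigned long long *mask)
{
        char *end;
        unsigned long long m = strtoull(s, &end, 16);

        if (end == s || *end != '\0')
                return -EINVAL;
        if (n_cpus < 64 && (m >> n_cpus) != 0) {
                m &= (1ULL << n_cpus) - 1;
                fprintf(stderr, "cpu mask \"%s\" truncated to %u CPUs\n",
                        s, n_cpus);
        }
        *mask = m;
        return 0;
}
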
INIT_LIST_HEAD(&device->peer_devices); + INIT_LIST_HEAD(&device->pending_bitmap_io); for_each_connection(connection, resource) { peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL); if (!peer_device) @@ -2829,7 +2848,10 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig for_each_peer_device(peer_device, device) drbd_connected(peer_device); } - + /* move to create_peer_device() */ + for_each_peer_device(peer_device, device) + drbd_debugfs_peer_device_add(peer_device); + drbd_debugfs_device_add(device); return NO_ERROR; out_idr_remove_vol: @@ -2853,7 +2875,7 @@ out_idr_remove_minor: out_no_minor_idr: drbd_bm_cleanup(device); out_no_bitmap: - __free_page(device->md_io_page); + __free_page(device->md_io.page); out_no_io_page: put_disk(disk); out_no_disk: @@ -2868,8 +2890,13 @@ void drbd_delete_device(struct drbd_device *device) { struct drbd_resource *resource = device->resource; struct drbd_connection *connection; + struct drbd_peer_device *peer_device; int refs = 3; + /* move to free_peer_device() */ + for_each_peer_device(peer_device, device) + drbd_debugfs_peer_device_cleanup(peer_device); + drbd_debugfs_device_cleanup(device); for_each_connection(connection, resource) { idr_remove(&connection->peer_devices, device->vnr); refs++; @@ -2881,13 +2908,12 @@ void drbd_delete_device(struct drbd_device *device) kref_sub(&device->kref, refs, drbd_destroy_device); } -int __init drbd_init(void) +static int __init drbd_init(void) { int err; if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { - printk(KERN_ERR - "drbd: invalid minor_count (%d)\n", minor_count); + pr_err("invalid minor_count (%d)\n", minor_count); #ifdef MODULE return -EINVAL; #else @@ -2897,14 +2923,11 @@ int __init drbd_init(void) err = register_blkdev(DRBD_MAJOR, "drbd"); if (err) { - printk(KERN_ERR - "drbd: unable to register block device major %d\n", + pr_err("unable to register block device major %d\n", DRBD_MAJOR); return err; } - register_reboot_notifier(&drbd_notifier); - /* * allocate all necessary structs */ @@ -2918,7 +2941,7 @@ int __init drbd_init(void) err = drbd_genl_register(); if (err) { - printk(KERN_ERR "drbd: unable to register generic netlink family\n"); + pr_err("unable to register generic netlink family\n"); goto fail; } @@ -2929,38 +2952,39 @@ int __init drbd_init(void) err = -ENOMEM; drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); if (!drbd_proc) { - printk(KERN_ERR "drbd: unable to register proc file\n"); + pr_err("unable to register proc file\n"); goto fail; } retry.wq = create_singlethread_workqueue("drbd-reissue"); if (!retry.wq) { - printk(KERN_ERR "drbd: unable to create retry workqueue\n"); + pr_err("unable to create retry workqueue\n"); goto fail; } INIT_WORK(&retry.worker, do_retry); spin_lock_init(&retry.lock); INIT_LIST_HEAD(&retry.writes); - printk(KERN_INFO "drbd: initialized. " + if (drbd_debugfs_init()) + pr_notice("failed to initialize debugfs -- will not be available\n"); + + pr_info("initialized. " "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n", API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX); - printk(KERN_INFO "drbd: %s\n", drbd_buildtag()); - printk(KERN_INFO "drbd: registered as block device major %d\n", - DRBD_MAJOR); - + pr_info("%s\n", drbd_buildtag()); + pr_info("registered as block device major %d\n", DRBD_MAJOR); return 0; /* Success! 
*/ fail: drbd_cleanup(); if (err == -ENOMEM) - printk(KERN_ERR "drbd: ran out of memory\n"); + pr_err("ran out of memory\n"); else - printk(KERN_ERR "drbd: initialization failure\n"); + pr_err("initialization failure\n"); return err; } -void drbd_free_bc(struct drbd_backing_dev *ldev) +void drbd_free_ldev(struct drbd_backing_dev *ldev) { if (ldev == NULL) return; @@ -2972,24 +2996,29 @@ void drbd_free_bc(struct drbd_backing_dev *ldev) kfree(ldev); } -void drbd_free_sock(struct drbd_connection *connection) +static void drbd_free_one_sock(struct drbd_socket *ds) { - if (connection->data.socket) { - mutex_lock(&connection->data.mutex); - kernel_sock_shutdown(connection->data.socket, SHUT_RDWR); - sock_release(connection->data.socket); - connection->data.socket = NULL; - mutex_unlock(&connection->data.mutex); - } - if (connection->meta.socket) { - mutex_lock(&connection->meta.mutex); - kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR); - sock_release(connection->meta.socket); - connection->meta.socket = NULL; - mutex_unlock(&connection->meta.mutex); + struct socket *s; + mutex_lock(&ds->mutex); + s = ds->socket; + ds->socket = NULL; + mutex_unlock(&ds->mutex); + if (s) { + /* so debugfs does not need to mutex_lock() */ + synchronize_rcu(); + kernel_sock_shutdown(s, SHUT_RDWR); + sock_release(s); } } +void drbd_free_sock(struct drbd_connection *connection) +{ + if (connection->data.socket) + drbd_free_one_sock(&connection->data); + if (connection->meta.socket) + drbd_free_one_sock(&connection->meta); +} + /* meta data management */ void conn_md_sync(struct drbd_connection *connection) @@ -3093,7 +3122,7 @@ void drbd_md_sync(struct drbd_device *device) if (!get_ldev_if_state(device, D_FAILED)) return; - buffer = drbd_md_get_buffer(device); + buffer = drbd_md_get_buffer(device, __func__); if (!buffer) goto out; @@ -3253,7 +3282,7 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) if (device->state.disk != D_DISKLESS) return ERR_DISK_CONFIGURED; - buffer = drbd_md_get_buffer(device); + buffer = drbd_md_get_buffer(device, __func__); if (!buffer) return ERR_NOMEM; @@ -3466,23 +3495,19 @@ void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) * * Sets all bits in the bitmap and writes the whole bitmap to stable storage. */ -int drbd_bmio_set_n_write(struct drbd_device *device) +int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local) { int rv = -EIO; - if (get_ldev_if_state(device, D_ATTACHING)) { - drbd_md_set_flag(device, MDF_FULL_SYNC); - drbd_md_sync(device); - drbd_bm_set_all(device); - - rv = drbd_bm_write(device); + drbd_md_set_flag(device, MDF_FULL_SYNC); + drbd_md_sync(device); + drbd_bm_set_all(device); - if (!rv) { - drbd_md_clear_flag(device, MDF_FULL_SYNC); - drbd_md_sync(device); - } + rv = drbd_bm_write(device); - put_ldev(device); + if (!rv) { + drbd_md_clear_flag(device, MDF_FULL_SYNC); + drbd_md_sync(device); } return rv; @@ -3494,18 +3519,11 @@ int drbd_bmio_set_n_write(struct drbd_device *device) * * Clears all bits in the bitmap and writes the whole bitmap to stable storage. 
*/ -int drbd_bmio_clear_n_write(struct drbd_device *device) +int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local) { - int rv = -EIO; - drbd_resume_al(device); - if (get_ldev_if_state(device, D_ATTACHING)) { - drbd_bm_clear_all(device); - rv = drbd_bm_write(device); - put_ldev(device); - } - - return rv; + drbd_bm_clear_all(device); + return drbd_bm_write(device); } static int w_bitmap_io(struct drbd_work *w, int unused) @@ -3537,61 +3555,6 @@ static int w_bitmap_io(struct drbd_work *w, int unused) return 0; } -void drbd_ldev_destroy(struct drbd_device *device) -{ - lc_destroy(device->resync); - device->resync = NULL; - lc_destroy(device->act_log); - device->act_log = NULL; - __no_warn(local, - drbd_free_bc(device->ldev); - device->ldev = NULL;); - - clear_bit(GO_DISKLESS, &device->flags); -} - -static int w_go_diskless(struct drbd_work *w, int unused) -{ - struct drbd_device *device = - container_of(w, struct drbd_device, go_diskless); - - D_ASSERT(device, device->state.disk == D_FAILED); - /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will - * inc/dec it frequently. Once we are D_DISKLESS, no one will touch - * the protected members anymore, though, so once put_ldev reaches zero - * again, it will be safe to free them. */ - - /* Try to write changed bitmap pages, read errors may have just - * set some bits outside the area covered by the activity log. - * - * If we have an IO error during the bitmap writeout, - * we will want a full sync next time, just in case. - * (Do we want a specific meta data flag for this?) - * - * If that does not make it to stable storage either, - * we cannot do anything about that anymore. - * - * We still need to check if both bitmap and ldev are present, we may - * end up here after a failed attach, before ldev was even assigned. - */ - if (device->bitmap && device->ldev) { - /* An interrupted resync or similar is allowed to recounts bits - * while we detach. - * Any modifications would not be expected anymore, though. - */ - if (drbd_bitmap_io_from_worker(device, drbd_bm_write, - "detach", BM_LOCKED_TEST_ALLOWED)) { - if (test_bit(WAS_READ_ERROR, &device->flags)) { - drbd_md_set_flag(device, MDF_FULL_SYNC); - drbd_md_sync(device); - } - } - } - - drbd_force_state(device, NS(disk, D_DISKLESS)); - return 0; -} - /** * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap * @device: DRBD device. @@ -3603,6 +3566,9 @@ static int w_go_diskless(struct drbd_work *w, int unused) * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be * called from worker context. It MUST NOT be used while a previous such * work is still pending! + * + * Its worker function encloses the call of io_fn() by get_ldev() and + * put_ldev(). */ void drbd_queue_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *), @@ -3685,25 +3651,7 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag) static void md_sync_timer_fn(unsigned long data) { struct drbd_device *device = (struct drbd_device *) data; - - /* must not double-queue! */ - if (list_empty(&device->md_sync_work.list)) - drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, - &device->md_sync_work); -} - -static int w_md_sync(struct drbd_work *w, int unused) -{ - struct drbd_device *device = - container_of(w, struct drbd_device, md_sync_work); - - drbd_warn(device, "md_sync_timer expired! 
Worker calls drbd_md_sync().\n"); -#ifdef DEBUG - drbd_warn(device, "last md_mark_dirty: %s:%u\n", - device->last_md_mark_dirty.func, device->last_md_mark_dirty.line); -#endif - drbd_md_sync(device); - return 0; + drbd_device_post_work(device, MD_SYNC); } const char *cmdname(enum drbd_packet cmd) diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 3f2e16738080..1cd47df44bda 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -23,6 +23,8 @@ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/drbd.h> #include <linux/in.h> @@ -85,7 +87,7 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) { genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb)))); if (genlmsg_reply(skb, info)) - printk(KERN_ERR "drbd: error sending genl reply\n"); + pr_err("error sending genl reply\n"); } /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only @@ -558,8 +560,10 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection) } enum drbd_state_rv -drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) +drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force) { + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; const int max_tries = 4; enum drbd_state_rv rv = SS_UNKNOWN_ERROR; struct net_conf *nc; @@ -607,7 +611,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) device->state.disk == D_CONSISTENT && mask.pdsk == 0) { D_ASSERT(device, device->state.pdsk == D_UNKNOWN); - if (conn_try_outdate_peer(first_peer_device(device)->connection)) { + if (conn_try_outdate_peer(connection)) { val.disk = D_UP_TO_DATE; mask.disk = D_MASK; } @@ -617,7 +621,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) if (rv == SS_NOTHING_TO_DO) goto out; if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { - if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) { + if (!conn_try_outdate_peer(connection) && force) { drbd_warn(device, "Forced into split brain situation!\n"); mask.pdsk = D_MASK; val.pdsk = D_OUTDATED; @@ -630,7 +634,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) retry at most once more in this case. */ int timeo; rcu_read_lock(); - nc = rcu_dereference(first_peer_device(device)->connection->net_conf); + nc = rcu_dereference(connection->net_conf); timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; rcu_read_unlock(); schedule_timeout_interruptible(timeo); @@ -659,19 +663,17 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) /* FIXME also wait for all pending P_BARRIER_ACK? */ if (new_role == R_SECONDARY) { - set_disk_ro(device->vdisk, true); if (get_ldev(device)) { device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; put_ldev(device); } } else { - /* Called from drbd_adm_set_role only. - * We are still holding the conf_update mutex. 
*/ - nc = first_peer_device(device)->connection->net_conf; + mutex_lock(&device->resource->conf_update); + nc = connection->net_conf; if (nc) nc->discard_my_data = 0; /* without copy; single bit op is atomic */ + mutex_unlock(&device->resource->conf_update); - set_disk_ro(device->vdisk, false); if (get_ldev(device)) { if (((device->state.conn < C_CONNECTED || device->state.pdsk <= D_FAILED) @@ -689,12 +691,12 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) if (device->state.conn >= C_WF_REPORT_PARAMS) { /* if this was forced, we should consider sync */ if (forced) - drbd_send_uuids(first_peer_device(device)); - drbd_send_current_state(first_peer_device(device)); + drbd_send_uuids(peer_device); + drbd_send_current_state(peer_device); } drbd_md_sync(device); - + set_disk_ro(device->vdisk, new_role == R_SECONDARY); kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); out: mutex_unlock(device->state_mutex); @@ -891,7 +893,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct * still lock the act_log to not trigger ASSERTs there. */ drbd_suspend_io(device); - buffer = drbd_md_get_buffer(device); /* Lock meta-data IO */ + buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */ if (!buffer) { drbd_resume_io(device); return DS_ERROR; @@ -971,6 +973,10 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct if (la_size_changed || md_moved || rs) { u32 prev_flags; + /* We do some synchronous IO below, which may take some time. + * Clear the timer, to avoid scary "timer expired!" messages, + * "Superblock" is written out at least twice below, anyways. */ + del_timer(&device->md_sync_timer); drbd_al_shrink(device); /* All extents inactive. */ prev_flags = md->flags; @@ -1116,15 +1122,16 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc) return 0; } -static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_bio_size) +static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev, + unsigned int max_bio_size) { struct request_queue * const q = device->rq_queue; unsigned int max_hw_sectors = max_bio_size >> 9; unsigned int max_segments = 0; struct request_queue *b = NULL; - if (get_ldev_if_state(device, D_ATTACHING)) { - b = device->ldev->backing_bdev->bd_disk->queue; + if (bdev) { + b = bdev->backing_bdev->bd_disk->queue; max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); rcu_read_lock(); @@ -1169,11 +1176,10 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_ b->backing_dev_info.ra_pages); q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; } - put_ldev(device); } } -void drbd_reconsider_max_bio_size(struct drbd_device *device) +void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev) { unsigned int now, new, local, peer; @@ -1181,10 +1187,9 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device) local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */ peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */ - if (get_ldev_if_state(device, D_ATTACHING)) { - local = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9; + if (bdev) { + local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9; device->local_max_bio_size = local; - put_ldev(device); } local = min(local, DRBD_MAX_BIO_SIZE); @@ -1217,7 +1222,7 @@ void 
drbd_reconsider_max_bio_size(struct drbd_device *device) if (new != now) drbd_info(device, "max BIO size = %u\n", new); - drbd_setup_queue_param(device, new); + drbd_setup_queue_param(device, bdev, new); } /* Starts the worker thread */ @@ -1299,6 +1304,13 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev) return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION; } +static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b) +{ + return a->disk_barrier != b->disk_barrier || + a->disk_flushes != b->disk_flushes || + a->disk_drain != b->disk_drain; +} + int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) { struct drbd_config_context adm_ctx; @@ -1405,7 +1417,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) else set_bit(MD_NO_FUA, &device->flags); - drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush); + if (write_ordering_changed(old_disk_conf, new_disk_conf)) + drbd_bump_write_ordering(device->resource, NULL, WO_bdev_flush); drbd_md_sync(device); @@ -1440,6 +1453,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) { struct drbd_config_context adm_ctx; struct drbd_device *device; + struct drbd_peer_device *peer_device; + struct drbd_connection *connection; int err; enum drbd_ret_code retcode; enum determine_dev_size dd; @@ -1462,7 +1477,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) device = adm_ctx.device; mutex_lock(&adm_ctx.resource->adm_mutex); - conn_reconfig_start(first_peer_device(device)->connection); + peer_device = first_peer_device(device); + connection = peer_device ? peer_device->connection : NULL; + conn_reconfig_start(connection); /* if you want to reconfigure, please tear down first */ if (device->state.disk > D_DISKLESS) { @@ -1473,7 +1490,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) * drbd_ldev_destroy is done already, we may end up here very fast, * e.g. if someone calls attach from the on-io-error handler, * to realize a "hot spare" feature (not that I'd recommend that) */ - wait_event(device->misc_wait, !atomic_read(&device->local_cnt)); + wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags)); /* make sure there is no leftover from previous force-detach attempts */ clear_bit(FORCE_DETACH, &device->flags); @@ -1529,7 +1546,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) goto fail; rcu_read_lock(); - nc = rcu_dereference(first_peer_device(device)->connection->net_conf); + nc = rcu_dereference(connection->net_conf); if (nc) { if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) { rcu_read_unlock(); @@ -1649,7 +1666,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) */ wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device)); /* and for any other previously queued work */ - drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work); + drbd_flush_workqueue(&connection->sender_work); rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE); retcode = rv; /* FIXME: Type mismatch. 
*/ @@ -1710,7 +1727,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) new_disk_conf = NULL; new_plan = NULL; - drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush); + drbd_bump_write_ordering(device->resource, device->ldev, WO_bdev_flush); if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY)) set_bit(CRASHED_PRIMARY, &device->flags); @@ -1726,7 +1743,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) device->read_cnt = 0; device->writ_cnt = 0; - drbd_reconsider_max_bio_size(device); + drbd_reconsider_max_bio_size(device, device->ldev); /* If I am currently not R_PRIMARY, * but meta data primary indicator is set, @@ -1845,7 +1862,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); put_ldev(device); - conn_reconfig_done(first_peer_device(device)->connection); + conn_reconfig_done(connection); mutex_unlock(&adm_ctx.resource->adm_mutex); drbd_adm_finish(&adm_ctx, info, retcode); return 0; @@ -1856,7 +1873,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) drbd_force_state(device, NS(disk, D_DISKLESS)); drbd_md_sync(device); fail: - conn_reconfig_done(first_peer_device(device)->connection); + conn_reconfig_done(connection); if (nbc) { if (nbc->backing_bdev) blkdev_put(nbc->backing_bdev, @@ -1888,7 +1905,7 @@ static int adm_detach(struct drbd_device *device, int force) } drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ - drbd_md_get_buffer(device); /* make sure there is no in-flight meta-data IO */ + drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */ retcode = drbd_request_state(device, NS(disk, D_FAILED)); drbd_md_put_buffer(device); /* D_FAILED will transition to DISKLESS. */ @@ -2654,8 +2671,13 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) if (retcode != NO_ERROR) goto out; - mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; + if (!get_ldev(device)) { + retcode = ERR_NO_DISK; + goto out; + } + + mutex_lock(&adm_ctx.resource->adm_mutex); /* If there is still bitmap IO pending, probably because of a previous * resync just being finished, wait for it before requesting a new resync. @@ -2679,6 +2701,7 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T)); drbd_resume_io(device); mutex_unlock(&adm_ctx.resource->adm_mutex); + put_ldev(device); out: drbd_adm_finish(&adm_ctx, info, retcode); return 0; @@ -2704,7 +2727,7 @@ out: return 0; } -static int drbd_bmio_set_susp_al(struct drbd_device *device) +static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local) { int rv; @@ -2725,8 +2748,13 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) if (retcode != NO_ERROR) goto out; - mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; + if (!get_ldev(device)) { + retcode = ERR_NO_DISK; + goto out; + } + + mutex_lock(&adm_ctx.resource->adm_mutex); /* If there is still bitmap IO pending, probably because of a previous * resync just being finished, wait for it before requesting a new resync. 
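
Several call sites above now pass __func__ into drbd_md_get_buffer(), so the owner of the single meta-data IO buffer can be named when someone else has to wait for it. A small sketch of that bookkeeping, assuming pthreads; md_get_buffer()/md_put_buffer() and the mutex are stand-ins for the atomic device->md_io.in_use accounting visible in the hunks above:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t md_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *md_owner;    /* __func__ of the current holder, or NULL */

static void *md_get_buffer(void *buffer, const char *intent)
{
        if (pthread_mutex_trylock(&md_lock) != 0) {
                /* Racy read of md_owner, but it only feeds a log message. */
                fprintf(stderr, "md buffer busy: held by %s, wanted by %s\n",
                        md_owner ? md_owner : "(unknown)", intent);
                pthread_mutex_lock(&md_lock);
        }
        md_owner = intent;
        return buffer;
}

static void md_put_buffer(void)
{
        md_owner = NULL;
        pthread_mutex_unlock(&md_lock);
}

A caller simply passes its own name, e.g. md_get_buffer(buf, __func__), which is what the adm_detach() and drbd_md_sync() hunks above now do with the real helper.
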
@@ -2753,6 +2781,7 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S)); drbd_resume_io(device); mutex_unlock(&adm_ctx.resource->adm_mutex); + put_ldev(device); out: drbd_adm_finish(&adm_ctx, info, retcode); return 0; @@ -2892,7 +2921,7 @@ static struct drbd_connection *the_only_connection(struct drbd_resource *resourc return list_first_entry(&resource->connections, struct drbd_connection, connections); } -int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, +static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, const struct sib_info *sib) { struct drbd_resource *resource = device->resource; @@ -3622,13 +3651,6 @@ void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib) unsigned seq; int err = -ENOMEM; - if (sib->sib_reason == SIB_SYNC_PROGRESS) { - if (time_after(jiffies, device->rs_last_bcast + HZ)) - device->rs_last_bcast = jiffies; - else - return; - } - seq = atomic_inc_return(&drbd_genl_seq); msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); if (!msg) diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 89736bdbbc70..06e6147c7601 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -60,20 +60,65 @@ static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v) seq_printf(seq, "%ld", v); } +static void drbd_get_syncer_progress(struct drbd_device *device, + union drbd_dev_state state, unsigned long *rs_total, + unsigned long *bits_left, unsigned int *per_mil_done) +{ + /* this is to break it at compile time when we change that, in case we + * want to support more than (1<<32) bits on a 32bit arch. */ + typecheck(unsigned long, device->rs_total); + *rs_total = device->rs_total; + + /* note: both rs_total and rs_left are in bits, i.e. in + * units of BM_BLOCK_SIZE. + * for the percentage, we don't care. */ + + if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T) + *bits_left = device->ov_left; + else + *bits_left = drbd_bm_total_weight(device) - device->rs_failed; + /* >> 10 to prevent overflow, + * +1 to prevent division by zero */ + if (*bits_left > *rs_total) { + /* D'oh. Maybe a logic bug somewhere. More likely just a race + * between state change and reset of rs_total. + */ + *bits_left = *rs_total; + *per_mil_done = *rs_total ? 0 : 1000; + } else { + /* Make sure the division happens in long context. + * We allow up to one petabyte storage right now, + * at a granularity of 4k per bit that is 2**38 bits. + * After shift right and multiplication by 1000, + * this should still fit easily into a 32bit long, + * so we don't need a 64bit division on 32bit arch. + * Note: currently we don't support such large bitmaps on 32bit + * arch anyways, but no harm done to be prepared for it here. + */ + unsigned int shift = *rs_total > UINT_MAX ? 16 : 10; + unsigned long left = *bits_left >> shift; + unsigned long total = 1UL + (*rs_total >> shift); + unsigned long tmp = 1000UL - left * 1000UL/total; + *per_mil_done = tmp; + } +} + + /*lge * progress bars shamelessly adapted from driver/md/md.c * output looks like * [=====>..............] 
33.5% (23456/123456) * finish: 2:20:20 speed: 6,345 (6,456) K/sec */ -static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq) +static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *seq, + union drbd_dev_state state) { - unsigned long db, dt, dbdt, rt, rs_left; + unsigned long db, dt, dbdt, rt, rs_total, rs_left; unsigned int res; int i, x, y; int stalled = 0; - drbd_get_syncer_progress(device, &rs_left, &res); + drbd_get_syncer_progress(device, state, &rs_total, &rs_left, &res); x = res/50; y = 20-x; @@ -85,21 +130,21 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se seq_printf(seq, "."); seq_printf(seq, "] "); - if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) + if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T) seq_printf(seq, "verified:"); else seq_printf(seq, "sync'ed:"); seq_printf(seq, "%3u.%u%% ", res / 10, res % 10); /* if more than a few GB, display in MB */ - if (device->rs_total > (4UL << (30 - BM_BLOCK_SHIFT))) + if (rs_total > (4UL << (30 - BM_BLOCK_SHIFT))) seq_printf(seq, "(%lu/%lu)M", (unsigned long) Bit2KB(rs_left >> 10), - (unsigned long) Bit2KB(device->rs_total >> 10)); + (unsigned long) Bit2KB(rs_total >> 10)); else seq_printf(seq, "(%lu/%lu)K\n\t", (unsigned long) Bit2KB(rs_left), - (unsigned long) Bit2KB(device->rs_total)); + (unsigned long) Bit2KB(rs_total)); /* see drivers/md/md.c * We do not want to overflow, so the order of operands and @@ -150,13 +195,13 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se dt = (jiffies - device->rs_start - device->rs_paused) / HZ; if (dt == 0) dt = 1; - db = device->rs_total - rs_left; + db = rs_total - rs_left; dbdt = Bit2KB(db/dt); seq_printf_with_thousands_grouping(seq, dbdt); seq_printf(seq, ")"); - if (device->state.conn == C_SYNC_TARGET || - device->state.conn == C_VERIFY_S) { + if (state.conn == C_SYNC_TARGET || + state.conn == C_VERIFY_S) { seq_printf(seq, " want: "); seq_printf_with_thousands_grouping(seq, device->c_sync_rate); } @@ -168,8 +213,8 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se unsigned long bm_bits = drbd_bm_bits(device); unsigned long bit_pos; unsigned long long stop_sector = 0; - if (device->state.conn == C_VERIFY_S || - device->state.conn == C_VERIFY_T) { + if (state.conn == C_VERIFY_S || + state.conn == C_VERIFY_T) { bit_pos = bm_bits - device->ov_left; if (verify_can_do_stop_sector(device)) stop_sector = device->ov_stop_sector; @@ -188,22 +233,13 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se } } -static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) -{ - struct bm_extent *bme = lc_entry(e, struct bm_extent, lce); - - seq_printf(seq, "%5d %s %s\n", bme->rs_left, - bme->flags & BME_NO_WRITES ? "NO_WRITES" : "---------", - bme->flags & BME_LOCKED ? 
"LOCKED" : "------" - ); -} - static int drbd_seq_show(struct seq_file *seq, void *v) { int i, prev_i = -1; const char *sn; struct drbd_device *device; struct net_conf *nc; + union drbd_dev_state state; char wp; static char write_ordering_chars[] = { @@ -241,11 +277,12 @@ static int drbd_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "\n"); prev_i = i; - sn = drbd_conn_str(device->state.conn); + state = device->state; + sn = drbd_conn_str(state.conn); - if (device->state.conn == C_STANDALONE && - device->state.disk == D_DISKLESS && - device->state.role == R_SECONDARY) { + if (state.conn == C_STANDALONE && + state.disk == D_DISKLESS && + state.role == R_SECONDARY) { seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { /* reset device->congestion_reason */ @@ -258,15 +295,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v) " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u " "lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c", i, sn, - drbd_role_str(device->state.role), - drbd_role_str(device->state.peer), - drbd_disk_str(device->state.disk), - drbd_disk_str(device->state.pdsk), + drbd_role_str(state.role), + drbd_role_str(state.peer), + drbd_disk_str(state.disk), + drbd_disk_str(state.pdsk), wp, drbd_suspended(device) ? 's' : 'r', - device->state.aftr_isp ? 'a' : '-', - device->state.peer_isp ? 'p' : '-', - device->state.user_isp ? 'u' : '-', + state.aftr_isp ? 'a' : '-', + state.peer_isp ? 'p' : '-', + state.user_isp ? 'u' : '-', device->congestion_reason ?: '-', test_bit(AL_SUSPENDED, &device->flags) ? 's' : '-', device->send_cnt/2, @@ -281,17 +318,17 @@ static int drbd_seq_show(struct seq_file *seq, void *v) atomic_read(&device->unacked_cnt), atomic_read(&device->ap_bio_cnt), first_peer_device(device)->connection->epochs, - write_ordering_chars[first_peer_device(device)->connection->write_ordering] + write_ordering_chars[device->resource->write_ordering] ); seq_printf(seq, " oos:%llu\n", Bit2KB((unsigned long long) drbd_bm_total_weight(device))); } - if (device->state.conn == C_SYNC_SOURCE || - device->state.conn == C_SYNC_TARGET || - device->state.conn == C_VERIFY_S || - device->state.conn == C_VERIFY_T) - drbd_syncer_progress(device, seq); + if (state.conn == C_SYNC_SOURCE || + state.conn == C_SYNC_TARGET || + state.conn == C_VERIFY_S || + state.conn == C_VERIFY_T) + drbd_syncer_progress(device, seq, state); if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) { lc_seq_printf_stats(seq, device->resync); @@ -299,12 +336,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v) put_ldev(device); } - if (proc_details >= 2) { - if (device->resync) { - lc_seq_dump_details(seq, device->resync, "rs_left", - resync_dump_detail); - } - } + if (proc_details >= 2) + seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt)); } rcu_read_unlock(); @@ -316,7 +349,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file) int err; if (try_module_get(THIS_MODULE)) { - err = single_open(file, drbd_seq_show, PDE_DATA(inode)); + err = single_open(file, drbd_seq_show, NULL); if (err) module_put(THIS_MODULE); return err; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 5b17ec88ea05..9342b8da73ab 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -362,17 +362,14 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto goto fail; } + memset(peer_req, 0, sizeof(*peer_req)); + INIT_LIST_HEAD(&peer_req->w.list); drbd_clear_interval(&peer_req->i); 
peer_req->i.size = data_size; peer_req->i.sector = sector; - peer_req->i.local = false; - peer_req->i.waiting = false; - - peer_req->epoch = NULL; + peer_req->submit_jif = jiffies; peer_req->peer_device = peer_device; peer_req->pages = page; - atomic_set(&peer_req->pending_bios, 0); - peer_req->flags = 0; /* * The block_id is opaque to the receiver. It is not endianness * converted, and sent back to the sender unchanged. @@ -389,11 +386,16 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req, int is_net) { + might_sleep(); if (peer_req->flags & EE_HAS_DIGEST) kfree(peer_req->digest); drbd_free_pages(device, peer_req->pages, is_net); D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0); D_ASSERT(device, drbd_interval_empty(&peer_req->i)); + if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) { + peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; + drbd_al_complete_io(device, &peer_req->i); + } mempool_free(peer_req, drbd_ee_mempool); } @@ -791,8 +793,18 @@ static int receive_first_packet(struct drbd_connection *connection, struct socke { unsigned int header_size = drbd_header_size(connection); struct packet_info pi; + struct net_conf *nc; int err; + rcu_read_lock(); + nc = rcu_dereference(connection->net_conf); + if (!nc) { + rcu_read_unlock(); + return -EIO; + } + sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10; + rcu_read_unlock(); + err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0); if (err != header_size) { if (err >= 0) @@ -809,7 +821,7 @@ static int receive_first_packet(struct drbd_connection *connection, struct socke * drbd_socket_okay() - Free the socket if its connection is not okay * @sock: pointer to the pointer to the socket. 
*/ -static int drbd_socket_okay(struct socket **sock) +static bool drbd_socket_okay(struct socket **sock) { int rr; char tb[4]; @@ -827,6 +839,30 @@ static int drbd_socket_okay(struct socket **sock) return false; } } + +static bool connection_established(struct drbd_connection *connection, + struct socket **sock1, + struct socket **sock2) +{ + struct net_conf *nc; + int timeout; + bool ok; + + if (!*sock1 || !*sock2) + return false; + + rcu_read_lock(); + nc = rcu_dereference(connection->net_conf); + timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10; + rcu_read_unlock(); + schedule_timeout_interruptible(timeout); + + ok = drbd_socket_okay(sock1); + ok = drbd_socket_okay(sock2) && ok; + + return ok; +} + /* Gets called if a connection is established, or if a new minor gets created in a connection */ int drbd_connected(struct drbd_peer_device *peer_device) @@ -868,8 +904,8 @@ static int conn_connect(struct drbd_connection *connection) struct drbd_socket sock, msock; struct drbd_peer_device *peer_device; struct net_conf *nc; - int vnr, timeout, h, ok; - bool discard_my_data; + int vnr, timeout, h; + bool discard_my_data, ok; enum drbd_state_rv rv; struct accept_wait_data ad = { .connection = connection, @@ -913,17 +949,8 @@ static int conn_connect(struct drbd_connection *connection) } } - if (sock.socket && msock.socket) { - rcu_read_lock(); - nc = rcu_dereference(connection->net_conf); - timeout = nc->ping_timeo * HZ / 10; - rcu_read_unlock(); - schedule_timeout_interruptible(timeout); - ok = drbd_socket_okay(&sock.socket); - ok = drbd_socket_okay(&msock.socket) && ok; - if (ok) - break; - } + if (connection_established(connection, &sock.socket, &msock.socket)) + break; retry: s = drbd_wait_for_connect(connection, &ad); @@ -969,8 +996,7 @@ randomize: goto out_release_sockets; } - ok = drbd_socket_okay(&sock.socket); - ok = drbd_socket_okay(&msock.socket) && ok; + ok = connection_established(connection, &sock.socket, &msock.socket); } while (!ok); if (ad.s_listen) @@ -1151,7 +1177,7 @@ static void drbd_flush(struct drbd_connection *connection) struct drbd_peer_device *peer_device; int vnr; - if (connection->write_ordering >= WO_bdev_flush) { + if (connection->resource->write_ordering >= WO_bdev_flush) { rcu_read_lock(); idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { struct drbd_device *device = peer_device->device; @@ -1161,14 +1187,22 @@ static void drbd_flush(struct drbd_connection *connection) kref_get(&device->kref); rcu_read_unlock(); + /* Right now, we have only this one synchronous code path + * for flushes between request epochs. + * We may want to make those asynchronous, + * or at least parallelize the flushes to the volume devices. + */ + device->flush_jif = jiffies; + set_bit(FLUSH_PENDING, &device->flags); rv = blkdev_issue_flush(device->ldev->backing_bdev, GFP_NOIO, NULL); + clear_bit(FLUSH_PENDING, &device->flags); if (rv) { drbd_info(device, "local disk flush failed with status %d\n", rv); /* would rather check on EOPNOTSUPP, but that is not reliable. 
* don't try again for ANY return value != 0 * if (rv == -EOPNOTSUPP) */ - drbd_bump_write_ordering(connection, WO_drain_io); + drbd_bump_write_ordering(connection->resource, NULL, WO_drain_io); } put_ldev(device); kref_put(&device->kref, drbd_destroy_device); @@ -1257,15 +1291,30 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio return rv; } +static enum write_ordering_e +max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo) +{ + struct disk_conf *dc; + + dc = rcu_dereference(bdev->disk_conf); + + if (wo == WO_bdev_flush && !dc->disk_flushes) + wo = WO_drain_io; + if (wo == WO_drain_io && !dc->disk_drain) + wo = WO_none; + + return wo; +} + /** * drbd_bump_write_ordering() - Fall back to an other write ordering method * @connection: DRBD connection. * @wo: Write ordering method to try. */ -void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo) +void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, + enum write_ordering_e wo) { - struct disk_conf *dc; - struct drbd_peer_device *peer_device; + struct drbd_device *device; enum write_ordering_e pwo; int vnr; static char *write_ordering_str[] = { @@ -1274,26 +1323,27 @@ void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ord [WO_bdev_flush] = "flush", }; - pwo = connection->write_ordering; - wo = min(pwo, wo); + pwo = resource->write_ordering; + if (wo != WO_bdev_flush) + wo = min(pwo, wo); rcu_read_lock(); - idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { - struct drbd_device *device = peer_device->device; + idr_for_each_entry(&resource->devices, device, vnr) { + if (get_ldev(device)) { + wo = max_allowed_wo(device->ldev, wo); + if (device->ldev == bdev) + bdev = NULL; + put_ldev(device); + } + } - if (!get_ldev_if_state(device, D_ATTACHING)) - continue; - dc = rcu_dereference(device->ldev->disk_conf); + if (bdev) + wo = max_allowed_wo(bdev, wo); - if (wo == WO_bdev_flush && !dc->disk_flushes) - wo = WO_drain_io; - if (wo == WO_drain_io && !dc->disk_drain) - wo = WO_none; - put_ldev(device); - } rcu_read_unlock(); - connection->write_ordering = wo; - if (pwo != connection->write_ordering || wo == WO_bdev_flush) - drbd_info(connection, "Method to ensure write ordering: %s\n", write_ordering_str[connection->write_ordering]); + + resource->write_ordering = wo; + if (pwo != resource->write_ordering || wo == WO_bdev_flush) + drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]); } /** @@ -1330,6 +1380,13 @@ int drbd_submit_peer_request(struct drbd_device *device, /* wait for all pending IO completions, before we start * zeroing things out. 
*/ conn_wait_active_ee_empty(first_peer_device(device)->connection); + /* add it to the active list now, + * so we can find it to present it in debugfs */ + peer_req->submit_jif = jiffies; + peer_req->flags |= EE_SUBMITTED; + spin_lock_irq(&device->resource->req_lock); + list_add_tail(&peer_req->w.list, &device->active_ee); + spin_unlock_irq(&device->resource->req_lock); if (blkdev_issue_zeroout(device->ldev->backing_bdev, sector, ds >> 9, GFP_NOIO)) peer_req->flags |= EE_WAS_ERROR; @@ -1398,6 +1455,9 @@ submit: D_ASSERT(device, page == NULL); atomic_set(&peer_req->pending_bios, n_bios); + /* for debugfs: update timestamp, mark as submitted */ + peer_req->submit_jif = jiffies; + peer_req->flags |= EE_SUBMITTED; do { bio = bios; bios = bios->bi_next; @@ -1471,7 +1531,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf * R_PRIMARY crashes now. * Therefore we must send the barrier_ack after the barrier request was * completed. */ - switch (connection->write_ordering) { + switch (connection->resource->write_ordering) { case WO_none: if (rv == FE_RECYCLED) return 0; @@ -1498,7 +1558,8 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf return 0; default: - drbd_err(connection, "Strangeness in connection->write_ordering %d\n", connection->write_ordering); + drbd_err(connection, "Strangeness in connection->write_ordering %d\n", + connection->resource->write_ordering); return -EIO; } @@ -1531,7 +1592,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, struct drbd_peer_request *peer_req; struct page *page; int dgs, ds, err; - int data_size = pi->size; + unsigned int data_size = pi->size; void *dig_in = peer_device->connection->int_dig_in; void *dig_vv = peer_device->connection->int_dig_vv; unsigned long *data; @@ -1578,6 +1639,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, if (!peer_req) return NULL; + peer_req->flags |= EE_WRITE; if (trim) return peer_req; @@ -1734,9 +1796,10 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto * respective _drbd_clear_done_ee */ peer_req->w.cb = e_end_resync_block; + peer_req->submit_jif = jiffies; spin_lock_irq(&device->resource->req_lock); - list_add(&peer_req->w.list, &device->sync_ee); + list_add_tail(&peer_req->w.list, &device->sync_ee); spin_unlock_irq(&device->resource->req_lock); atomic_add(pi->size >> 9, &device->rs_sect_ev); @@ -1889,6 +1952,7 @@ static int e_end_block(struct drbd_work *w, int cancel) } dec_unacked(device); } + /* we delete from the conflict detection hash _after_ we sent out the * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ if (peer_req->flags & EE_IN_INTERVAL_TREE) { @@ -2115,6 +2179,8 @@ static int handle_write_conflicts(struct drbd_device *device, drbd_for_each_overlap(i, &device->write_requests, sector, size) { if (i == &peer_req->i) continue; + if (i->completed) + continue; if (!i->local) { /* @@ -2147,7 +2213,6 @@ static int handle_write_conflicts(struct drbd_device *device, (unsigned long long)sector, size, superseded ? "local" : "remote"); - inc_unacked(device); peer_req->w.cb = superseded ? 
e_send_superseded : e_send_retry_write; list_add_tail(&peer_req->w.list, &device->done_ee); @@ -2206,6 +2271,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * { struct drbd_peer_device *peer_device; struct drbd_device *device; + struct net_conf *nc; sector_t sector; struct drbd_peer_request *peer_req; struct p_data *p = pi->data; @@ -2245,6 +2311,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * } peer_req->w.cb = e_end_block; + peer_req->submit_jif = jiffies; + peer_req->flags |= EE_APPLICATION; dp_flags = be32_to_cpu(p->dp_flags); rw |= wire_flags_to_bio(dp_flags); @@ -2271,9 +2339,36 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * spin_unlock(&connection->epoch_lock); rcu_read_lock(); - tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries; + nc = rcu_dereference(peer_device->connection->net_conf); + tp = nc->two_primaries; + if (peer_device->connection->agreed_pro_version < 100) { + switch (nc->wire_protocol) { + case DRBD_PROT_C: + dp_flags |= DP_SEND_WRITE_ACK; + break; + case DRBD_PROT_B: + dp_flags |= DP_SEND_RECEIVE_ACK; + break; + } + } rcu_read_unlock(); + + if (dp_flags & DP_SEND_WRITE_ACK) { + peer_req->flags |= EE_SEND_WRITE_ACK; + inc_unacked(device); + /* corresponding dec_unacked() in e_end_block() + * respective _drbd_clear_done_ee */ + } + + if (dp_flags & DP_SEND_RECEIVE_ACK) { + /* I really don't like it that the receiver thread + * sends on the msock, but anyways */ + drbd_send_ack(first_peer_device(device), P_RECV_ACK, peer_req); + } + if (tp) { + /* two primaries implies protocol C */ + D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK); peer_req->flags |= EE_IN_INTERVAL_TREE; err = wait_for_and_update_peer_seq(peer_device, peer_seq); if (err) @@ -2297,44 +2392,18 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * * active_ee to become empty in drbd_submit_peer_request(); * better not add ourselves here. 
*/ if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0) - list_add(&peer_req->w.list, &device->active_ee); + list_add_tail(&peer_req->w.list, &device->active_ee); spin_unlock_irq(&device->resource->req_lock); if (device->state.conn == C_SYNC_TARGET) wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req)); - if (peer_device->connection->agreed_pro_version < 100) { - rcu_read_lock(); - switch (rcu_dereference(peer_device->connection->net_conf)->wire_protocol) { - case DRBD_PROT_C: - dp_flags |= DP_SEND_WRITE_ACK; - break; - case DRBD_PROT_B: - dp_flags |= DP_SEND_RECEIVE_ACK; - break; - } - rcu_read_unlock(); - } - - if (dp_flags & DP_SEND_WRITE_ACK) { - peer_req->flags |= EE_SEND_WRITE_ACK; - inc_unacked(device); - /* corresponding dec_unacked() in e_end_block() - * respective _drbd_clear_done_ee */ - } - - if (dp_flags & DP_SEND_RECEIVE_ACK) { - /* I really don't like it that the receiver thread - * sends on the msock, but anyways */ - drbd_send_ack(first_peer_device(device), P_RECV_ACK, peer_req); - } - if (device->state.pdsk < D_INCONSISTENT) { /* In case we have the only disk of the cluster, */ drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); - peer_req->flags |= EE_CALL_AL_COMPLETE_IO; peer_req->flags &= ~EE_MAY_SET_IN_SYNC; - drbd_al_begin_io(device, &peer_req->i, true); + drbd_al_begin_io(device, &peer_req->i); + peer_req->flags |= EE_CALL_AL_COMPLETE_IO; } err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR); @@ -2347,8 +2416,10 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * list_del(&peer_req->w.list); drbd_remove_epoch_entry_interval(device, peer_req); spin_unlock_irq(&device->resource->req_lock); - if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) + if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) { + peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; drbd_al_complete_io(device, &peer_req->i); + } out_interrupted: drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP); @@ -2368,13 +2439,14 @@ out_interrupted: * The current sync rate used here uses only the most recent two step marks, * to have a short time average so we can react faster. */ -bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) +bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, + bool throttle_if_app_is_waiting) { struct lc_element *tmp; - bool throttle = true; + bool throttle = drbd_rs_c_min_rate_throttle(device); - if (!drbd_rs_c_min_rate_throttle(device)) - return false; + if (!throttle || throttle_if_app_is_waiting) + return throttle; spin_lock_irq(&device->al_lock); tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); @@ -2382,7 +2454,8 @@ bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); if (test_bit(BME_PRIORITY, &bm_ext->flags)) throttle = false; - /* Do not slow down if app IO is already waiting for this extent */ + /* Do not slow down if app IO is already waiting for this extent, + * and our progress is necessary for application IO to complete. 
*/ } spin_unlock_irq(&device->al_lock); @@ -2407,7 +2480,9 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - atomic_read(&device->rs_sect_ev); - if (!device->rs_last_events || curr_events - device->rs_last_events > 64) { + + if (atomic_read(&device->ap_actlog_cnt) + || !device->rs_last_events || curr_events - device->rs_last_events > 64) { unsigned long rs_left; int i; @@ -2508,6 +2583,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet peer_req->w.cb = w_e_end_data_req; fault_type = DRBD_FAULT_DT_RD; /* application IO, don't drbd_rs_begin_io */ + peer_req->flags |= EE_APPLICATION; goto submit; case P_RS_DATA_REQUEST: @@ -2538,6 +2614,8 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet peer_req->w.cb = w_e_end_csum_rs_req; /* used in the sector offset progress display */ device->bm_resync_fo = BM_SECT_TO_BIT(sector); + /* remember to report stats in drbd_resync_finished */ + device->use_csums = true; } else if (pi->cmd == P_OV_REPLY) { /* track progress, we may need to throttle */ atomic_add(size >> 9, &device->rs_sect_in); @@ -2595,8 +2673,20 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet * we would also throttle its application reads. * In that case, throttling is done on the SyncTarget only. */ - if (device->state.peer != R_PRIMARY && drbd_rs_should_slow_down(device, sector)) + + /* Even though this may be a resync request, we do add to "read_ee"; + * "sync_ee" is only used for resync WRITEs. + * Add to list early, so debugfs can find this request + * even if we have to sleep below. */ + spin_lock_irq(&device->resource->req_lock); + list_add_tail(&peer_req->w.list, &device->read_ee); + spin_unlock_irq(&device->resource->req_lock); + + update_receiver_timing_details(connection, drbd_rs_should_slow_down); + if (device->state.peer != R_PRIMARY + && drbd_rs_should_slow_down(device, sector, false)) schedule_timeout_uninterruptible(HZ/10); + update_receiver_timing_details(connection, drbd_rs_begin_io); if (drbd_rs_begin_io(device, sector)) goto out_free_e; @@ -2604,22 +2694,20 @@ submit_for_resync: atomic_add(size >> 9, &device->rs_sect_ev); submit: + update_receiver_timing_details(connection, drbd_submit_peer_request); inc_unacked(device); - spin_lock_irq(&device->resource->req_lock); - list_add_tail(&peer_req->w.list, &device->read_ee); - spin_unlock_irq(&device->resource->req_lock); - if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0) return 0; /* don't care for the reason here */ drbd_err(device, "submit failed, triggering re-connect\n"); + +out_free_e: spin_lock_irq(&device->resource->req_lock); list_del(&peer_req->w.list); spin_unlock_irq(&device->resource->req_lock); /* no drbd_rs_complete_io(), we are dropping the connection anyways */ -out_free_e: put_ldev(device); drbd_free_peer_req(device, peer_req); return -EIO; @@ -2842,8 +2930,10 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid, -1091 requires proto 91 -1096 requires proto 96 */ -static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local) +static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __must_hold(local) { + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *const connection = peer_device ? 
peer_device->connection : NULL; u64 self, peer; int i, j; @@ -2869,7 +2959,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { - if (first_peer_device(device)->connection->agreed_pro_version < 91) + if (connection->agreed_pro_version < 91) return -1091; if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && @@ -2892,7 +2982,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) { - if (first_peer_device(device)->connection->agreed_pro_version < 91) + if (connection->agreed_pro_version < 91) return -1091; if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && @@ -2925,7 +3015,7 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho case 1: /* self_pri && !peer_pri */ return 1; case 2: /* !self_pri && peer_pri */ return -1; case 3: /* self_pri && peer_pri */ - dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags); + dc = test_bit(RESOLVE_CONFLICTS, &connection->flags); return dc ? -1 : 1; } } @@ -2938,14 +3028,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho *rule_nr = 51; peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1); if (self == peer) { - if (first_peer_device(device)->connection->agreed_pro_version < 96 ? + if (connection->agreed_pro_version < 96 ? (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) { /* The last P_SYNC_UUID did not get though. Undo the last start of resync as sync source modifications of the peer's UUIDs. */ - if (first_peer_device(device)->connection->agreed_pro_version < 91) + if (connection->agreed_pro_version < 91) return -1091; device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START]; @@ -2975,14 +3065,14 @@ static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_ho *rule_nr = 71; self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); if (self == peer) { - if (first_peer_device(device)->connection->agreed_pro_version < 96 ? + if (connection->agreed_pro_version < 96 ? (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) : self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { /* The last P_SYNC_UUID did not get though. Undo the last start of resync as sync source modifications of our UUIDs. */ - if (first_peer_device(device)->connection->agreed_pro_version < 91) + if (connection->agreed_pro_version < 91) return -1091; __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); @@ -3352,8 +3442,7 @@ disconnect: * return: NULL (alg name was "") * ERR_PTR(error) if something goes wrong * or the crypto hash ptr, if it worked out ok. 
*/ -static -struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device, +static struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device, const char *alg, const char *name) { struct crypto_hash *tfm; @@ -3639,7 +3728,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info struct drbd_device *device; struct p_sizes *p = pi->data; enum determine_dev_size dd = DS_UNCHANGED; - sector_t p_size, p_usize, my_usize; + sector_t p_size, p_usize, p_csize, my_usize; int ldsc = 0; /* local disk size changed */ enum dds_flags ddsf; @@ -3650,6 +3739,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info p_size = be64_to_cpu(p->d_size); p_usize = be64_to_cpu(p->u_size); + p_csize = be64_to_cpu(p->c_size); /* just store the peer's disk size for now. * we still need to figure out whether we accept that. */ @@ -3710,7 +3800,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info } device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); - drbd_reconsider_max_bio_size(device); /* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size(). In case we cleared the QUEUE_FLAG_DISCARD from our queue in drbd_reconsider_max_bio_size(), we can be sure that after @@ -3718,14 +3807,28 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info ddsf = be16_to_cpu(p->dds_flags); if (get_ldev(device)) { + drbd_reconsider_max_bio_size(device, device->ldev); dd = drbd_determine_dev_size(device, ddsf, NULL); put_ldev(device); if (dd == DS_ERROR) return -EIO; drbd_md_sync(device); } else { - /* I am diskless, need to accept the peer's size. */ - drbd_set_my_capacity(device, p_size); + /* + * I am diskless, need to accept the peer's *current* size. + * I must NOT accept the peers backing disk size, + * it may have been larger than mine all along... + * + * At this point, the peer knows more about my disk, or at + * least about what we last agreed upon, than myself. + * So if his c_size is less than his d_size, the most likely + * reason is that *my* d_size was smaller last time we checked. + * + * However, if he sends a zero current size, + * take his (user-capped or) backing disk size anyways. 
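The chained GNU "?:" operators in the drbd_set_my_capacity() call that follows pick the first non-zero of the peer's current size, the user-capped size and the backing-device size, in that order. A minimal stand-alone sketch of the same fallback, with illustrative names:

#include <stdio.h>

typedef unsigned long long sector_t;

/* First non-zero of: peer's current size, user-configured cap, peer's
 * backing device size.  Mirrors "p_csize ?: p_usize ?: p_size" without
 * relying on the GNU extension. */
static sector_t choose_diskless_capacity(sector_t c_size, sector_t u_size, sector_t d_size)
{
	if (c_size)
		return c_size;   /* what the peer currently exposes */
	if (u_size)
		return u_size;   /* user-configured cap */
	return d_size;           /* peer's backing device size */
}

int main(void)
{
	/* peer reports no current size yet, but a user cap of 1 GiB (in 512-byte sectors) */
	printf("%llu\n", choose_diskless_capacity(0, 2097152, 4194304));
	return 0;
}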
+ */ + drbd_reconsider_max_bio_size(device, NULL); + drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size); } if (get_ldev(device)) { @@ -4501,6 +4604,7 @@ static void drbdd(struct drbd_connection *connection) struct data_cmd *cmd; drbd_thread_current_set_cpu(&connection->receiver); + update_receiver_timing_details(connection, drbd_recv_header); if (drbd_recv_header(connection, &pi)) goto err_out; @@ -4519,12 +4623,14 @@ static void drbdd(struct drbd_connection *connection) } if (shs) { + update_receiver_timing_details(connection, drbd_recv_all_warn); err = drbd_recv_all_warn(connection, pi.data, shs); if (err) goto err_out; pi.size -= shs; } + update_receiver_timing_details(connection, cmd->fn); err = cmd->fn(connection, &pi); if (err) { drbd_err(connection, "error receiving %s, e: %d l: %d!\n", diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 09803d0d5207..c67717d572d1 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -52,7 +52,7 @@ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) { int rw = bio_data_dir(req->master_bio); - unsigned long duration = jiffies - req->start_time; + unsigned long duration = jiffies - req->start_jif; int cpu; cpu = part_stat_lock(); part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration); @@ -66,7 +66,7 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, { struct drbd_request *req; - req = mempool_alloc(drbd_request_mempool, GFP_NOIO); + req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO); if (!req) return NULL; @@ -84,6 +84,8 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, INIT_LIST_HEAD(&req->tl_requests); INIT_LIST_HEAD(&req->w.list); + INIT_LIST_HEAD(&req->req_pending_master_completion); + INIT_LIST_HEAD(&req->req_pending_local); /* one reference to be put by __drbd_make_request */ atomic_set(&req->completion_ref, 1); @@ -92,6 +94,19 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, return req; } +static void drbd_remove_request_interval(struct rb_root *root, + struct drbd_request *req) +{ + struct drbd_device *device = req->device; + struct drbd_interval *i = &req->i; + + drbd_remove_interval(root, i); + + /* Wake up any processes waiting for this request to complete. */ + if (i->waiting) + wake_up(&device->misc_wait); +} + void drbd_req_destroy(struct kref *kref) { struct drbd_request *req = container_of(kref, struct drbd_request, kref); @@ -107,14 +122,30 @@ void drbd_req_destroy(struct kref *kref) return; } - /* remove it from the transfer log. - * well, only if it had been there in the first - * place... if it had not (local only or conflicting - * and never sent), it should still be "empty" as - * initialized in drbd_req_new(), so we can list_del() it - * here unconditionally */ + /* If called from mod_rq_state (expected normal case) or + * drbd_send_and_submit (the less likely normal path), this holds the + * req_lock, and req->tl_requests will typicaly be on ->transfer_log, + * though it may be still empty (never added to the transfer log). + * + * If called from do_retry(), we do NOT hold the req_lock, but we are + * still allowed to unconditionally list_del(&req->tl_requests), + * because it will be on a local on-stack list only. */ list_del_init(&req->tl_requests); + /* finally remove the request from the conflict detection + * respective block_id verification interval tree. 
*/ + if (!drbd_interval_empty(&req->i)) { + struct rb_root *root; + + if (s & RQ_WRITE) + root = &device->write_requests; + else + root = &device->read_requests; + drbd_remove_request_interval(root, req); + } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0) + drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n", + s, (unsigned long long)req->i.sector, req->i.size); + /* if it was a write, we may have to set the corresponding * bit(s) out-of-sync first. If it had a local part, we need to * release the reference to the activity log. */ @@ -188,19 +219,6 @@ void complete_master_bio(struct drbd_device *device, } -static void drbd_remove_request_interval(struct rb_root *root, - struct drbd_request *req) -{ - struct drbd_device *device = req->device; - struct drbd_interval *i = &req->i; - - drbd_remove_interval(root, i); - - /* Wake up any processes waiting for this request to complete. */ - if (i->waiting) - wake_up(&device->misc_wait); -} - /* Helper for __req_mod(). * Set m->bio to the master bio, if it is fit to be completed, * or leave it alone (it is initialized to NULL in __req_mod), @@ -254,18 +272,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK); error = PTR_ERR(req->private_bio); - /* remove the request from the conflict detection - * respective block_id verification hash */ - if (!drbd_interval_empty(&req->i)) { - struct rb_root *root; - - if (rw == WRITE) - root = &device->write_requests; - else - root = &device->read_requests; - drbd_remove_request_interval(root, req); - } - /* Before we can signal completion to the upper layers, * we may need to close the current transfer log epoch. * We are within the request lock, so we can simply compare @@ -301,9 +307,24 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) m->error = ok ? 0 : (error ?: -EIO); m->bio = req->master_bio; req->master_bio = NULL; + /* We leave it in the tree, to be able to verify later + * write-acks in protocol != C during resync. + * But we mark it as "complete", so it won't be counted as + * conflict in a multi-primary setup. */ + req->i.completed = true; } + + if (req->i.waiting) + wake_up(&device->misc_wait); + + /* Either we are about to complete to upper layers, + * or we will restart this request. + * In either case, the request object will be destroyed soon, + * so better remove it from all lists. */ + list_del_init(&req->req_pending_master_completion); } +/* still holds resource->req_lock */ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) { struct drbd_device *device = req->device; @@ -324,12 +345,91 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_ return 1; } +static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) +{ + struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; + if (!connection) + return; + if (connection->req_next == NULL) + connection->req_next = req; +} + +static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) +{ + struct drbd_connection *connection = peer_device ? 
peer_device->connection : NULL; + if (!connection) + return; + if (connection->req_next != req) + return; + list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { + const unsigned s = req->rq_state; + if (s & RQ_NET_QUEUED) + break; + } + if (&req->tl_requests == &connection->transfer_log) + req = NULL; + connection->req_next = req; +} + +static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) +{ + struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; + if (!connection) + return; + if (connection->req_ack_pending == NULL) + connection->req_ack_pending = req; +} + +static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) +{ + struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; + if (!connection) + return; + if (connection->req_ack_pending != req) + return; + list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { + const unsigned s = req->rq_state; + if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING)) + break; + } + if (&req->tl_requests == &connection->transfer_log) + req = NULL; + connection->req_ack_pending = req; +} + +static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) +{ + struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; + if (!connection) + return; + if (connection->req_not_net_done == NULL) + connection->req_not_net_done = req; +} + +static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) +{ + struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; + if (!connection) + return; + if (connection->req_not_net_done != req) + return; + list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { + const unsigned s = req->rq_state; + if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE)) + break; + } + if (&req->tl_requests == &connection->transfer_log) + req = NULL; + connection->req_not_net_done = req; +} + /* I'd like this to be the only place that manipulates * req->completion_ref and req->kref. 
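The set_if_null_req_*() / advance_conn_req_*() helpers above replace repeated scans of the transfer log with cached pointers to the oldest request still in a given network state; a cache is only advanced when that particular request leaves the state. A reduced stand-alone model of the idea (the singly linked list and field names are illustrative, not DRBD's types):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct req {
	struct req *next;      /* transfer-log order, oldest first */
	bool queued;           /* stands in for RQ_NET_QUEUED */
};

struct conn {
	struct req *log;       /* head of the transfer log */
	struct req *req_next;  /* cached: oldest request still queued */
};

static void set_if_null_req_next(struct conn *c, struct req *r)
{
	if (c->req_next == NULL)
		c->req_next = r;
}

static void advance_req_next(struct conn *c, struct req *r)
{
	if (c->req_next != r)
		return;                /* some other request is still the oldest */
	for (r = r->next; r; r = r->next)
		if (r->queued)
			break;
	c->req_next = r;               /* NULL if nothing is queued any more */
}

int main(void)
{
	struct req b = { NULL, true }, a = { &b, true };
	struct conn c = { &a, NULL };

	set_if_null_req_next(&c, &a);
	a.queued = false;              /* the oldest request got sent */
	advance_req_next(&c, &a);
	printf("req_next is %s\n", c.req_next == &b ? "b" : "?");
	return 0;
}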
*/ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, int clear, int set) { struct drbd_device *device = req->device; + struct drbd_peer_device *peer_device = first_peer_device(device); unsigned s = req->rq_state; int c_put = 0; int k_put = 0; @@ -356,14 +456,23 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, atomic_inc(&req->completion_ref); } - if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) + if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) { atomic_inc(&req->completion_ref); + set_if_null_req_next(peer_device, req); + } if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK)) kref_get(&req->kref); /* wait for the DONE */ - if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) - atomic_add(req->i.size >> 9, &device->ap_in_flight); + if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) { + /* potentially already completed in the asender thread */ + if (!(s & RQ_NET_DONE)) { + atomic_add(req->i.size >> 9, &device->ap_in_flight); + set_if_null_req_not_net_done(peer_device, req); + } + if (s & RQ_NET_PENDING) + set_if_null_req_ack_pending(peer_device, req); + } if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP)) atomic_inc(&req->completion_ref); @@ -386,20 +495,34 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, ++k_put; else ++c_put; + list_del_init(&req->req_pending_local); } if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) { dec_ap_pending(device); ++c_put; + req->acked_jif = jiffies; + advance_conn_req_ack_pending(peer_device, req); } - if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) + if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) { ++c_put; + advance_conn_req_next(peer_device, req); + } - if ((s & RQ_EXP_BARR_ACK) && !(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) { - if (req->rq_state & RQ_NET_SENT) + if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) { + if (s & RQ_NET_SENT) atomic_sub(req->i.size >> 9, &device->ap_in_flight); - ++k_put; + if (s & RQ_EXP_BARR_ACK) + ++k_put; + req->net_done_jif = jiffies; + + /* in ahead/behind mode, or just in case, + * before we finally destroy this request, + * the caching pointers must not reference it anymore */ + advance_conn_req_next(peer_device, req); + advance_conn_req_ack_pending(peer_device, req); + advance_conn_req_not_net_done(peer_device, req); } /* potentially complete and destroy */ @@ -439,6 +562,19 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request bdevname(device->ldev->backing_bdev, b)); } +/* Helper for HANDED_OVER_TO_NETWORK. + * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)? + * Is it also still "PENDING"? + * --> If so, clear PENDING and set NET_OK below. 
+ * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster + * (and we must not set RQ_NET_OK) */ +static inline bool is_pending_write_protocol_A(struct drbd_request *req) +{ + return (req->rq_state & + (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK)) + == (RQ_WRITE|RQ_NET_PENDING); +} + /* obviously this could be coded as many single functions * instead of one huge switch, * or by putting the code directly in the respective locations @@ -454,7 +590,9 @@ static void drbd_report_io_error(struct drbd_device *device, struct drbd_request int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct bio_and_error *m) { - struct drbd_device *device = req->device; + struct drbd_device *const device = req->device; + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; struct net_conf *nc; int p, rv = 0; @@ -477,7 +615,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, * and from w_read_retry_remote */ D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); rcu_read_lock(); - nc = rcu_dereference(first_peer_device(device)->connection->net_conf); + nc = rcu_dereference(connection->net_conf); p = nc->wire_protocol; rcu_read_unlock(); req->rq_state |= @@ -549,7 +687,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); mod_rq_state(req, m, 0, RQ_NET_QUEUED); req->w.cb = w_send_read_req; - drbd_queue_work(&first_peer_device(device)->connection->sender_work, + drbd_queue_work(&connection->sender_work, &req->w); break; @@ -585,23 +723,23 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, D_ASSERT(device, req->rq_state & RQ_NET_PENDING); mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); req->w.cb = w_send_dblock; - drbd_queue_work(&first_peer_device(device)->connection->sender_work, + drbd_queue_work(&connection->sender_work, &req->w); /* close the epoch, in case it outgrew the limit */ rcu_read_lock(); - nc = rcu_dereference(first_peer_device(device)->connection->net_conf); + nc = rcu_dereference(connection->net_conf); p = nc->max_epoch_size; rcu_read_unlock(); - if (first_peer_device(device)->connection->current_tle_writes >= p) - start_new_tl_epoch(first_peer_device(device)->connection); + if (connection->current_tle_writes >= p) + start_new_tl_epoch(connection); break; case QUEUE_FOR_SEND_OOS: mod_rq_state(req, m, 0, RQ_NET_QUEUED); req->w.cb = w_send_out_of_sync; - drbd_queue_work(&first_peer_device(device)->connection->sender_work, + drbd_queue_work(&connection->sender_work, &req->w); break; @@ -615,18 +753,16 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, case HANDED_OVER_TO_NETWORK: /* assert something? */ - if (bio_data_dir(req->master_bio) == WRITE && - !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) { + if (is_pending_write_protocol_A(req)) /* this is what is dangerous about protocol A: * pretend it was successfully written on the peer. */ - if (req->rq_state & RQ_NET_PENDING) - mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); - /* else: neg-ack was faster... 
*/ - /* it is still not yet RQ_NET_DONE until the - * corresponding epoch barrier got acked as well, - * so we know what to dirty on connection loss */ - } - mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); + mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING, + RQ_NET_SENT|RQ_NET_OK); + else + mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); + /* It is still not yet RQ_NET_DONE until the + * corresponding epoch barrier got acked as well, + * so we know what to dirty on connection loss. */ break; case OOS_HANDED_TO_NETWORK: @@ -658,12 +794,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, case WRITE_ACKED_BY_PEER_AND_SIS: req->rq_state |= RQ_NET_SIS; case WRITE_ACKED_BY_PEER: - D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); - /* protocol C; successfully written on peer. + /* Normal operation protocol C: successfully written on peer. + * During resync, even in protocol != C, + * we requested an explicit write ack anyways. + * Which means we cannot even assert anything here. * Nothing more to do here. * We want to keep the tl in place for all protocols, to cater * for volatile write-back caches on lower level devices. */ - goto ack_common; case RECV_ACKED_BY_PEER: D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); @@ -671,7 +808,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, * see also notes above in HANDED_OVER_TO_NETWORK about * protocol != C */ ack_common: - D_ASSERT(device, req->rq_state & RQ_NET_PENDING); mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); break; @@ -714,7 +850,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, get_ldev(device); /* always succeeds in this call path */ req->w.cb = w_restart_disk_io; - drbd_queue_work(&first_peer_device(device)->connection->sender_work, + drbd_queue_work(&connection->sender_work, &req->w); break; @@ -736,7 +872,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); if (req->w.cb) { - drbd_queue_work(&first_peer_device(device)->connection->sender_work, + /* w.cb expected to be w_send_dblock, or w_send_read_req */ + drbd_queue_work(&connection->sender_work, &req->w); rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; } /* else: FIXME can this happen? */ @@ -769,7 +906,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, break; case QUEUE_AS_DRBD_BARRIER: - start_new_tl_epoch(first_peer_device(device)->connection); + start_new_tl_epoch(connection); mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); break; }; @@ -886,6 +1023,9 @@ static void maybe_pull_ahead(struct drbd_device *device) connection->agreed_pro_version < 96) return; + if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD) + return; /* nothing to do ... */ + /* If I don't even have good local storage, we can not reasonably try * to pull ahead of the peer. We also need the local reference to make * sure device->act_log is there. @@ -1021,6 +1161,7 @@ drbd_submit_req_private_bio(struct drbd_request *req) * stable storage, and this is a WRITE, we may not even submit * this bio. */ if (get_ldev(device)) { + req->pre_submit_jif = jiffies; if (drbd_insert_fault(device, rw == WRITE ? DRBD_FAULT_DT_WR : rw == READ ? 
DRBD_FAULT_DT_RD @@ -1035,10 +1176,14 @@ drbd_submit_req_private_bio(struct drbd_request *req) static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) { - spin_lock(&device->submit.lock); + spin_lock_irq(&device->resource->req_lock); list_add_tail(&req->tl_requests, &device->submit.writes); - spin_unlock(&device->submit.lock); + list_add_tail(&req->req_pending_master_completion, + &device->pending_master_completion[1 /* WRITE */]); + spin_unlock_irq(&device->resource->req_lock); queue_work(device->submit.wq, &device->submit.worker); + /* do_submit() may sleep internally on al_wait, too */ + wake_up(&device->al_wait); } /* returns the new drbd_request pointer, if the caller is expected to @@ -1047,7 +1192,7 @@ static void drbd_queue_write(struct drbd_device *device, struct drbd_request *re * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request. */ static struct drbd_request * -drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_time) +drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif) { const int rw = bio_data_dir(bio); struct drbd_request *req; @@ -1062,7 +1207,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long bio_endio(bio, -ENOMEM); return ERR_PTR(-ENOMEM); } - req->start_time = start_time; + req->start_jif = start_jif; if (!get_ldev(device)) { bio_put(req->private_bio); @@ -1075,10 +1220,12 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long if (rw == WRITE && req->private_bio && req->i.size && !test_bit(AL_SUSPENDED, &device->flags)) { if (!drbd_al_begin_io_fastpath(device, &req->i)) { + atomic_inc(&device->ap_actlog_cnt); drbd_queue_write(device, req); return NULL; } req->rq_state |= RQ_IN_ACT_LOG; + req->in_actlog_jif = jiffies; } return req; @@ -1086,11 +1233,13 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) { + struct drbd_resource *resource = device->resource; const int rw = bio_rw(req->master_bio); struct bio_and_error m = { NULL, }; bool no_remote = false; + bool submit_private_bio = false; - spin_lock_irq(&device->resource->req_lock); + spin_lock_irq(&resource->req_lock); if (rw == WRITE) { /* This may temporarily give up the req_lock, * but will re-aquire it before it returns here. @@ -1148,13 +1297,18 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request no_remote = true; } + /* If it took the fast path in drbd_request_prepare, add it here. + * The slow path has added it already. 
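The fast path / slow path split referred to in the comment above works like this: if the activity-log extent is already hot, the submitting context may proceed directly; otherwise the request is parked for the submit worker, which batches the AL update. A much-simplified sketch of that decision (all names below are illustrative stand-ins, not DRBD code):

#include <stdbool.h>
#include <stdio.h>

static bool al_extent_is_hot(unsigned int extent)
{
	return extent < 4;      /* pretend the first 4 extents are currently active */
}

static void queue_for_worker(unsigned int extent)
{
	printf("extent %u: queued, submit worker will commit the AL first\n", extent);
}

static void submit_write(unsigned int extent)
{
	if (al_extent_is_hot(extent)) {
		printf("extent %u: fast path, submitting directly\n", extent);
		return;
	}
	queue_for_worker(extent);
}

int main(void)
{
	submit_write(2);
	submit_write(7);
	return 0;
}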
*/ + if (list_empty(&req->req_pending_master_completion)) + list_add_tail(&req->req_pending_master_completion, + &device->pending_master_completion[rw == WRITE]); if (req->private_bio) { /* needs to be marked within the same spinlock */ + list_add_tail(&req->req_pending_local, + &device->pending_completion[rw == WRITE]); _req_mod(req, TO_BE_SUBMITTED); /* but we need to give up the spinlock to submit */ - spin_unlock_irq(&device->resource->req_lock); - drbd_submit_req_private_bio(req); - spin_lock_irq(&device->resource->req_lock); + submit_private_bio = true; } else if (no_remote) { nodata: if (__ratelimit(&drbd_ratelimit_state)) @@ -1167,15 +1321,23 @@ nodata: out: if (drbd_req_put_completion_ref(req, &m, 1)) kref_put(&req->kref, drbd_req_destroy); - spin_unlock_irq(&device->resource->req_lock); - + spin_unlock_irq(&resource->req_lock); + + /* Even though above is a kref_put(), this is safe. + * As long as we still need to submit our private bio, + * we hold a completion ref, and the request cannot disappear. + * If however this request did not even have a private bio to submit + * (e.g. remote read), req may already be invalid now. + * That's why we cannot check on req->private_bio. */ + if (submit_private_bio) + drbd_submit_req_private_bio(req); if (m.bio) complete_master_bio(device, &m); } -void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_time) +void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif) { - struct drbd_request *req = drbd_request_prepare(device, bio, start_time); + struct drbd_request *req = drbd_request_prepare(device, bio, start_jif); if (IS_ERR_OR_NULL(req)) return; drbd_send_and_submit(device, req); @@ -1194,6 +1356,8 @@ static void submit_fast_path(struct drbd_device *device, struct list_head *incom continue; req->rq_state |= RQ_IN_ACT_LOG; + req->in_actlog_jif = jiffies; + atomic_dec(&device->ap_actlog_cnt); } list_del_init(&req->tl_requests); @@ -1203,7 +1367,8 @@ static void submit_fast_path(struct drbd_device *device, struct list_head *incom static bool prepare_al_transaction_nonblock(struct drbd_device *device, struct list_head *incoming, - struct list_head *pending) + struct list_head *pending, + struct list_head *later) { struct drbd_request *req, *tmp; int wake = 0; @@ -1212,45 +1377,105 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device, spin_lock_irq(&device->al_lock); list_for_each_entry_safe(req, tmp, incoming, tl_requests) { err = drbd_al_begin_io_nonblock(device, &req->i); + if (err == -ENOBUFS) + break; if (err == -EBUSY) wake = 1; if (err) - continue; - req->rq_state |= RQ_IN_ACT_LOG; - list_move_tail(&req->tl_requests, pending); + list_move_tail(&req->tl_requests, later); + else + list_move_tail(&req->tl_requests, pending); } spin_unlock_irq(&device->al_lock); if (wake) wake_up(&device->al_wait); - return !list_empty(pending); } +void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) +{ + struct drbd_request *req, *tmp; + + list_for_each_entry_safe(req, tmp, pending, tl_requests) { + req->rq_state |= RQ_IN_ACT_LOG; + req->in_actlog_jif = jiffies; + atomic_dec(&device->ap_actlog_cnt); + list_del_init(&req->tl_requests); + drbd_send_and_submit(device, req); + } +} + void do_submit(struct work_struct *ws) { struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker); - LIST_HEAD(incoming); - LIST_HEAD(pending); - struct drbd_request *req, *tmp; + LIST_HEAD(incoming); /* from drbd_make_request() */ + 
LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */ + LIST_HEAD(busy); /* blocked by resync requests */ + + /* grab new incoming requests */ + spin_lock_irq(&device->resource->req_lock); + list_splice_tail_init(&device->submit.writes, &incoming); + spin_unlock_irq(&device->resource->req_lock); for (;;) { - spin_lock(&device->submit.lock); - list_splice_tail_init(&device->submit.writes, &incoming); - spin_unlock(&device->submit.lock); + DEFINE_WAIT(wait); + /* move used-to-be-busy back to front of incoming */ + list_splice_init(&busy, &incoming); submit_fast_path(device, &incoming); if (list_empty(&incoming)) break; -skip_fast_path: - wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending)); - /* Maybe more was queued, while we prepared the transaction? - * Try to stuff them into this transaction as well. - * Be strictly non-blocking here, no wait_event, we already - * have something to commit. - * Stop if we don't make any more progres. - */ for (;;) { + prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE); + + list_splice_init(&busy, &incoming); + prepare_al_transaction_nonblock(device, &incoming, &pending, &busy); + if (!list_empty(&pending)) + break; + + schedule(); + + /* If all currently "hot" activity log extents are kept busy by + * incoming requests, we still must not totally starve new + * requests to "cold" extents. + * Something left on &incoming means there had not been + * enough update slots available, and the activity log + * has been marked as "starving". + * + * Try again now, without looking for new requests, + * effectively blocking all new requests until we made + * at least _some_ progress with what we currently have. + */ + if (!list_empty(&incoming)) + continue; + + /* Nothing moved to pending, but nothing left + * on incoming: all moved to busy! + * Grab new and iterate. */ + spin_lock_irq(&device->resource->req_lock); + list_splice_tail_init(&device->submit.writes, &incoming); + spin_unlock_irq(&device->resource->req_lock); + } + finish_wait(&device->al_wait, &wait); + + /* If the transaction was full, before all incoming requests + * had been processed, skip ahead to commit, and iterate + * without splicing in more incoming requests from upper layers. + * + * Else, if all incoming have been processed, + * they have become either "pending" (to be submitted after + * next transaction commit) or "busy" (blocked by resync). + * + * Maybe more was queued, while we prepared the transaction? + * Try to stuff those into this transaction as well. + * Be strictly non-blocking here, + * we already have something to commit. + * + * Commit if we don't make any more progres. 
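The reworked do_submit() loop above drains the incoming queue, sorts requests into those whose AL slot could be reserved without blocking ("pending") and those blocked for now ("busy"), and then pays for a single activity-log transaction commit per batch rather than one per request. A reduced stand-alone model, using arrays in place of the kernel list heads (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define BATCH 8

static bool al_reserve_nonblock(int extent)
{
	return extent % 3 != 0;         /* pretend every third extent is busy */
}

static void submit_one(int extent)
{
	printf("submitting write to extent %d\n", extent);
}

static void do_submit_batch(const int *incoming, int n)
{
	int pending[BATCH], busy[BATCH];
	int np = 0, nb = 0, i;

	for (i = 0; i < n; i++) {
		if (al_reserve_nonblock(incoming[i]))
			pending[np++] = incoming[i];
		else
			busy[nb++] = incoming[i];   /* retried with the next batch */
	}

	if (np) {
		printf("committing one AL transaction for %d request(s)\n", np);
		for (i = 0; i < np; i++)
			submit_one(pending[i]);
	}
	printf("%d request(s) left busy\n", nb);
}

int main(void)
{
	int incoming[] = { 1, 2, 3, 4, 5, 6 };
	do_submit_batch(incoming, 6);
	return 0;
}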
+ */ + + while (list_empty(&incoming)) { LIST_HEAD(more_pending); LIST_HEAD(more_incoming); bool made_progress; @@ -1260,55 +1485,32 @@ skip_fast_path: if (list_empty(&device->submit.writes)) break; - spin_lock(&device->submit.lock); + spin_lock_irq(&device->resource->req_lock); list_splice_tail_init(&device->submit.writes, &more_incoming); - spin_unlock(&device->submit.lock); + spin_unlock_irq(&device->resource->req_lock); if (list_empty(&more_incoming)) break; - made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending); + made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy); list_splice_tail_init(&more_pending, &pending); list_splice_tail_init(&more_incoming, &incoming); - if (!made_progress) break; } - drbd_al_begin_io_commit(device, false); - - list_for_each_entry_safe(req, tmp, &pending, tl_requests) { - list_del_init(&req->tl_requests); - drbd_send_and_submit(device, req); - } - /* If all currently hot activity log extents are kept busy by - * incoming requests, we still must not totally starve new - * requests to cold extents. In that case, prepare one request - * in blocking mode. */ - list_for_each_entry_safe(req, tmp, &incoming, tl_requests) { - list_del_init(&req->tl_requests); - req->rq_state |= RQ_IN_ACT_LOG; - if (!drbd_al_begin_io_prepare(device, &req->i)) { - /* Corresponding extent was hot after all? */ - drbd_send_and_submit(device, req); - } else { - /* Found a request to a cold extent. - * Put on "pending" list, - * and try to cumulate with more. */ - list_add(&req->tl_requests, &pending); - goto skip_fast_path; - } - } + drbd_al_begin_io_commit(device); + send_and_submit_pending(device, &pending); } } void drbd_make_request(struct request_queue *q, struct bio *bio) { struct drbd_device *device = (struct drbd_device *) q->queuedata; - unsigned long start_time; + unsigned long start_jif; - start_time = jiffies; + start_jif = jiffies; /* * what we "blindly" assume: @@ -1316,7 +1518,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio) D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); inc_ap_bio(device); - __drbd_make_request(device, bio, start_time); + __drbd_make_request(device, bio, start_jif); } /* This is called by bio_add_page(). 
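The next hunk drops find_oldest_requests() entirely: requests are now appended to dedicated per-type lists in submission order, so the oldest pending request is simply the list head and the timer no longer scans the whole transfer log. A minimal FIFO model of that O(1) lookup (illustrative names, not DRBD's list_head machinery):

#include <stddef.h>
#include <stdio.h>

struct pending_req {
	unsigned long submit_jif;
	struct pending_req *next;
};

struct pending_list {
	struct pending_req *head, *tail;
};

static void pending_add_tail(struct pending_list *l, struct pending_req *r)
{
	r->next = NULL;
	if (l->tail)
		l->tail->next = r;
	else
		l->head = r;
	l->tail = r;
}

/* Equivalent in spirit to list_first_entry_or_null(): no scan needed. */
static struct pending_req *pending_oldest(const struct pending_list *l)
{
	return l->head;
}

int main(void)
{
	struct pending_list l = { NULL, NULL };
	struct pending_req a = { 1000, NULL }, b = { 2000, NULL };

	pending_add_tail(&l, &a);
	pending_add_tail(&l, &b);
	printf("oldest submitted at jiffies=%lu\n", pending_oldest(&l)->submit_jif);
	return 0;
}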
@@ -1353,36 +1555,13 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct return limit; } -static void find_oldest_requests( - struct drbd_connection *connection, - struct drbd_device *device, - struct drbd_request **oldest_req_waiting_for_peer, - struct drbd_request **oldest_req_waiting_for_disk) -{ - struct drbd_request *r; - *oldest_req_waiting_for_peer = NULL; - *oldest_req_waiting_for_disk = NULL; - list_for_each_entry(r, &connection->transfer_log, tl_requests) { - const unsigned s = r->rq_state; - if (!*oldest_req_waiting_for_peer - && ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) - *oldest_req_waiting_for_peer = r; - - if (!*oldest_req_waiting_for_disk - && (s & RQ_LOCAL_PENDING) && r->device == device) - *oldest_req_waiting_for_disk = r; - - if (*oldest_req_waiting_for_peer && *oldest_req_waiting_for_disk) - break; - } -} - void request_timer_fn(unsigned long data) { struct drbd_device *device = (struct drbd_device *) data; struct drbd_connection *connection = first_peer_device(device)->connection; - struct drbd_request *req_disk, *req_peer; /* oldest request */ + struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */ struct net_conf *nc; + unsigned long oldest_submit_jif; unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ unsigned long now; @@ -1403,14 +1582,31 @@ void request_timer_fn(unsigned long data) return; /* Recurring timer stopped */ now = jiffies; + nt = now + et; spin_lock_irq(&device->resource->req_lock); - find_oldest_requests(connection, device, &req_peer, &req_disk); - if (req_peer == NULL && req_disk == NULL) { - spin_unlock_irq(&device->resource->req_lock); - mod_timer(&device->request_timer, now + et); - return; - } + req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local); + req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local); + req_peer = connection->req_not_net_done; + /* maybe the oldest request waiting for the peer is in fact still + * blocking in tcp sendmsg */ + if (!req_peer && connection->req_next && connection->req_next->pre_send_jif) + req_peer = connection->req_next; + + /* evaluate the oldest peer request only in one timer! */ + if (req_peer && req_peer->device != device) + req_peer = NULL; + + /* do we have something to evaluate? */ + if (req_peer == NULL && req_write == NULL && req_read == NULL) + goto out; + + oldest_submit_jif = + (req_write && req_read) + ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif) + ? req_write->pre_submit_jif : req_read->pre_submit_jif ) + : req_write ? req_write->pre_submit_jif + : req_read ? req_read->pre_submit_jif : now; /* The request is considered timed out, if * - we have some effective timeout from the configuration, @@ -1429,13 +1625,13 @@ void request_timer_fn(unsigned long data) * to expire twice (worst case) to become effective. Good enough. 
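The timeout tests below rely on the usual wraparound-safe jiffies comparisons plus a "pick the older of the pending read and pending write" step. Stand-alone equivalents of those helpers, with a small worked check (the values in main are made up):

#include <stdbool.h>
#include <stdio.h>

/* The signed-difference trick keeps the comparison correct across the
 * unsigned wraparound of the jiffies counter. */
static bool time_after_ul(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

static bool time_in_range_ul(unsigned long t, unsigned long lo, unsigned long hi)
{
	return !time_after_ul(lo, t) && !time_after_ul(t, hi);
}

/* Oldest of two optional submit timestamps, defaulting to "now" when neither
 * a pending write nor a pending read exists (mirrors the oldest_submit_jif
 * selection in request_timer_fn()). */
static unsigned long oldest_submit(bool have_w, unsigned long w,
				   bool have_r, unsigned long r,
				   unsigned long now)
{
	if (have_w && have_r)
		return time_after_ul(w, r) ? r : w;
	if (have_w)
		return w;
	if (have_r)
		return r;
	return now;
}

int main(void)
{
	unsigned long now = 100000, dt = 600;
	unsigned long oldest = oldest_submit(true, 99000, true, 99500, now);

	/* last reattach was long ago (jiffies 90000), so don't suppress the warning */
	if (time_after_ul(now, oldest + dt) &&
	    !time_in_range_ul(now, 90000, 90000 + dt))
		puts("local disk-timeout expired");
	return 0;
}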
*/ if (ent && req_peer && - time_after(now, req_peer->start_time + ent) && + time_after(now, req_peer->pre_send_jif + ent) && !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); } - if (dt && req_disk && - time_after(now, req_disk->start_time + dt) && + if (dt && oldest_submit_jif != now && + time_after(now, oldest_submit_jif + dt) && !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { drbd_warn(device, "Local backing device failed to meet the disk-timeout\n"); __drbd_chk_io_error(device, DRBD_FORCE_DETACH); @@ -1443,11 +1639,12 @@ void request_timer_fn(unsigned long data) /* Reschedule timer for the nearest not already expired timeout. * Fallback to now + min(effective network timeout, disk timeout). */ - ent = (ent && req_peer && time_before(now, req_peer->start_time + ent)) - ? req_peer->start_time + ent : now + et; - dt = (dt && req_disk && time_before(now, req_disk->start_time + dt)) - ? req_disk->start_time + dt : now + et; + ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent)) + ? req_peer->pre_send_jif + ent : now + et; + dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt)) + ? oldest_submit_jif + dt : now + et; nt = time_before(ent, dt) ? ent : dt; +out: spin_unlock_irq(&connection->resource->req_lock); mod_timer(&device->request_timer, nt); } diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 8566cd5866b4..9f6a04080e9f 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -288,6 +288,7 @@ extern void complete_master_bio(struct drbd_device *device, extern void request_timer_fn(unsigned long data); extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what); extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what); +extern void tl_abort_disk_io(struct drbd_device *device); /* this is in drbd_main.c */ extern void drbd_restart_request(struct drbd_request *req); diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index a5d8aae00e04..c35c0f001bb7 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -410,7 +410,7 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask, return rv; } -static void print_st(struct drbd_device *device, char *name, union drbd_state ns) +static void print_st(struct drbd_device *device, const char *name, union drbd_state ns) { drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n", name, @@ -952,11 +952,12 @@ enum drbd_state_rv __drbd_set_state(struct drbd_device *device, union drbd_state ns, enum chg_state_flags flags, struct completion *done) { + struct drbd_peer_device *peer_device = first_peer_device(device); + struct drbd_connection *connection = peer_device ? 
peer_device->connection : NULL; union drbd_state os; enum drbd_state_rv rv = SS_SUCCESS; enum sanitize_state_warnings ssw; struct after_state_chg_work *ascw; - bool did_remote, should_do_remote; os = drbd_read_state(device); @@ -978,9 +979,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, this happen...*/ if (is_valid_state(device, os) == rv) - rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); + rv = is_valid_soft_transition(os, ns, connection); } else - rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); + rv = is_valid_soft_transition(os, ns, connection); } if (rv < SS_SUCCESS) { @@ -997,7 +998,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, sanitize_state(). Only display it here if we where not called from _conn_request_state() */ if (!(flags & CS_DC_SUSP)) - conn_pr_state_change(first_peer_device(device)->connection, os, ns, + conn_pr_state_change(connection, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP); /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference @@ -1008,28 +1009,35 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) atomic_inc(&device->local_cnt); - did_remote = drbd_should_do_remote(device->state); + if (!is_sync_state(os.conn) && is_sync_state(ns.conn)) + clear_bit(RS_DONE, &device->flags); + + /* changes to local_cnt and device flags should be visible before + * changes to state, which again should be visible before anything else + * depending on that change happens. */ + smp_wmb(); device->state.i = ns.i; - should_do_remote = drbd_should_do_remote(device->state); device->resource->susp = ns.susp; device->resource->susp_nod = ns.susp_nod; device->resource->susp_fen = ns.susp_fen; + smp_wmb(); /* put replicated vs not-replicated requests in seperate epochs */ - if (did_remote != should_do_remote) - start_new_tl_epoch(first_peer_device(device)->connection); + if (drbd_should_do_remote((union drbd_dev_state)os.i) != + drbd_should_do_remote((union drbd_dev_state)ns.i)) + start_new_tl_epoch(connection); if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) drbd_print_uuids(device, "attached to UUIDs"); /* Wake up role changes, that were delayed because of connection establishing */ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && - no_peer_wf_report_params(first_peer_device(device)->connection)) - clear_bit(STATE_SENT, &first_peer_device(device)->connection->flags); + no_peer_wf_report_params(connection)) + clear_bit(STATE_SENT, &connection->flags); wake_up(&device->misc_wait); wake_up(&device->state_wait); - wake_up(&first_peer_device(device)->connection->ping_wait); + wake_up(&connection->ping_wait); /* Aborted verify run, or we reached the stop sector. * Log the last position, unless end-of-device. */ @@ -1118,21 +1126,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, /* Receiver should clean up itself */ if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING) - drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver); + drbd_thread_stop_nowait(&connection->receiver); /* Now the receiver finished cleaning up itself, it should die */ if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE) - drbd_thread_stop_nowait(&first_peer_device(device)->connection->receiver); + drbd_thread_stop_nowait(&connection->receiver); /* Upon network failure, we need to restart the receiver. 
*/ if (os.conn > C_WF_CONNECTION && ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) - drbd_thread_restart_nowait(&first_peer_device(device)->connection->receiver); + drbd_thread_restart_nowait(&connection->receiver); /* Resume AL writing if we get a connection */ if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { drbd_resume_al(device); - first_peer_device(device)->connection->connect_cnt++; + connection->connect_cnt++; } /* remember last attach time so request_timer_fn() won't @@ -1150,7 +1158,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ascw->w.cb = w_after_state_ch; ascw->device = device; ascw->done = done; - drbd_queue_work(&first_peer_device(device)->connection->sender_work, + drbd_queue_work(&connection->sender_work, &ascw->w); } else { drbd_err(device, "Could not kmalloc an ascw\n"); @@ -1222,13 +1230,16 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, union drbd_state ns, enum chg_state_flags flags) { struct drbd_resource *resource = device->resource; + struct drbd_peer_device *peer_device = first_peer_device(device); + struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; struct sib_info sib; sib.sib_reason = SIB_STATE_CHANGE; sib.os = os; sib.ns = ns; - if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) { + if ((os.disk != D_UP_TO_DATE || os.pdsk != D_UP_TO_DATE) + && (ns.disk == D_UP_TO_DATE && ns.pdsk == D_UP_TO_DATE)) { clear_bit(CRASHED_PRIMARY, &device->flags); if (device->p_uuid) device->p_uuid[UI_FLAGS] &= ~((u64)2); @@ -1245,7 +1256,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, state change. This function might sleep */ if (ns.susp_nod) { - struct drbd_connection *connection = first_peer_device(device)->connection; enum drbd_req_event what = NOTHING; spin_lock_irq(&device->resource->req_lock); @@ -1267,8 +1277,6 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, } if (ns.susp_fen) { - struct drbd_connection *connection = first_peer_device(device)->connection; - spin_lock_irq(&device->resource->req_lock); if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) { /* case2: The connection was established again: */ @@ -1294,8 +1302,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, * which is unexpected. 
*/ if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && - first_peer_device(device)->connection->agreed_pro_version >= 96 && get_ldev(device)) { - drbd_gen_and_send_sync_uuid(first_peer_device(device)); + connection->agreed_pro_version >= 96 && get_ldev(device)) { + drbd_gen_and_send_sync_uuid(peer_device); put_ldev(device); } @@ -1309,8 +1317,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, atomic_set(&device->rs_pending_cnt, 0); drbd_rs_cancel_all(device); - drbd_send_uuids(first_peer_device(device)); - drbd_send_state(first_peer_device(device), ns); + drbd_send_uuids(peer_device); + drbd_send_state(peer_device, ns); } /* No point in queuing send_bitmap if we don't have a connection * anymore, so check also the _current_ state, not only the new state @@ -1335,7 +1343,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, set_bit(NEW_CUR_UUID, &device->flags); } else { drbd_uuid_new_current(device); - drbd_send_uuids(first_peer_device(device)); + drbd_send_uuids(peer_device); } } put_ldev(device); @@ -1346,7 +1354,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY && device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { drbd_uuid_new_current(device); - drbd_send_uuids(first_peer_device(device)); + drbd_send_uuids(peer_device); } /* D_DISKLESS Peer becomes secondary */ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) @@ -1373,16 +1381,16 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, /* Last part of the attaching process ... */ if (ns.conn >= C_CONNECTED && os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { - drbd_send_sizes(first_peer_device(device), 0, 0); /* to start sync... */ - drbd_send_uuids(first_peer_device(device)); - drbd_send_state(first_peer_device(device), ns); + drbd_send_sizes(peer_device, 0, 0); /* to start sync... */ + drbd_send_uuids(peer_device); + drbd_send_state(peer_device, ns); } /* We want to pause/continue resync, tell peer. */ if (ns.conn >= C_CONNECTED && ((os.aftr_isp != ns.aftr_isp) || (os.user_isp != ns.user_isp))) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); /* In case one of the isp bits got set, suspend other devices. */ if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) && @@ -1392,10 +1400,10 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, /* Make sure the peer gets informed about eventual state changes (ISP bits) while we were in WFReportParams. */ if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); if (os.conn != C_AHEAD && ns.conn == C_AHEAD) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); /* We are in the progress to start a full sync... 
*/ if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || @@ -1449,7 +1457,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, drbd_disk_str(device->state.disk)); if (ns.conn >= C_CONNECTED) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); drbd_rs_cancel_all(device); @@ -1473,7 +1481,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, drbd_disk_str(device->state.disk)); if (ns.conn >= C_CONNECTED) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); /* corresponding get_ldev in __drbd_set_state * this may finally trigger drbd_ldev_destroy. */ put_ldev(device); @@ -1481,7 +1489,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, /* Notify peer that I had a local IO error, and did not detached.. */ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); /* Disks got bigger while they were detached */ if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING && @@ -1499,14 +1507,14 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, /* sync target done with resync. Explicitly notify peer, even though * it should (at least for non-empty resyncs) already know itself. */ if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); /* Verify finished, or reached stop sector. Peer did not know about * the stop sector, and we may even have changed the stop sector during * verify to interrupt/stop early. Send the new state. */ if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED && verify_can_do_stop_sector(device)) - drbd_send_state(first_peer_device(device), ns); + drbd_send_state(peer_device, ns); /* This triggers bitmap writeout of potentially still unwritten pages * if the resync finished cleanly, or aborted because of peer disk @@ -1563,7 +1571,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused) old_conf = connection->net_conf; connection->my_addr_len = 0; connection->peer_addr_len = 0; - rcu_assign_pointer(connection->net_conf, NULL); + RCU_INIT_POINTER(connection->net_conf, NULL); conn_free_crypto(connection); mutex_unlock(&connection->resource->conf_update); @@ -1599,7 +1607,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused) return 0; } -void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf) +static void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf) { enum chg_state_flags flags = ~0; struct drbd_peer_device *peer_device; @@ -1688,7 +1696,7 @@ conn_is_valid_transition(struct drbd_connection *connection, union drbd_state ma return rv; } -void +static void conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val, union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags) { diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index d8f57b6305cd..50776b362828 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -67,13 +67,10 @@ rwlock_t global_state_lock; */ void drbd_md_io_complete(struct bio *bio, int error) { - struct drbd_md_io *md_io; struct drbd_device *device; - md_io = (struct drbd_md_io 
*)bio->bi_private; - device = container_of(md_io, struct drbd_device, md_io); - - md_io->error = error; + device = bio->bi_private; + device->md_io.error = error; /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able * to timeout on the lower level device, and eventually detach from it. @@ -87,7 +84,7 @@ void drbd_md_io_complete(struct bio *bio, int error) * ASSERT(atomic_read(&device->md_io_in_use) == 1) there. */ drbd_md_put_buffer(device); - md_io->done = 1; + device->md_io.done = 1; wake_up(&device->misc_wait); bio_put(bio); if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */ @@ -135,6 +132,7 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l i = peer_req->i; do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; block_id = peer_req->block_id; + peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; spin_lock_irqsave(&device->resource->req_lock, flags); device->writ_cnt += peer_req->i.size >> 9; @@ -398,9 +396,6 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, if (!get_ldev(device)) return -EIO; - if (drbd_rs_should_slow_down(device, sector)) - goto defer; - /* GFP_TRY, because if there is no memory available right now, this may * be rescheduled for later. It is "only" background resync, after all. */ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, @@ -410,7 +405,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, peer_req->w.cb = w_e_send_csum; spin_lock_irq(&device->resource->req_lock); - list_add(&peer_req->w.list, &device->read_ee); + list_add_tail(&peer_req->w.list, &device->read_ee); spin_unlock_irq(&device->resource->req_lock); atomic_add(size >> 9, &device->rs_sect_ev); @@ -452,9 +447,9 @@ void resync_timer_fn(unsigned long data) { struct drbd_device *device = (struct drbd_device *) data; - if (list_empty(&device->resync_work.list)) - drbd_queue_work(&first_peer_device(device)->connection->sender_work, - &device->resync_work); + drbd_queue_work_if_unqueued( + &first_peer_device(device)->connection->sender_work, + &device->resync_work); } static void fifo_set(struct fifo_buffer *fb, int value) @@ -504,9 +499,9 @@ struct fifo_buffer *fifo_alloc(int fifo_size) static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in) { struct disk_conf *dc; - unsigned int want; /* The number of sectors we want in the proxy */ + unsigned int want; /* The number of sectors we want in-flight */ int req_sect; /* Number of sectors to request in this turn */ - int correction; /* Number of sectors more we need in the proxy*/ + int correction; /* Number of sectors more we need in-flight */ int cps; /* correction per invocation of drbd_rs_controller() */ int steps; /* Number of time steps to plan ahead */ int curr_corr; @@ -577,20 +572,27 @@ static int drbd_rs_number_requests(struct drbd_device *device) * potentially causing a distributed deadlock on congestion during * online-verify or (checksum-based) resync, if max-buffers, * socket buffer sizes and resync rate settings are mis-configured. */ - if (mxb - device->rs_in_flight < number) - number = mxb - device->rs_in_flight; + + /* note that "number" is in units of "BM_BLOCK_SIZE" (which is 4k), + * mxb (as used here, and in drbd_alloc_pages on the peer) is + * "number of pages" (typically also 4k), + * but "rs_in_flight" is in "sectors" (512 Byte). 
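The division by 8 in the line that follows is pure unit conversion: "number" and the max-buffers value count 4 KiB units (resync block size / page size), while rs_in_flight counts 512-byte sectors, and 4096 / 512 == 8. An illustrative helper showing the resulting clamp:

#include <stdio.h>

static int clamp_resync_requests(int number, int mxb, int rs_in_flight_sectors)
{
	int in_flight_4k = rs_in_flight_sectors / 8;   /* sectors -> 4 KiB units */

	if (mxb - in_flight_4k < number)
		number = mxb - in_flight_4k;
	return number;
}

int main(void)
{
	/* 256 buffers of 4 KiB, 1 MiB (2048 sectors) already in flight: throttle to 0 */
	printf("%d\n", clamp_resync_requests(100, 256, 2048));
	return 0;
}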
*/ + if (mxb - device->rs_in_flight/8 < number) + number = mxb - device->rs_in_flight/8; return number; } -static int make_resync_request(struct drbd_device *device, int cancel) +static int make_resync_request(struct drbd_device *const device, int cancel) { + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; unsigned long bit; sector_t sector; const sector_t capacity = drbd_get_capacity(device->this_bdev); int max_bio_size; int number, rollback_i, size; - int align, queued, sndbuf; + int align, requeue = 0; int i = 0; if (unlikely(cancel)) @@ -617,17 +619,22 @@ static int make_resync_request(struct drbd_device *device, int cancel) goto requeue; for (i = 0; i < number; i++) { - /* Stop generating RS requests, when half of the send buffer is filled */ - mutex_lock(&first_peer_device(device)->connection->data.mutex); - if (first_peer_device(device)->connection->data.socket) { - queued = first_peer_device(device)->connection->data.socket->sk->sk_wmem_queued; - sndbuf = first_peer_device(device)->connection->data.socket->sk->sk_sndbuf; - } else { - queued = 1; - sndbuf = 0; - } - mutex_unlock(&first_peer_device(device)->connection->data.mutex); - if (queued > sndbuf / 2) + /* Stop generating RS requests when half of the send buffer is filled, + * but notify TCP that we'd like to have more space. */ + mutex_lock(&connection->data.mutex); + if (connection->data.socket) { + struct sock *sk = connection->data.socket->sk; + int queued = sk->sk_wmem_queued; + int sndbuf = sk->sk_sndbuf; + if (queued > sndbuf / 2) { + requeue = 1; + if (sk->sk_socket) + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + } + } else + requeue = 1; + mutex_unlock(&connection->data.mutex); + if (requeue) goto requeue; next_sector: @@ -642,8 +649,7 @@ next_sector: sector = BM_BIT_TO_SECT(bit); - if (drbd_rs_should_slow_down(device, sector) || - drbd_try_rs_begin_io(device, sector)) { + if (drbd_try_rs_begin_io(device, sector)) { device->bm_resync_fo = bit; goto requeue; } @@ -696,9 +702,9 @@ next_sector: /* adjust very last sectors, in case we are oddly sized */ if (sector + (size>>9) > capacity) size = (capacity-sector)<<9; - if (first_peer_device(device)->connection->agreed_pro_version >= 89 && - first_peer_device(device)->connection->csums_tfm) { - switch (read_for_csum(first_peer_device(device), sector, size)) { + + if (device->use_csums) { + switch (read_for_csum(peer_device, sector, size)) { case -EIO: /* Disk failure */ put_ldev(device); return -EIO; @@ -717,7 +723,7 @@ next_sector: int err; inc_rs_pending(device); - err = drbd_send_drequest(first_peer_device(device), P_RS_DATA_REQUEST, + err = drbd_send_drequest(peer_device, P_RS_DATA_REQUEST, sector, size, ID_SYNCER); if (err) { drbd_err(device, "drbd_send_drequest() failed, aborting...\n"); @@ -774,8 +780,7 @@ static int make_ov_request(struct drbd_device *device, int cancel) size = BM_BLOCK_SIZE; - if (drbd_rs_should_slow_down(device, sector) || - drbd_try_rs_begin_io(device, sector)) { + if (drbd_try_rs_begin_io(device, sector)) { device->ov_position = sector; goto requeue; } @@ -911,7 +916,7 @@ int drbd_resync_finished(struct drbd_device *device) if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) khelper_cmd = "after-resync-target"; - if (first_peer_device(device)->connection->csums_tfm && device->rs_total) { + if (device->use_csums && device->rs_total) { const unsigned long s = device->rs_same_csum; const unsigned long t = device->rs_total; 
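The ratio computed next is an integer percentage of resync blocks whose checksums matched. One common way to compute such a percentage without overflowing part * 100 for very large totals (not necessarily the exact expression elided by the hunk above) is to scale the divisor instead of the dividend once the total gets large:

#include <stdio.h>

static int percent_of(unsigned long part, unsigned long total)
{
	if (total == 0)
		return 0;
	if (total < 100000)
		return (int)(part * 100 / total);
	return (int)(part / (total / 100));
}

int main(void)
{
	printf("%d%% equal checksums\n", percent_of(750, 1000));
	return 0;
}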
const int ratio = @@ -1351,13 +1356,15 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel) { struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_device *device = req->device; - struct drbd_connection *connection = first_peer_device(device)->connection; + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *const connection = peer_device->connection; int err; if (unlikely(cancel)) { req_mod(req, SEND_CANCELED); return 0; } + req->pre_send_jif = jiffies; /* this time, no connection->send.current_epoch_writes++; * If it was sent, it was the closing barrier for the last @@ -1365,7 +1372,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel) * No more barriers will be sent, until we leave AHEAD mode again. */ maybe_send_barrier(connection, req->epoch); - err = drbd_send_out_of_sync(first_peer_device(device), req); + err = drbd_send_out_of_sync(peer_device, req); req_mod(req, OOS_HANDED_TO_NETWORK); return err; @@ -1380,19 +1387,21 @@ int w_send_dblock(struct drbd_work *w, int cancel) { struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_device *device = req->device; - struct drbd_connection *connection = first_peer_device(device)->connection; + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *connection = peer_device->connection; int err; if (unlikely(cancel)) { req_mod(req, SEND_CANCELED); return 0; } + req->pre_send_jif = jiffies; re_init_if_first_write(connection, req->epoch); maybe_send_barrier(connection, req->epoch); connection->send.current_epoch_writes++; - err = drbd_send_dblock(first_peer_device(device), req); + err = drbd_send_dblock(peer_device, req); req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); return err; @@ -1407,19 +1416,21 @@ int w_send_read_req(struct drbd_work *w, int cancel) { struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_device *device = req->device; - struct drbd_connection *connection = first_peer_device(device)->connection; + struct drbd_peer_device *const peer_device = first_peer_device(device); + struct drbd_connection *connection = peer_device->connection; int err; if (unlikely(cancel)) { req_mod(req, SEND_CANCELED); return 0; } + req->pre_send_jif = jiffies; /* Even read requests may close a write epoch, * if there was any yet. */ maybe_send_barrier(connection, req->epoch); - err = drbd_send_drequest(first_peer_device(device), P_DATA_REQUEST, req->i.sector, req->i.size, + err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size, (unsigned long)req); req_mod(req, err ? 
SEND_FAILED : HANDED_OVER_TO_NETWORK); @@ -1433,7 +1444,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel) struct drbd_device *device = req->device; if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) - drbd_al_begin_io(device, &req->i, false); + drbd_al_begin_io(device, &req->i); drbd_req_make_private_bio(req, req->master_bio); req->private_bio->bi_bdev = device->ldev->backing_bdev; @@ -1601,26 +1612,32 @@ void drbd_rs_controller_reset(struct drbd_device *device) void start_resync_timer_fn(unsigned long data) { struct drbd_device *device = (struct drbd_device *) data; - - drbd_queue_work(&first_peer_device(device)->connection->sender_work, - &device->start_resync_work); + drbd_device_post_work(device, RS_START); } -int w_start_resync(struct drbd_work *w, int cancel) +static void do_start_resync(struct drbd_device *device) { - struct drbd_device *device = - container_of(w, struct drbd_device, start_resync_work); - if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) { - drbd_warn(device, "w_start_resync later...\n"); + drbd_warn(device, "postponing start_resync ...\n"); device->start_resync_timer.expires = jiffies + HZ/10; add_timer(&device->start_resync_timer); - return 0; + return; } drbd_start_resync(device, C_SYNC_SOURCE); clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags); - return 0; +} + +static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device) +{ + bool csums_after_crash_only; + rcu_read_lock(); + csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only; + rcu_read_unlock(); + return connection->agreed_pro_version >= 89 && /* supported? */ + connection->csums_tfm && /* configured? */ + (csums_after_crash_only == 0 /* use for each resync? */ + || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */ } /** @@ -1633,6 +1650,8 @@ int w_start_resync(struct drbd_work *w, int cancel) */ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) { + struct drbd_peer_device *peer_device = first_peer_device(device); + struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; union drbd_state ns; int r; @@ -1651,7 +1670,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) if (r > 0) { drbd_info(device, "before-resync-target handler returned %d, " "dropping connection.\n", r); - conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD); + conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); return; } } else /* C_SYNC_SOURCE */ { @@ -1664,7 +1683,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) } else { drbd_info(device, "before-resync-source handler returned %d, " "dropping connection.\n", r); - conn_request_state(first_peer_device(device)->connection, + conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); return; } @@ -1672,7 +1691,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) } } - if (current == first_peer_device(device)->connection->worker.task) { + if (current == connection->worker.task) { /* The worker should not sleep waiting for state_mutex, that can take long */ if (!mutex_trylock(device->state_mutex)) { @@ -1733,11 +1752,20 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) device->rs_mark_time[i] = now; } _drbd_pause_after(device); + /* Forget potentially stale cached per resync extent bit-counts. 
+ * Open coded drbd_rs_cancel_all(device), we already have IRQs + * disabled, and know the disk state is ok. */ + spin_lock(&device->al_lock); + lc_reset(device->resync); + device->resync_locked = 0; + device->resync_wenr = LC_FREE; + spin_unlock(&device->al_lock); } write_unlock(&global_state_lock); spin_unlock_irq(&device->resource->req_lock); if (r == SS_SUCCESS) { + wake_up(&device->al_wait); /* for lc_reset() above */ /* reset rs_last_bcast when a resync or verify is started, * to deal with potential jiffies wrap. */ device->rs_last_bcast = jiffies - HZ; @@ -1746,8 +1774,12 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) drbd_conn_str(ns.conn), (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10), (unsigned long) device->rs_total); - if (side == C_SYNC_TARGET) + if (side == C_SYNC_TARGET) { device->bm_resync_fo = 0; + device->use_csums = use_checksum_based_resync(connection, device); + } else { + device->use_csums = 0; + } /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid * with w_send_oos, or the sync target will get confused as to @@ -1756,12 +1788,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) * drbd_resync_finished from here in that case. * We drbd_gen_and_send_sync_uuid here for protocol < 96, * and from after_state_ch otherwise. */ - if (side == C_SYNC_SOURCE && - first_peer_device(device)->connection->agreed_pro_version < 96) - drbd_gen_and_send_sync_uuid(first_peer_device(device)); + if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96) + drbd_gen_and_send_sync_uuid(peer_device); - if (first_peer_device(device)->connection->agreed_pro_version < 95 && - device->rs_total == 0) { + if (connection->agreed_pro_version < 95 && device->rs_total == 0) { /* This still has a race (about when exactly the peers * detect connection loss) that can lead to a full sync * on next handshake. In 8.3.9 we fixed this with explicit @@ -1777,7 +1807,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) int timeo; rcu_read_lock(); - nc = rcu_dereference(first_peer_device(device)->connection->net_conf); + nc = rcu_dereference(connection->net_conf); timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; rcu_read_unlock(); schedule_timeout_interruptible(timeo); @@ -1799,10 +1829,165 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) mutex_unlock(device->state_mutex); } +static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done) +{ + struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, }; + device->rs_last_bcast = jiffies; + + if (!get_ldev(device)) + return; + + drbd_bm_write_lazy(device, 0); + if (resync_done && is_sync_state(device->state.conn)) + drbd_resync_finished(device); + + drbd_bcast_event(device, &sib); + /* update timestamp, in case it took a while to write out stuff */ + device->rs_last_bcast = jiffies; + put_ldev(device); +} + +static void drbd_ldev_destroy(struct drbd_device *device) +{ + lc_destroy(device->resync); + device->resync = NULL; + lc_destroy(device->act_log); + device->act_log = NULL; + __no_warn(local, + drbd_free_ldev(device->ldev); + device->ldev = NULL;); + clear_bit(GOING_DISKLESS, &device->flags); + wake_up(&device->misc_wait); +} + +static void go_diskless(struct drbd_device *device) +{ + D_ASSERT(device, device->state.disk == D_FAILED); + /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will + * inc/dec it frequently. 
Once we are D_DISKLESS, no one will touch + * the protected members anymore, though, so once put_ldev reaches zero + * again, it will be safe to free them. */ + + /* Try to write changed bitmap pages, read errors may have just + * set some bits outside the area covered by the activity log. + * + * If we have an IO error during the bitmap writeout, + * we will want a full sync next time, just in case. + * (Do we want a specific meta data flag for this?) + * + * If that does not make it to stable storage either, + * we cannot do anything about that anymore. + * + * We still need to check if both bitmap and ldev are present, we may + * end up here after a failed attach, before ldev was even assigned. + */ + if (device->bitmap && device->ldev) { + /* An interrupted resync or similar is allowed to recounts bits + * while we detach. + * Any modifications would not be expected anymore, though. + */ + if (drbd_bitmap_io_from_worker(device, drbd_bm_write, + "detach", BM_LOCKED_TEST_ALLOWED)) { + if (test_bit(WAS_READ_ERROR, &device->flags)) { + drbd_md_set_flag(device, MDF_FULL_SYNC); + drbd_md_sync(device); + } + } + } + + drbd_force_state(device, NS(disk, D_DISKLESS)); +} + +static int do_md_sync(struct drbd_device *device) +{ + drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); + drbd_md_sync(device); + return 0; +} + +/* only called from drbd_worker thread, no locking */ +void __update_timing_details( + struct drbd_thread_timing_details *tdp, + unsigned int *cb_nr, + void *cb, + const char *fn, const unsigned int line) +{ + unsigned int i = *cb_nr % DRBD_THREAD_DETAILS_HIST; + struct drbd_thread_timing_details *td = tdp + i; + + td->start_jif = jiffies; + td->cb_addr = cb; + td->caller_fn = fn; + td->line = line; + td->cb_nr = *cb_nr; + + i = (i+1) % DRBD_THREAD_DETAILS_HIST; + td = tdp + i; + memset(td, 0, sizeof(*td)); + + ++(*cb_nr); +} + +#define WORK_PENDING(work_bit, todo) (todo & (1UL << work_bit)) +static void do_device_work(struct drbd_device *device, const unsigned long todo) +{ + if (WORK_PENDING(MD_SYNC, todo)) + do_md_sync(device); + if (WORK_PENDING(RS_DONE, todo) || + WORK_PENDING(RS_PROGRESS, todo)) + update_on_disk_bitmap(device, WORK_PENDING(RS_DONE, todo)); + if (WORK_PENDING(GO_DISKLESS, todo)) + go_diskless(device); + if (WORK_PENDING(DESTROY_DISK, todo)) + drbd_ldev_destroy(device); + if (WORK_PENDING(RS_START, todo)) + do_start_resync(device); +} + +#define DRBD_DEVICE_WORK_MASK \ + ((1UL << GO_DISKLESS) \ + |(1UL << DESTROY_DISK) \ + |(1UL << MD_SYNC) \ + |(1UL << RS_START) \ + |(1UL << RS_PROGRESS) \ + |(1UL << RS_DONE) \ + ) + +static unsigned long get_work_bits(unsigned long *flags) +{ + unsigned long old, new; + do { + old = *flags; + new = old & ~DRBD_DEVICE_WORK_MASK; + } while (cmpxchg(flags, old, new) != old); + return old & DRBD_DEVICE_WORK_MASK; +} + +static void do_unqueued_work(struct drbd_connection *connection) +{ + struct drbd_peer_device *peer_device; + int vnr; + + rcu_read_lock(); + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { + struct drbd_device *device = peer_device->device; + unsigned long todo = get_work_bits(&device->flags); + if (!todo) + continue; + + kref_get(&device->kref); + rcu_read_unlock(); + do_device_work(device, todo); + kref_put(&device->kref, drbd_destroy_device); + rcu_read_lock(); + } + rcu_read_unlock(); +} + static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list) { spin_lock_irq(&queue->q_lock); - list_splice_init(&queue->q, work_list); + 
list_splice_tail_init(&queue->q, work_list); spin_unlock_irq(&queue->q_lock); return !list_empty(work_list); } @@ -1851,7 +2036,7 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head * /* dequeue single item only, * we still use drbd_queue_work_front() in some places */ if (!list_empty(&connection->sender_work.q)) - list_move(connection->sender_work.q.next, work_list); + list_splice_tail_init(&connection->sender_work.q, work_list); spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ if (!list_empty(work_list) || signal_pending(current)) { spin_unlock_irq(&connection->resource->req_lock); @@ -1873,6 +2058,14 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head * if (send_barrier) maybe_send_barrier(connection, connection->send.current_epoch_nr + 1); + + if (test_bit(DEVICE_WORK_PENDING, &connection->flags)) + break; + + /* drbd_send() may have called flush_signals() */ + if (get_t_state(&connection->worker) != RUNNING) + break; + schedule(); /* may be woken up for other things but new work, too, * e.g. if the current epoch got closed. @@ -1906,10 +2099,15 @@ int drbd_worker(struct drbd_thread *thi) while (get_t_state(thi) == RUNNING) { drbd_thread_current_set_cpu(thi); - /* as long as we use drbd_queue_work_front(), - * we may only dequeue single work items here, not batches. */ - if (list_empty(&work_list)) + if (list_empty(&work_list)) { + update_worker_timing_details(connection, wait_for_work); wait_for_work(connection, &work_list); + } + + if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { + update_worker_timing_details(connection, do_unqueued_work); + do_unqueued_work(connection); + } if (signal_pending(current)) { flush_signals(current); @@ -1926,6 +2124,7 @@ int drbd_worker(struct drbd_thread *thi) while (!list_empty(&work_list)) { w = list_first_entry(&work_list, struct drbd_work, list); list_del_init(&w->list); + update_worker_timing_details(connection, w->cb); if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0) continue; if (connection->cstate >= C_WF_REPORT_PARAMS) @@ -1934,13 +2133,18 @@ int drbd_worker(struct drbd_thread *thi) } do { + if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { + update_worker_timing_details(connection, do_unqueued_work); + do_unqueued_work(connection); + } while (!list_empty(&work_list)) { w = list_first_entry(&work_list, struct drbd_work, list); list_del_init(&w->list); + update_worker_timing_details(connection, w->cb); w->cb(w, 1); } dequeue_work_batch(&connection->sender_work, &work_list); - } while (!list_empty(&work_list)); + } while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags)); rcu_read_lock(); idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 295f3afbbef5..5c8e7fe07745 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3918,7 +3918,6 @@ skip_create_disk: if (rv) { dev_err(&dd->pdev->dev, "Unable to allocate request queue\n"); - rv = -ENOMEM; goto block_queue_alloc_init_error; } @@ -4632,7 +4631,7 @@ static void mtip_pci_shutdown(struct pci_dev *pdev) } /* Table of device ids supported by this driver. 
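
The mtip32xx hunk below, like the rsxx and skd hunks further down, drops DEFINE_PCI_DEVICE_TABLE() in favour of an open-coded const struct pci_device_id array. From memory (so treat this as approximate rather than a quote of <linux/pci.h>), the macro at this point was nothing more than:

  /* Approximate definition of the macro being removed; once the historic
   * __devinitconst annotation went away it added no value, and checkpatch
   * has warned in favour of the explicit form used by the replacement lines. */
  #define DEFINE_PCI_DEVICE_TABLE(_table) \
          const struct pci_device_id _table[]

so the conversion is purely cosmetic; the tables themselves are unchanged.
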
*/ -static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = { +static const struct pci_device_id mtip_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) }, { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) }, { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) }, diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a3b042c4d448..00d469c7f9f7 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -462,17 +462,21 @@ static int null_add_dev(void) struct gendisk *disk; struct nullb *nullb; sector_t size; + int rv; nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); - if (!nullb) + if (!nullb) { + rv = -ENOMEM; goto out; + } spin_lock_init(&nullb->lock); if (queue_mode == NULL_Q_MQ && use_per_node_hctx) submit_queues = nr_online_nodes; - if (setup_queues(nullb)) + rv = setup_queues(nullb); + if (rv) goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { @@ -484,22 +488,29 @@ static int null_add_dev(void) nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; nullb->tag_set.driver_data = nullb; - if (blk_mq_alloc_tag_set(&nullb->tag_set)) + rv = blk_mq_alloc_tag_set(&nullb->tag_set); + if (rv) goto out_cleanup_queues; nullb->q = blk_mq_init_queue(&nullb->tag_set); - if (!nullb->q) + if (!nullb->q) { + rv = -ENOMEM; goto out_cleanup_tags; + } } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); - if (!nullb->q) + if (!nullb->q) { + rv = -ENOMEM; goto out_cleanup_queues; + } blk_queue_make_request(nullb->q, null_queue_bio); init_driver_queues(nullb); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); - if (!nullb->q) + if (!nullb->q) { + rv = -ENOMEM; goto out_cleanup_queues; + } blk_queue_prep_rq(nullb->q, null_rq_prep_fn); blk_queue_softirq_done(nullb->q, null_softirq_done_fn); init_driver_queues(nullb); @@ -509,8 +520,10 @@ static int null_add_dev(void) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) + if (!disk) { + rv = -ENOMEM; goto out_cleanup_blk_queue; + } mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -544,7 +557,7 @@ out_cleanup_queues: out_free_nullb: kfree(nullb); out: - return -ENOMEM; + return rv; } static int __init null_init(void) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b2c98c1bc037..4b97baf8afa3 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -42,6 +42,7 @@ #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/idr.h> +#include <linux/workqueue.h> #include "rbd_types.h" @@ -332,7 +333,10 @@ struct rbd_device { char name[DEV_NAME_LEN]; /* blkdev name, e.g. 
rbd3 */ + struct list_head rq_queue; /* incoming rq queue */ spinlock_t lock; /* queue, flags, open_count */ + struct workqueue_struct *rq_wq; + struct work_struct rq_work; struct rbd_image_header header; unsigned long flags; /* possibly lock protected */ @@ -514,7 +518,8 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); static int rbd_dev_refresh(struct rbd_device *rbd_dev); static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); -static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev); +static int rbd_dev_header_info(struct rbd_device *rbd_dev); +static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev); static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u64 snap_id); static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, @@ -971,12 +976,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev, header->snap_names = snap_names; header->snap_sizes = snap_sizes; - /* Make sure mapping size is consistent with header info */ - - if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time) - if (rbd_dev->mapping.size != header->image_size) - rbd_dev->mapping.size = header->image_size; - return 0; out_2big: ret = -EIO; @@ -1139,6 +1138,13 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) rbd_dev->mapping.features = 0; } +static void rbd_segment_name_free(const char *name) +{ + /* The explicit cast here is needed to drop the const qualifier */ + + kmem_cache_free(rbd_segment_name_cache, (void *)name); +} + static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) { char *name; @@ -1158,20 +1164,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) { pr_err("error formatting segment name for #%llu (%d)\n", segment, ret); - kfree(name); + rbd_segment_name_free(name); name = NULL; } return name; } -static void rbd_segment_name_free(const char *name) -{ - /* The explicit cast here is needed to drop the const qualifier */ - - kmem_cache_free(rbd_segment_name_cache, (void *)name); -} - static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) { u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; @@ -1371,7 +1370,7 @@ static void obj_request_img_data_set(struct rbd_obj_request *obj_request) struct rbd_device *rbd_dev; rbd_dev = obj_request->img_request->rbd_dev; - rbd_warn(rbd_dev, "obj_request %p already marked img_data\n", + rbd_warn(rbd_dev, "obj_request %p already marked img_data", obj_request); } } @@ -1389,7 +1388,7 @@ static void obj_request_done_set(struct rbd_obj_request *obj_request) if (obj_request_img_data_test(obj_request)) rbd_dev = obj_request->img_request->rbd_dev; - rbd_warn(rbd_dev, "obj_request %p already marked done\n", + rbd_warn(rbd_dev, "obj_request %p already marked done", obj_request); } } @@ -1527,11 +1526,37 @@ static bool obj_request_type_valid(enum obj_request_type type) static int rbd_obj_request_submit(struct ceph_osd_client *osdc, struct rbd_obj_request *obj_request) { - dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request); - + dout("%s %p\n", __func__, obj_request); return ceph_osdc_start_request(osdc, obj_request->osd_req, false); } +static void rbd_obj_request_end(struct rbd_obj_request *obj_request) +{ + dout("%s %p\n", __func__, obj_request); + ceph_osdc_cancel_request(obj_request->osd_req); +} + +/* + * Wait for an object request to complete. If interrupted, cancel the + * underlying osd request. 
+ */ +static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) +{ + int ret; + + dout("%s %p\n", __func__, obj_request); + + ret = wait_for_completion_interruptible(&obj_request->completion); + if (ret < 0) { + dout("%s %p interrupted\n", __func__, obj_request); + rbd_obj_request_end(obj_request); + return ret; + } + + dout("%s %p done\n", __func__, obj_request); + return 0; +} + static void rbd_img_request_complete(struct rbd_img_request *img_request) { @@ -1558,15 +1583,6 @@ static void rbd_img_request_complete(struct rbd_img_request *img_request) rbd_img_request_put(img_request); } -/* Caller is responsible for rbd_obj_request_destroy(obj_request) */ - -static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) -{ - dout("%s: obj %p\n", __func__, obj_request); - - return wait_for_completion_interruptible(&obj_request->completion); -} - /* * The default/initial value for all image request flags is 0. Each * is conditionally set to 1 at image request initialization time @@ -1763,7 +1779,7 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_trivial_callback(obj_request); break; default: - rbd_warn(NULL, "%s: unsupported op %hu\n", + rbd_warn(NULL, "%s: unsupported op %hu", obj_request->object_name, (unsigned short) opcode); break; } @@ -1998,7 +2014,7 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev) if (!counter) rbd_dev_unparent(rbd_dev); else - rbd_warn(rbd_dev, "parent reference underflow\n"); + rbd_warn(rbd_dev, "parent reference underflow"); } /* @@ -2028,7 +2044,7 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) /* Image was flattened, but parent is not yet torn down */ if (counter < 0) - rbd_warn(rbd_dev, "parent reference overflow\n"); + rbd_warn(rbd_dev, "parent reference overflow"); return false; } @@ -2045,7 +2061,7 @@ static struct rbd_img_request *rbd_img_request_create( { struct rbd_img_request *img_request; - img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC); + img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO); if (!img_request) return NULL; @@ -2161,11 +2177,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) if (result) { struct rbd_device *rbd_dev = img_request->rbd_dev; - rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n", + rbd_warn(rbd_dev, "%s %llx at %llx (%llx)", img_request_write_test(img_request) ? "write" : "read", obj_request->length, obj_request->img_offset, obj_request->offset); - rbd_warn(rbd_dev, " result %d xferred %x\n", + rbd_warn(rbd_dev, " result %d xferred %x", result, xferred); if (!img_request->result) img_request->result = result; @@ -2946,154 +2962,135 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, rbd_dev->header_name, (unsigned long long)notify_id, (unsigned int)opcode); + + /* + * Until adequate refresh error handling is in place, there is + * not much we can do here, except warn. + * + * See http://tracker.ceph.com/issues/5040 + */ ret = rbd_dev_refresh(rbd_dev); if (ret) - rbd_warn(rbd_dev, "header refresh error (%d)\n", ret); + rbd_warn(rbd_dev, "refresh failed: %d", ret); - rbd_obj_notify_ack_sync(rbd_dev, notify_id); + ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id); + if (ret) + rbd_warn(rbd_dev, "notify_ack ret %d", ret); } /* - * Initiate a watch request, synchronously. + * Send a (un)watch request and wait for the ack. Return a request + * with a ref held on success or error. 
*/ -static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev) +static struct rbd_obj_request *rbd_obj_watch_request_helper( + struct rbd_device *rbd_dev, + bool watch) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; int ret; - rbd_assert(!rbd_dev->watch_event); - rbd_assert(!rbd_dev->watch_request); - - ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev, - &rbd_dev->watch_event); - if (ret < 0) - return ret; - - rbd_assert(rbd_dev->watch_event); - obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0, OBJ_REQUEST_NODATA); - if (!obj_request) { - ret = -ENOMEM; - goto out_cancel; - } + if (!obj_request) + return ERR_PTR(-ENOMEM); obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1, obj_request); if (!obj_request->osd_req) { ret = -ENOMEM; - goto out_put; + goto out; } - ceph_osdc_set_request_linger(osdc, obj_request->osd_req); - osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, 0, 1); + rbd_dev->watch_event->cookie, 0, watch); rbd_osd_req_format_write(obj_request); + if (watch) + ceph_osdc_set_request_linger(osdc, obj_request->osd_req); + ret = rbd_obj_request_submit(osdc, obj_request); if (ret) - goto out_linger; + goto out; ret = rbd_obj_request_wait(obj_request); if (ret) - goto out_linger; + goto out; ret = obj_request->result; - if (ret) - goto out_linger; - - /* - * A watch request is set to linger, so the underlying osd - * request won't go away until we unregister it. We retain - * a pointer to the object request during that time (in - * rbd_dev->watch_request), so we'll keep a reference to - * it. We'll drop that reference (below) after we've - * unregistered it. - */ - rbd_dev->watch_request = obj_request; + if (ret) { + if (watch) + rbd_obj_request_end(obj_request); + goto out; + } - return 0; + return obj_request; -out_linger: - ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req); -out_put: +out: rbd_obj_request_put(obj_request); -out_cancel: - ceph_osdc_cancel_event(rbd_dev->watch_event); - rbd_dev->watch_event = NULL; - - return ret; + return ERR_PTR(ret); } /* - * Tear down a watch request, synchronously. + * Initiate a watch request, synchronously. 
*/ -static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev) +static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; int ret; - rbd_assert(rbd_dev->watch_event); - rbd_assert(rbd_dev->watch_request); + rbd_assert(!rbd_dev->watch_event); + rbd_assert(!rbd_dev->watch_request); - obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0, - OBJ_REQUEST_NODATA); - if (!obj_request) { - ret = -ENOMEM; - goto out_cancel; - } + ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev, + &rbd_dev->watch_event); + if (ret < 0) + return ret; - obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1, - obj_request); - if (!obj_request->osd_req) { - ret = -ENOMEM; - goto out_put; + obj_request = rbd_obj_watch_request_helper(rbd_dev, true); + if (IS_ERR(obj_request)) { + ceph_osdc_cancel_event(rbd_dev->watch_event); + rbd_dev->watch_event = NULL; + return PTR_ERR(obj_request); } - osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, 0, 0); - rbd_osd_req_format_write(obj_request); - - ret = rbd_obj_request_submit(osdc, obj_request); - if (ret) - goto out_put; + /* + * A watch request is set to linger, so the underlying osd + * request won't go away until we unregister it. We retain + * a pointer to the object request during that time (in + * rbd_dev->watch_request), so we'll keep a reference to it. + * We'll drop that reference after we've unregistered it in + * rbd_dev_header_unwatch_sync(). + */ + rbd_dev->watch_request = obj_request; - ret = rbd_obj_request_wait(obj_request); - if (ret) - goto out_put; + return 0; +} - ret = obj_request->result; - if (ret) - goto out_put; +/* + * Tear down a watch request, synchronously. 
+ */ +static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev) +{ + struct rbd_obj_request *obj_request; - /* We have successfully torn down the watch request */ + rbd_assert(rbd_dev->watch_event); + rbd_assert(rbd_dev->watch_request); - ceph_osdc_unregister_linger_request(osdc, - rbd_dev->watch_request->osd_req); + rbd_obj_request_end(rbd_dev->watch_request); rbd_obj_request_put(rbd_dev->watch_request); rbd_dev->watch_request = NULL; -out_put: - rbd_obj_request_put(obj_request); -out_cancel: + obj_request = rbd_obj_watch_request_helper(rbd_dev, false); + if (!IS_ERR(obj_request)) + rbd_obj_request_put(obj_request); + else + rbd_warn(rbd_dev, "unable to tear down watch request (%ld)", + PTR_ERR(obj_request)); + ceph_osdc_cancel_event(rbd_dev->watch_event); rbd_dev->watch_event = NULL; - - return ret; -} - -static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev) -{ - int ret; - - ret = __rbd_dev_header_unwatch_sync(rbd_dev); - if (ret) { - rbd_warn(rbd_dev, "unable to tear down watch request: %d\n", - ret); - } } /* @@ -3183,102 +3180,129 @@ out: return ret; } -static void rbd_request_fn(struct request_queue *q) - __releases(q->queue_lock) __acquires(q->queue_lock) +static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq) { - struct rbd_device *rbd_dev = q->queuedata; - struct request *rq; + struct rbd_img_request *img_request; + u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; + u64 length = blk_rq_bytes(rq); + bool wr = rq_data_dir(rq) == WRITE; int result; - while ((rq = blk_fetch_request(q))) { - bool write_request = rq_data_dir(rq) == WRITE; - struct rbd_img_request *img_request; - u64 offset; - u64 length; + /* Ignore/skip any zero-length requests */ - /* Ignore any non-FS requests that filter through. */ + if (!length) { + dout("%s: zero-length request\n", __func__); + result = 0; + goto err_rq; + } - if (rq->cmd_type != REQ_TYPE_FS) { - dout("%s: non-fs request type %d\n", __func__, - (int) rq->cmd_type); - __blk_end_request_all(rq, 0); - continue; + /* Disallow writes to a read-only device */ + + if (wr) { + if (rbd_dev->mapping.read_only) { + result = -EROFS; + goto err_rq; } + rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP); + } - /* Ignore/skip any zero-length requests */ + /* + * Quit early if the mapped snapshot no longer exists. It's + * still possible the snapshot will have disappeared by the + * time our request arrives at the osd, but there's no sense in + * sending it if we already know. 
+ */ + if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) { + dout("request for non-existent snapshot"); + rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP); + result = -ENXIO; + goto err_rq; + } - offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT; - length = (u64) blk_rq_bytes(rq); + if (offset && length > U64_MAX - offset + 1) { + rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset, + length); + result = -EINVAL; + goto err_rq; /* Shouldn't happen */ + } - if (!length) { - dout("%s: zero-length request\n", __func__); - __blk_end_request_all(rq, 0); - continue; - } + if (offset + length > rbd_dev->mapping.size) { + rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset, + length, rbd_dev->mapping.size); + result = -EIO; + goto err_rq; + } - spin_unlock_irq(q->queue_lock); + img_request = rbd_img_request_create(rbd_dev, offset, length, wr); + if (!img_request) { + result = -ENOMEM; + goto err_rq; + } + img_request->rq = rq; - /* Disallow writes to a read-only device */ + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, rq->bio); + if (result) + goto err_img_request; - if (write_request) { - result = -EROFS; - if (rbd_dev->mapping.read_only) - goto end_request; - rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP); - } + result = rbd_img_request_submit(img_request); + if (result) + goto err_img_request; - /* - * Quit early if the mapped snapshot no longer - * exists. It's still possible the snapshot will - * have disappeared by the time our request arrives - * at the osd, but there's no sense in sending it if - * we already know. - */ - if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) { - dout("request for non-existent snapshot"); - rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP); - result = -ENXIO; - goto end_request; - } + return; - result = -EINVAL; - if (offset && length > U64_MAX - offset + 1) { - rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n", - offset, length); - goto end_request; /* Shouldn't happen */ - } +err_img_request: + rbd_img_request_put(img_request); +err_rq: + if (result) + rbd_warn(rbd_dev, "%s %llx at %llx result %d", + wr ? "write" : "read", length, offset, result); + blk_end_request_all(rq, result); +} - result = -EIO; - if (offset + length > rbd_dev->mapping.size) { - rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n", - offset, length, rbd_dev->mapping.size); - goto end_request; - } +static void rbd_request_workfn(struct work_struct *work) +{ + struct rbd_device *rbd_dev = + container_of(work, struct rbd_device, rq_work); + struct request *rq, *next; + LIST_HEAD(requests); - result = -ENOMEM; - img_request = rbd_img_request_create(rbd_dev, offset, length, - write_request); - if (!img_request) - goto end_request; + spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */ + list_splice_init(&rbd_dev->rq_queue, &requests); + spin_unlock_irq(&rbd_dev->lock); - img_request->rq = rq; + list_for_each_entry_safe(rq, next, &requests, queuelist) { + list_del_init(&rq->queuelist); + rbd_handle_request(rbd_dev, rq); + } +} - result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, - rq->bio); - if (!result) - result = rbd_img_request_submit(img_request); - if (result) - rbd_img_request_put(img_request); -end_request: - spin_lock_irq(q->queue_lock); - if (result < 0) { - rbd_warn(rbd_dev, "%s %llx at %llx result %d\n", - write_request ? "write" : "read", - length, offset, result); - - __blk_end_request_all(rq, result); +/* + * Called with q->queue_lock held and interrupts disabled, possibly on + * the way to schedule(). Do not sleep here! 
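
The constraint in the comment above is what motivates the rq_queue/rq_work/rq_wq fields added in the earlier rbd hunks: the request function runs under q->queue_lock with interrupts disabled, while the actual handling may sleep (for example the GFP_NOIO allocation in rbd_img_request_create()), so it has to move to process context. A condensed sketch of the two halves, with details from rbd_request_fn() below and rbd_request_workfn() above omitted:

  /* Sketch, not the patch itself: the request function only queues while
   * holding the queue lock; the work item drains the list and may sleep. */
  static void request_fn_sketch(struct request_queue *q)
  {
          struct rbd_device *rbd_dev = q->queuedata;
          struct request *rq;

          while ((rq = blk_fetch_request(q)))     /* queue_lock held, IRQs off */
                  list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);

          queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);  /* safe in atomic context */
  }

  static void request_workfn_sketch(struct work_struct *work)
  {
          struct rbd_device *rbd_dev =
                  container_of(work, struct rbd_device, rq_work);
          struct request *rq, *next;
          LIST_HEAD(requests);

          spin_lock_irq(&rbd_dev->lock);
          list_splice_init(&rbd_dev->rq_queue, &requests);
          spin_unlock_irq(&rbd_dev->lock);

          list_for_each_entry_safe(rq, next, &requests, queuelist) {
                  list_del_init(&rq->queuelist);
                  rbd_handle_request(rbd_dev, rq);        /* may sleep */
          }
  }
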
+ */ +static void rbd_request_fn(struct request_queue *q) +{ + struct rbd_device *rbd_dev = q->queuedata; + struct request *rq; + int queued = 0; + + rbd_assert(rbd_dev); + + while ((rq = blk_fetch_request(q))) { + /* Ignore any non-FS requests that filter through. */ + if (rq->cmd_type != REQ_TYPE_FS) { + dout("%s: non-fs request type %d\n", __func__, + (int) rq->cmd_type); + __blk_end_request_all(rq, 0); + continue; } + + list_add_tail(&rq->queuelist, &rbd_dev->rq_queue); + queued++; } + + if (queued) + queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work); } /* @@ -3517,24 +3541,37 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev) u64 mapping_size; int ret; - rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); down_write(&rbd_dev->header_rwsem); mapping_size = rbd_dev->mapping.size; - if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_header_info(rbd_dev); - else - ret = rbd_dev_v2_header_info(rbd_dev); - /* If it's a mapped snapshot, validate its EXISTS flag */ + ret = rbd_dev_header_info(rbd_dev); + if (ret) + return ret; + + /* + * If there is a parent, see if it has disappeared due to the + * mapped image getting flattened. + */ + if (rbd_dev->parent) { + ret = rbd_dev_v2_parent_info(rbd_dev); + if (ret) + return ret; + } + + if (rbd_dev->spec->snap_id == CEPH_NOSNAP) { + if (rbd_dev->mapping.size != rbd_dev->header.image_size) + rbd_dev->mapping.size = rbd_dev->header.image_size; + } else { + /* validate mapped snapshot's EXISTS flag */ + rbd_exists_validate(rbd_dev); + } - rbd_exists_validate(rbd_dev); up_write(&rbd_dev->header_rwsem); - if (mapping_size != rbd_dev->mapping.size) { + if (mapping_size != rbd_dev->mapping.size) rbd_dev_update_size(rbd_dev); - } - return ret; + return 0; } static int rbd_init_disk(struct rbd_device *rbd_dev) @@ -3696,46 +3733,36 @@ static ssize_t rbd_snap_show(struct device *dev, } /* - * For an rbd v2 image, shows the pool id, image id, and snapshot id - * for the parent image. If there is no parent, simply shows - * "(no parent image)". + * For a v2 image, shows the chain of parent images, separated by empty + * lines. For v1 images or if there is no parent, shows "(no parent + * image)". */ static ssize_t rbd_parent_show(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - struct rbd_spec *spec = rbd_dev->parent_spec; - int count; - char *bufp = buf; + ssize_t count = 0; - if (!spec) + if (!rbd_dev->parent) return sprintf(buf, "(no parent image)\n"); - count = sprintf(bufp, "pool_id %llu\npool_name %s\n", - (unsigned long long) spec->pool_id, spec->pool_name); - if (count < 0) - return count; - bufp += count; - - count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id, - spec->image_name ? spec->image_name : "(unknown)"); - if (count < 0) - return count; - bufp += count; - - count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n", - (unsigned long long) spec->snap_id, spec->snap_name); - if (count < 0) - return count; - bufp += count; - - count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap); - if (count < 0) - return count; - bufp += count; + for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) { + struct rbd_spec *spec = rbd_dev->parent_spec; + + count += sprintf(&buf[count], "%s" + "pool_id %llu\npool_name %s\n" + "image_id %s\nimage_name %s\n" + "snap_id %llu\nsnap_name %s\n" + "overlap %llu\n", + !count ? "" : "\n", /* first? 
*/ + spec->pool_id, spec->pool_name, + spec->image_id, spec->image_name ?: "(unknown)", + spec->snap_id, spec->snap_name, + rbd_dev->parent_overlap); + } - return (ssize_t) (bufp - buf); + return count; } static ssize_t rbd_image_refresh(struct device *dev, @@ -3748,9 +3775,9 @@ static ssize_t rbd_image_refresh(struct device *dev, ret = rbd_dev_refresh(rbd_dev); if (ret) - rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret); + return ret; - return ret < 0 ? ret : size; + return size; } static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); @@ -3822,6 +3849,9 @@ static struct rbd_spec *rbd_spec_alloc(void) spec = kzalloc(sizeof (*spec), GFP_KERNEL); if (!spec) return NULL; + + spec->pool_id = CEPH_NOPOOL; + spec->snap_id = CEPH_NOSNAP; kref_init(&spec->kref); return spec; @@ -3848,6 +3878,8 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, return NULL; spin_lock_init(&rbd_dev->lock); + INIT_LIST_HEAD(&rbd_dev->rq_queue); + INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn); rbd_dev->flags = 0; atomic_set(&rbd_dev->parent_ref, 0); INIT_LIST_HEAD(&rbd_dev->node); @@ -4021,7 +4053,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) goto out_err; } - snapid = cpu_to_le64(CEPH_NOSNAP); + snapid = cpu_to_le64(rbd_dev->spec->snap_id); ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_parent", &snapid, sizeof (snapid), @@ -4059,7 +4091,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) ret = -EIO; if (pool_id > (u64)U32_MAX) { - rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", + rbd_warn(NULL, "parent pool id too large (%llu > %u)", (unsigned long long)pool_id, U32_MAX); goto out_err; } @@ -4083,6 +4115,8 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) parent_spec->snap_id = snap_id; rbd_dev->parent_spec = parent_spec; parent_spec = NULL; /* rbd_dev now owns this */ + } else { + kfree(image_id); } /* @@ -4110,8 +4144,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) * overlap is zero we just pretend there was * no parent image. */ - rbd_warn(rbd_dev, "ignoring parent of " - "clone with overlap 0\n"); + rbd_warn(rbd_dev, "ignoring parent with overlap 0"); } } out: @@ -4279,18 +4312,38 @@ static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) } /* - * When an rbd image has a parent image, it is identified by the - * pool, image, and snapshot ids (not names). This function fills - * in the names for those ids. (It's OK if we can't figure out the - * name for an image id, but the pool and snapshot ids should always - * exist and have names.) All names in an rbd spec are dynamically - * allocated. + * An image being mapped will have everything but the snap id. + */ +static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev) +{ + struct rbd_spec *spec = rbd_dev->spec; + + rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name); + rbd_assert(spec->image_id && spec->image_name); + rbd_assert(spec->snap_name); + + if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { + u64 snap_id; + + snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); + if (snap_id == CEPH_NOSNAP) + return -ENOENT; + + spec->snap_id = snap_id; + } else { + spec->snap_id = CEPH_NOSNAP; + } + + return 0; +} + +/* + * A parent image will have all ids but none of the names. * - * When an image being mapped (not a parent) is probed, we have the - * pool name and pool id, image name and image id, and the snapshot - * name. The only thing we're missing is the snapshot id. 
+ * All names in an rbd spec are dynamically allocated. It's OK if we + * can't figure out the name for an image id. */ -static int rbd_dev_spec_update(struct rbd_device *rbd_dev) +static int rbd_spec_fill_names(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_spec *spec = rbd_dev->spec; @@ -4299,24 +4352,9 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev) const char *snap_name; int ret; - /* - * An image being mapped will have the pool name (etc.), but - * we need to look up the snapshot id. - */ - if (spec->pool_name) { - if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { - u64 snap_id; - - snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); - if (snap_id == CEPH_NOSNAP) - return -ENOENT; - spec->snap_id = snap_id; - } else { - spec->snap_id = CEPH_NOSNAP; - } - - return 0; - } + rbd_assert(spec->pool_id != CEPH_NOPOOL); + rbd_assert(spec->image_id); + rbd_assert(spec->snap_id != CEPH_NOSNAP); /* Get the pool name; we have to make our own copy of this */ @@ -4335,7 +4373,7 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev) if (!image_name) rbd_warn(rbd_dev, "unable to get image name"); - /* Look up the snapshot name, and make a copy */ + /* Fetch the snapshot name */ snap_name = rbd_snap_name(rbd_dev, spec->snap_id); if (IS_ERR(snap_name)) { @@ -4348,10 +4386,10 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev) spec->snap_name = snap_name; return 0; + out_err: kfree(image_name); kfree(pool_name); - return ret; } @@ -4483,43 +4521,22 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) return ret; } - /* - * If the image supports layering, get the parent info. We - * need to probe the first time regardless. Thereafter we - * only need to if there's a parent, to see if it has - * disappeared due to the mapped image getting flattened. - */ - if (rbd_dev->header.features & RBD_FEATURE_LAYERING && - (first_time || rbd_dev->parent_spec)) { - bool warn; - - ret = rbd_dev_v2_parent_info(rbd_dev); - if (ret) - return ret; - - /* - * Print a warning if this is the initial probe and - * the image has a parent. Don't print it if the - * image now being probed is itself a parent. We - * can tell at this point because we won't know its - * pool name yet (just its pool id). 
- */ - warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name; - if (first_time && warn) - rbd_warn(rbd_dev, "WARNING: kernel layering " - "is EXPERIMENTAL!"); - } - - if (rbd_dev->spec->snap_id == CEPH_NOSNAP) - if (rbd_dev->mapping.size != rbd_dev->header.image_size) - rbd_dev->mapping.size = rbd_dev->header.image_size; - ret = rbd_dev_v2_snap_context(rbd_dev); dout("rbd_dev_v2_snap_context returned %d\n", ret); return ret; } +static int rbd_dev_header_info(struct rbd_device *rbd_dev) +{ + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + + if (rbd_dev->image_format == 1) + return rbd_dev_v1_header_info(rbd_dev); + + return rbd_dev_v2_header_info(rbd_dev); +} + static int rbd_bus_add_dev(struct rbd_device *rbd_dev) { struct device *dev; @@ -5066,12 +5083,19 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) ret = rbd_dev_mapping_set(rbd_dev); if (ret) goto err_out_disk; + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only); + rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name); + if (!rbd_dev->rq_wq) { + ret = -ENOMEM; + goto err_out_mapping; + } + ret = rbd_bus_add_dev(rbd_dev); if (ret) - goto err_out_mapping; + goto err_out_workqueue; /* Everything's ready. Announce the disk to the world. */ @@ -5083,6 +5107,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) return ret; +err_out_workqueue: + destroy_workqueue(rbd_dev->rq_wq); + rbd_dev->rq_wq = NULL; err_out_mapping: rbd_dev_mapping_clear(rbd_dev); err_out_disk: @@ -5155,8 +5182,6 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) ret = rbd_dev_image_id(rbd_dev); if (ret) return ret; - rbd_assert(rbd_dev->spec->image_id); - rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); ret = rbd_dev_header_name(rbd_dev); if (ret) @@ -5168,25 +5193,45 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) goto out_header_name; } - if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_header_info(rbd_dev); - else - ret = rbd_dev_v2_header_info(rbd_dev); + ret = rbd_dev_header_info(rbd_dev); if (ret) goto err_out_watch; - ret = rbd_dev_spec_update(rbd_dev); + /* + * If this image is the one being mapped, we have pool name and + * id, image name and id, and snap name - need to fill snap id. + * Otherwise this is a parent image, identified by pool, image + * and snap ids - need to fill in names for those ids. + */ + if (mapping) + ret = rbd_spec_fill_snap_id(rbd_dev); + else + ret = rbd_spec_fill_names(rbd_dev); if (ret) goto err_out_probe; + if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { + ret = rbd_dev_v2_parent_info(rbd_dev); + if (ret) + goto err_out_probe; + + /* + * Need to warn users if this image is the one being + * mapped and has a parent. 
+ */ + if (mapping && rbd_dev->parent_spec) + rbd_warn(rbd_dev, + "WARNING: kernel layering is EXPERIMENTAL!"); + } + ret = rbd_dev_probe_parent(rbd_dev); if (ret) goto err_out_probe; dout("discovered format %u image, header name is %s\n", rbd_dev->image_format, rbd_dev->header_name); - return 0; + err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: @@ -5199,9 +5244,6 @@ err_out_format: rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; - - dout("probe failed, returning %d\n", ret); - return ret; } @@ -5243,7 +5285,7 @@ static ssize_t do_rbd_add(struct bus_type *bus, /* The ceph file layout needs to fit pool id in 32 bits */ if (spec->pool_id > (u64)U32_MAX) { - rbd_warn(NULL, "pool id too large (%llu > %u)\n", + rbd_warn(NULL, "pool id too large (%llu > %u)", (unsigned long long)spec->pool_id, U32_MAX); rc = -EIO; goto err_out_client; @@ -5314,6 +5356,7 @@ static void rbd_dev_device_release(struct device *dev) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + destroy_workqueue(rbd_dev->rq_wq); rbd_free_disk(rbd_dev); clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); rbd_dev_mapping_clear(rbd_dev); diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index a8de2eec6ff3..820b4009d5f7 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -1137,7 +1137,7 @@ static const struct pci_error_handlers rsxx_err_handler = { .slot_reset = rsxx_slot_reset, }; -static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { +static const struct pci_device_id rsxx_pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)}, {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)}, {0,}, diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 608532d3f8c9..8fcdcfb4b472 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -4112,16 +4112,14 @@ static int skd_cons_skcomp(struct skd_device *skdev) skdev->name, __func__, __LINE__, nbytes, SKD_N_COMPLETION_ENTRY); - skcomp = pci_alloc_consistent(skdev->pdev, nbytes, - &skdev->cq_dma_address); + skcomp = pci_zalloc_consistent(skdev->pdev, nbytes, + &skdev->cq_dma_address); if (skcomp == NULL) { rc = -ENOMEM; goto err_out; } - memset(skcomp, 0, nbytes); - skdev->skcomp_table = skcomp; skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + sizeof(*skcomp) * @@ -4304,15 +4302,14 @@ static int skd_cons_skspcl(struct skd_device *skdev) nbytes = SKD_N_SPECIAL_FITMSG_BYTES; - skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, - &skspcl->mb_dma_address); + skspcl->msg_buf = + pci_zalloc_consistent(skdev->pdev, nbytes, + &skspcl->mb_dma_address); if (skspcl->msg_buf == NULL) { rc = -ENOMEM; goto err_out; } - memset(skspcl->msg_buf, 0, nbytes); - skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * SKD_N_SG_PER_SPECIAL, GFP_KERNEL); if (skspcl->req.sg == NULL) { @@ -4353,25 +4350,21 @@ static int skd_cons_sksb(struct skd_device *skdev) nbytes = SKD_N_INTERNAL_BYTES; - skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes, - &skspcl->db_dma_address); + skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes, + &skspcl->db_dma_address); if (skspcl->data_buf == NULL) { rc = -ENOMEM; goto err_out; } - memset(skspcl->data_buf, 0, nbytes); - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; - skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, - &skspcl->mb_dma_address); + skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes, + &skspcl->mb_dma_address); if (skspcl->msg_buf == NULL) { rc = -ENOMEM; goto err_out; } - 
memset(skspcl->msg_buf, 0, nbytes); - skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, &skspcl->req.sksg_dma_address); if (skspcl->req.sksg_list == NULL) { @@ -4773,7 +4766,7 @@ static const struct block_device_operations skd_blockdev_ops = { ***************************************************************************** */ -static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = { +static const struct pci_device_id skd_pci_tbl[] = { { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, { 0 } /* terminate list */ diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index f63d358f3d93..0a581400de0f 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -15,17 +15,22 @@ #include <linux/numa.h> #define PART_BITS 4 +#define VQ_NAME_LEN 16 static int major; static DEFINE_IDA(vd_index_ida); static struct workqueue_struct *virtblk_wq; +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + struct virtio_blk { struct virtio_device *vdev; - struct virtqueue *vq; - spinlock_t vq_lock; /* The disk structure for the kernel. */ struct gendisk *disk; @@ -47,6 +52,10 @@ struct virtio_blk /* Ida index - used to track minor number allocations. */ int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; }; struct virtblk_req @@ -133,14 +142,15 @@ static void virtblk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; bool req_done = false; + int qid = vq->index; struct virtblk_req *vbr; unsigned long flags; unsigned int len; - spin_lock_irqsave(&vblk->vq_lock, flags); + spin_lock_irqsave(&vblk->vqs[qid].lock, flags); do { virtqueue_disable_cb(vq); - while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { + while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { blk_mq_complete_request(vbr->req); req_done = true; } @@ -151,7 +161,7 @@ static void virtblk_done(struct virtqueue *vq) /* In case queue is stopped waiting for more buffers. 
*/ if (req_done) blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); - spin_unlock_irqrestore(&vblk->vq_lock, flags); + spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) @@ -160,6 +170,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; unsigned int num; + int qid = hctx->queue_num; const bool last = (req->cmd_flags & REQ_END) != 0; int err; bool notify = false; @@ -202,12 +213,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) vbr->out_hdr.type |= VIRTIO_BLK_T_IN; } - spin_lock_irqsave(&vblk->vq_lock, flags); - err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); + spin_lock_irqsave(&vblk->vqs[qid].lock, flags); + err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); if (err) { - virtqueue_kick(vblk->vq); + virtqueue_kick(vblk->vqs[qid].vq); blk_mq_stop_hw_queue(hctx); - spin_unlock_irqrestore(&vblk->vq_lock, flags); + spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); /* Out of mem doesn't actually happen, since we fall back * to direct descriptors */ if (err == -ENOMEM || err == -ENOSPC) @@ -215,12 +226,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) return BLK_MQ_RQ_QUEUE_ERROR; } - if (last && virtqueue_kick_prepare(vblk->vq)) + if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) notify = true; - spin_unlock_irqrestore(&vblk->vq_lock, flags); + spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); if (notify) - virtqueue_notify(vblk->vq); + virtqueue_notify(vblk->vqs[qid].vq); return BLK_MQ_RQ_QUEUE_OK; } @@ -377,12 +388,64 @@ static void virtblk_config_changed(struct virtio_device *vdev) static int init_vq(struct virtio_blk *vblk) { int err = 0; + int i; + vq_callback_t **callbacks; + const char **names; + struct virtqueue **vqs; + unsigned short num_vqs; + struct virtio_device *vdev = vblk->vdev; + + err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ, + struct virtio_blk_config, num_queues, + &num_vqs); + if (err) + num_vqs = 1; + + vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); + if (!vblk->vqs) { + err = -ENOMEM; + goto out; + } + + names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL); + if (!names) + goto err_names; + + callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL); + if (!callbacks) + goto err_callbacks; + + vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); + if (!vqs) + goto err_vqs; - /* We expect one virtqueue, for output. */ - vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests"); - if (IS_ERR(vblk->vq)) - err = PTR_ERR(vblk->vq); + for (i = 0; i < num_vqs; i++) { + callbacks[i] = virtblk_done; + snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); + names[i] = vblk->vqs[i].name; + } + + /* Discover virtqueues and write information to configuration. */ + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + if (err) + goto err_find_vqs; + for (i = 0; i < num_vqs; i++) { + spin_lock_init(&vblk->vqs[i].lock); + vblk->vqs[i].vq = vqs[i]; + } + vblk->num_vqs = num_vqs; + + err_find_vqs: + kfree(vqs); + err_vqs: + kfree(callbacks); + err_callbacks: + kfree(names); + err_names: + if (err) + kfree(vblk->vqs); + out: return err; } @@ -551,7 +614,6 @@ static int virtblk_probe(struct virtio_device *vdev) err = init_vq(vblk); if (err) goto out_free_vblk; - spin_lock_init(&vblk->vq_lock); /* FIXME: How many partitions? How long is a piece of string? 
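
One invariant from the virtio-blk multiqueue conversion is easy to miss in the long hunk: the submit path picks the per-queue slot with hctx->queue_num while the completion path uses vq->index, and both must resolve to the same vblk->vqs[] entry. That holds because init_vq() creates exactly one virtqueue per slot and relies on find_vqs() handing them back in index order, with tag_set.nr_hw_queues set to num_vqs further down. The helper below is hypothetical; it only restates that invariant:

  /* Both virtio_queue_rq() (qid = hctx->queue_num) and virtblk_done()
   * (qid = vq->index) end up on the same slot; each slot now carries its
   * own lock instead of the old global vq_lock. */
  static struct virtio_blk_vq *virtblk_vq_sketch(struct virtio_blk *vblk, int qid)
  {
          return &vblk->vqs[qid];
  }
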
*/ vblk->disk = alloc_disk(1 << PART_BITS); @@ -562,7 +624,7 @@ static int virtblk_probe(struct virtio_device *vdev) /* Default queue sizing is to fill the ring. */ if (!virtblk_queue_depth) { - virtblk_queue_depth = vblk->vq->num_free; + virtblk_queue_depth = vblk->vqs[0].vq->num_free; /* ... but without indirect descs, we use 2 descs per req */ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) virtblk_queue_depth /= 2; @@ -570,7 +632,6 @@ static int virtblk_probe(struct virtio_device *vdev) memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); vblk->tag_set.ops = &virtio_mq_ops; - vblk->tag_set.nr_hw_queues = 1; vblk->tag_set.queue_depth = virtblk_queue_depth; vblk->tag_set.numa_node = NUMA_NO_NODE; vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; @@ -578,6 +639,7 @@ static int virtblk_probe(struct virtio_device *vdev) sizeof(struct virtblk_req) + sizeof(struct scatterlist) * sg_elems; vblk->tag_set.driver_data = vblk; + vblk->tag_set.nr_hw_queues = vblk->num_vqs; err = blk_mq_alloc_tag_set(&vblk->tag_set); if (err) @@ -727,6 +789,7 @@ static void virtblk_remove(struct virtio_device *vdev) refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); put_disk(vblk->disk); vdev->config->del_vqs(vdev); + kfree(vblk->vqs); kfree(vblk); /* Only free device id if we don't have any users */ @@ -777,7 +840,8 @@ static const struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, - VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE + VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, + VIRTIO_BLK_F_MQ, }; static struct virtio_driver virtio_blk = { diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index ab3ea62e5dfc..c4328d9d9981 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1203,7 +1203,6 @@ static struct platform_driver ace_platform_driver = { .probe = ace_probe, .remove = ace_remove, .driver = { - .owner = THIS_MODULE, .name = "xsysace", .of_match_table = ace_of_match, }, diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 36e54be402df..d00831c3d731 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -183,19 +183,32 @@ static ssize_t comp_algorithm_store(struct device *dev, static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return meta->table[index].flags & BIT(flag); + return meta->table[index].value & BIT(flag); } static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - meta->table[index].flags |= BIT(flag); + meta->table[index].value |= BIT(flag); } static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - meta->table[index].flags &= ~BIT(flag); + meta->table[index].value &= ~BIT(flag); +} + +static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) +{ + return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); +} + +static void zram_set_obj_size(struct zram_meta *meta, + u32 index, size_t size) +{ + unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT; + + meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size; } static inline int is_partial_io(struct bio_vec *bvec) @@ -255,7 +268,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize) goto free_table; } - rwlock_init(&meta->tb_lock); return meta; free_table: @@ -304,7 +316,12 @@ static void handle_zero_page(struct bio_vec 
*bvec) flush_dcache_page(page); } -/* NOTE: caller should hold meta->tb_lock with write-side */ + +/* + * To protect concurrent access to the same index entry, + * caller should hold this table index entry's bit_spinlock to + * indicate this index entry is accessing. + */ static void zram_free_page(struct zram *zram, size_t index) { struct zram_meta *meta = zram->meta; @@ -324,11 +341,12 @@ static void zram_free_page(struct zram *zram, size_t index) zs_free(meta->mem_pool, handle); - atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size); + atomic64_sub(zram_get_obj_size(meta, index), + &zram->stats.compr_data_size); atomic64_dec(&zram->stats.pages_stored); meta->table[index].handle = 0; - meta->table[index].size = 0; + zram_set_obj_size(meta, index, 0); } static int zram_decompress_page(struct zram *zram, char *mem, u32 index) @@ -337,14 +355,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) unsigned char *cmem; struct zram_meta *meta = zram->meta; unsigned long handle; - u16 size; + size_t size; - read_lock(&meta->tb_lock); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); handle = meta->table[index].handle; - size = meta->table[index].size; + size = zram_get_obj_size(meta, index); if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { - read_unlock(&meta->tb_lock); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); clear_page(mem); return 0; } @@ -355,12 +373,11 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) else ret = zcomp_decompress(zram->comp, cmem, size, mem); zs_unmap_object(meta->mem_pool, handle); - read_unlock(&meta->tb_lock); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); /* Should NEVER happen. Return bio error if it does. */ if (unlikely(ret)) { pr_err("Decompression failed! err=%d, page=%u\n", ret, index); - atomic64_inc(&zram->stats.failed_reads); return ret; } @@ -376,14 +393,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, struct zram_meta *meta = zram->meta; page = bvec->bv_page; - read_lock(&meta->tb_lock); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); if (unlikely(!meta->table[index].handle) || zram_test_flag(meta, index, ZRAM_ZERO)) { - read_unlock(&meta->tb_lock); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); handle_zero_page(bvec); return 0; } - read_unlock(&meta->tb_lock); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); if (is_partial_io(bvec)) /* Use a temporary buffer to decompress the page */ @@ -461,10 +478,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, if (page_zero_filled(uncmem)) { kunmap_atomic(user_mem); /* Free memory associated with this sector now. */ - write_lock(&zram->meta->tb_lock); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); zram_set_flag(meta, index, ZRAM_ZERO); - write_unlock(&zram->meta->tb_lock); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); atomic64_inc(&zram->stats.zero_pages); ret = 0; @@ -514,12 +531,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, * Free memory associated with this sector * before overwriting unused sectors. 
*/ - write_lock(&zram->meta->tb_lock); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); meta->table[index].handle = handle; - meta->table[index].size = clen; - write_unlock(&zram->meta->tb_lock); + zram_set_obj_size(meta, index, clen); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); /* Update stats */ atomic64_add(clen, &zram->stats.compr_data_size); @@ -529,8 +546,6 @@ out: zcomp_strm_release(zram->comp, zstrm); if (is_partial_io(bvec)) kfree(uncmem); - if (ret) - atomic64_inc(&zram->stats.failed_writes); return ret; } @@ -548,6 +563,13 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, ret = zram_bvec_write(zram, bvec, index, offset); } + if (unlikely(ret)) { + if (rw == READ) + atomic64_inc(&zram->stats.failed_reads); + else + atomic64_inc(&zram->stats.failed_writes); + } + return ret; } @@ -560,6 +582,7 @@ static void zram_bio_discard(struct zram *zram, u32 index, int offset, struct bio *bio) { size_t n = bio->bi_iter.bi_size; + struct zram_meta *meta = zram->meta; /* * zram manages data in physical block size units. Because logical block @@ -580,13 +603,9 @@ static void zram_bio_discard(struct zram *zram, u32 index, } while (n >= PAGE_SIZE) { - /* - * Discard request can be large so the lock hold times could be - * lengthy. So take the lock once per page. - */ - write_lock(&zram->meta->tb_lock); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); - write_unlock(&zram->meta->tb_lock); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); index++; n -= PAGE_SIZE; } @@ -821,9 +840,9 @@ static void zram_slot_free_notify(struct block_device *bdev, zram = bdev->bd_disk->private_data; meta = zram->meta; - write_lock(&meta->tb_lock); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); - write_unlock(&meta->tb_lock); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); atomic64_inc(&zram->stats.notify_free); } diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 7f21c145e317..e0f725c87cc6 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -43,7 +43,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; /*-- End of configurable params */ #define SECTOR_SHIFT 9 -#define SECTOR_SIZE (1 << SECTOR_SHIFT) #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) #define ZRAM_LOGICAL_BLOCK_SHIFT 12 @@ -51,10 +50,24 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; #define ZRAM_SECTOR_PER_LOGICAL_BLOCK \ (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT)) -/* Flags for zram pages (table[page_no].flags) */ + +/* + * The lower ZRAM_FLAG_SHIFT bits of table.value is for + * object size (excluding header), the higher bits is for + * zram_pageflags. + * + * zram is mainly used for memory efficiency so we want to keep memory + * footprint small so we can squeeze size and flags into a field. + * The lower ZRAM_FLAG_SHIFT bits is for object size (excluding header), + * the higher bits is for zram_pageflags. 
+ */ +#define ZRAM_FLAG_SHIFT 24 + +/* Flags for zram pages (table[page_no].value) */ enum zram_pageflags { /* Page consists entirely of zeros */ - ZRAM_ZERO, + ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1, + ZRAM_ACCESS, /* page in now accessed */ __NR_ZRAM_PAGEFLAGS, }; @@ -62,17 +75,16 @@ enum zram_pageflags { /*-- Data structures */ /* Allocated for each disk page */ -struct table { +struct zram_table_entry { unsigned long handle; - u16 size; /* object size (excluding header) */ - u8 flags; -} __aligned(4); + unsigned long value; +}; struct zram_stats { atomic64_t compr_data_size; /* compressed size of pages stored */ atomic64_t num_reads; /* failed + successful */ atomic64_t num_writes; /* --do-- */ - atomic64_t failed_reads; /* should NEVER! happen */ + atomic64_t failed_reads; /* can happen when memory is too low */ atomic64_t failed_writes; /* can happen when memory is too low */ atomic64_t invalid_io; /* non-page-aligned I/O requests */ atomic64_t notify_free; /* no. of swap slot free notifications */ @@ -81,8 +93,7 @@ struct zram_stats { }; struct zram_meta { - rwlock_t tb_lock; /* protect table */ - struct table *table; + struct zram_table_entry *table; struct zs_pool *mem_pool; }; diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 1f37d9870e7a..603eb1be4f6a 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -50,6 +50,14 @@ config ARM_CCI Driver supporting the CCI cache coherent interconnect for ARM platforms. +config ARM_CCN + bool "ARM CCN driver support" + depends on ARM || ARM64 + depends on PERF_EVENTS + help + PMU (perf) driver supporting the ARM CCN (Cache Coherent Network) + interconnect. + config VEXPRESS_CONFIG bool "Versatile Express configuration bus" default y if ARCH_VEXPRESS diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 6a4ea7e4af1a..2973c18cbcc2 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -9,7 +9,9 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o # Interconnect bus driver for OMAP SoCs. obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o -# CCI cache coherent interconnect for ARM platforms + +# Interconnect bus drivers for ARM platforms obj-$(CONFIG_ARM_CCI) += arm-cci.o +obj-$(CONFIG_ARM_CCN) += arm-ccn.o obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 5a86da97a70b..7af78df241f2 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -397,7 +397,8 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev) hw_counter = &event->hw; /* Did this counter overflow? */ - if (!pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG) + if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & + CCI_PMU_OVRFLW_FLAG)) continue; pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW); diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c new file mode 100644 index 000000000000..a60f26400705 --- /dev/null +++ b/drivers/bus/arm-ccn.c @@ -0,0 +1,1390 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/ctype.h> +#include <linux/hrtimer.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define CCN_NUM_XP_PORTS 2 +#define CCN_NUM_VCS 4 +#define CCN_NUM_REGIONS 256 +#define CCN_REGION_SIZE 0x10000 + +#define CCN_ALL_OLY_ID 0xff00 +#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0 +#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f +#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8 +#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f + +#define CCN_MN_ERRINT_STATUS 0x0008 +#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT 0x11 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88 +#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0 +#define CCN_MN_ERR_SIG_VAL_63_0 0x0300 +#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1) + +#define CCN_DT_ACTIVE_DSM 0x0000 +#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8) +#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff +#define CCN_DT_CTL 0x0028 +#define CCN_DT_CTL__DT_EN (1 << 0) +#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8) +#define CCN_DT_PMCCNTR 0x0140 +#define CCN_DT_PMCCNTRSR 0x0190 +#define CCN_DT_PMOVSR 0x0198 +#define CCN_DT_PMOVSR_CLR 0x01a0 +#define CCN_DT_PMCR 0x01a8 +#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6) +#define CCN_DT_PMCR__PMU_EN (1 << 0) +#define CCN_DT_PMSR 0x01b0 +#define CCN_DT_PMSR_REQ 0x01b8 +#define CCN_DT_PMSR_CLR 0x01c0 + +#define CCN_HNF_PMU_EVENT_SEL 0x0600 +#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_XP_DT_CONFIG 0x0300 +#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4) +#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf +#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0 +#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1 +#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n)) +#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n)) +#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n)) +#define CCN_XP_DT_INTERFACE_SEL 0x0308 +#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1 +#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1 +#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3 +#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40) +#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40) +#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40) +#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40) +#define CCN_XP_DT_CONTROL 0x0370 +#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0) +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4) +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf +#define CCN_XP_PMU_EVENT_SEL 0x0600 +#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7) +#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f + +#define 
CCN_SBAS_PMU_EVENT_SEL 0x0600 +#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_RNI_PMU_EVENT_SEL 0x0600 +#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_TYPE_MN 0x01 +#define CCN_TYPE_DT 0x02 +#define CCN_TYPE_HNF 0x04 +#define CCN_TYPE_HNI 0x05 +#define CCN_TYPE_XP 0x08 +#define CCN_TYPE_SBSX 0x0c +#define CCN_TYPE_SBAS 0x10 +#define CCN_TYPE_RNI_1P 0x14 +#define CCN_TYPE_RNI_2P 0x15 +#define CCN_TYPE_RNI_3P 0x16 +#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */ +#define CCN_TYPE_RND_2P 0x19 +#define CCN_TYPE_RND_3P 0x1a +#define CCN_TYPE_CYCLES 0xff /* Pseudotype */ + +#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */ + +#define CCN_NUM_PMU_EVENTS 4 +#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */ +#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */ +#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS + +#define CCN_NUM_PREDEFINED_MASKS 4 +#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0) +#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1) +#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2) +#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3) + +struct arm_ccn_component { + void __iomem *base; + u32 type; + + DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS); + union { + struct { + DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS); + } xp; + }; +}; + +#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \ + struct arm_ccn_dt, pmu), struct arm_ccn, dt) + +struct arm_ccn_dt { + int id; + void __iomem *base; + + spinlock_t config_lock; + + DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1); + struct { + struct arm_ccn_component *source; + struct perf_event *event; + } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1]; + + struct { + u64 l, h; + } cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS]; + + struct hrtimer hrtimer; + + struct pmu pmu; +}; + +struct arm_ccn { + struct device *dev; + void __iomem *base; + unsigned irq_used:1; + unsigned sbas_present:1; + unsigned sbsx_present:1; + + int num_nodes; + struct arm_ccn_component *node; + + int num_xps; + struct arm_ccn_component *xp; + + struct arm_ccn_dt dt; +}; + + +static int arm_ccn_node_to_xp(int node) +{ + return node / CCN_NUM_XP_PORTS; +} + +static int arm_ccn_node_to_xp_port(int node) +{ + return node % CCN_NUM_XP_PORTS; +} + + +/* + * Bit shifts and masks in these defines must be kept in sync with + * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below! 
+ */ +#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff) +#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff) +#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff) +#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff) +#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3) +#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7) +#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1) +#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf) + +static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port) +{ + *config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24)); + *config |= (node_xp << 0) | (type << 8) | (port << 24); +} + +static ssize_t arm_ccn_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = container_of(attr, + struct dev_ext_attribute, attr); + + return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var); +} + +#define CCN_FORMAT_ATTR(_name, _config) \ + struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \ + { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \ + NULL), _config } + +static CCN_FORMAT_ATTR(node, "config:0-7"); +static CCN_FORMAT_ATTR(xp, "config:0-7"); +static CCN_FORMAT_ATTR(type, "config:8-15"); +static CCN_FORMAT_ATTR(event, "config:16-23"); +static CCN_FORMAT_ATTR(port, "config:24-25"); +static CCN_FORMAT_ATTR(vc, "config:26-28"); +static CCN_FORMAT_ATTR(dir, "config:29-29"); +static CCN_FORMAT_ATTR(mask, "config:30-33"); +static CCN_FORMAT_ATTR(cmp_l, "config1:0-62"); +static CCN_FORMAT_ATTR(cmp_h, "config2:0-59"); + +static struct attribute *arm_ccn_pmu_format_attrs[] = { + &arm_ccn_pmu_format_attr_node.attr.attr, + &arm_ccn_pmu_format_attr_xp.attr.attr, + &arm_ccn_pmu_format_attr_type.attr.attr, + &arm_ccn_pmu_format_attr_event.attr.attr, + &arm_ccn_pmu_format_attr_port.attr.attr, + &arm_ccn_pmu_format_attr_vc.attr.attr, + &arm_ccn_pmu_format_attr_dir.attr.attr, + &arm_ccn_pmu_format_attr_mask.attr.attr, + &arm_ccn_pmu_format_attr_cmp_l.attr.attr, + &arm_ccn_pmu_format_attr_cmp_h.attr.attr, + NULL +}; + +static struct attribute_group arm_ccn_pmu_format_attr_group = { + .name = "format", + .attrs = arm_ccn_pmu_format_attrs, +}; + + +struct arm_ccn_pmu_event { + struct device_attribute attr; + u32 type; + u32 event; + int num_ports; + int num_vcs; + const char *def; + int mask; +}; + +#define CCN_EVENT_ATTR(_name) \ + __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL) + +/* + * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on + * their ports in XP they are connected to. For the sake of usability they are + * explicitly defined here (and translated into a relevant watchpoint in + * arm_ccn_pmu_event_init()) so the user can easily request them without deep + * knowledge of the flit format. 
+ */ + +#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \ + .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \ + .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \ + .def = _def, .mask = _mask, } + +#define CCN_EVENT_HNI(_name, _def, _mask) { \ + .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \ + .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ + .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } + +#define CCN_EVENT_SBSX(_name, _def, _mask) { \ + .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \ + .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ + .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } + +#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \ + .type = CCN_TYPE_HNF, .event = _event, } + +#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \ + .type = CCN_TYPE_XP, .event = _event, \ + .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, } + +/* + * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending + * on configuration. One of them is picked to represent the whole group, + * as they all share the same event types. + */ +#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \ + .type = CCN_TYPE_RNI_3P, .event = _event, } + +#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \ + .type = CCN_TYPE_SBAS, .event = _event, } + +#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \ + .type = CCN_TYPE_CYCLES } + + +static ssize_t arm_ccn_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn_pmu_event *event = container_of(attr, + struct arm_ccn_pmu_event, attr); + ssize_t res; + + res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type); + if (event->event) + res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", + event->event); + if (event->def) + res += snprintf(buf + res, PAGE_SIZE - res, ",%s", + event->def); + if (event->mask) + res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", + event->mask); + res += snprintf(buf + res, PAGE_SIZE - res, "\n"); + + return res; +} + +static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + struct device_attribute *dev_attr = container_of(attr, + struct device_attribute, attr); + struct arm_ccn_pmu_event *event = container_of(dev_attr, + struct arm_ccn_pmu_event, attr); + + if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) + return 0; + if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present) + return 0; + + return attr->mode; +} + +static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = { + CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), + CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", + CCN_IDX_MASK_ORDER), + CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), 
+ CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", + CCN_IDX_MASK_ORDER), + CCN_EVENT_HNF(cache_miss, 0x1), + CCN_EVENT_HNF(l3_sf_cache_access, 0x02), + CCN_EVENT_HNF(cache_fill, 0x3), + CCN_EVENT_HNF(pocq_retry, 0x4), + CCN_EVENT_HNF(pocq_reqs_recvd, 0x5), + CCN_EVENT_HNF(sf_hit, 0x6), + CCN_EVENT_HNF(sf_evictions, 0x7), + CCN_EVENT_HNF(snoops_sent, 0x8), + CCN_EVENT_HNF(snoops_broadcast, 0x9), + CCN_EVENT_HNF(l3_eviction, 0xa), + CCN_EVENT_HNF(l3_fill_invalid_way, 0xb), + CCN_EVENT_HNF(mc_retries, 0xc), + CCN_EVENT_HNF(mc_reqs, 0xd), + CCN_EVENT_HNF(qos_hh_retry, 0xe), + CCN_EVENT_RNI(rdata_beats_p0, 0x1), + CCN_EVENT_RNI(rdata_beats_p1, 0x2), + CCN_EVENT_RNI(rdata_beats_p2, 0x3), + CCN_EVENT_RNI(rxdat_flits, 0x4), + CCN_EVENT_RNI(txdat_flits, 0x5), + CCN_EVENT_RNI(txreq_flits, 0x6), + CCN_EVENT_RNI(txreq_flits_retried, 0x7), + CCN_EVENT_RNI(rrt_full, 0x8), + CCN_EVENT_RNI(wrt_full, 0x9), + CCN_EVENT_RNI(txreq_flits_replayed, 0xa), + CCN_EVENT_XP(upload_starvation, 0x1), + CCN_EVENT_XP(download_starvation, 0x2), + CCN_EVENT_XP(respin, 0x3), + CCN_EVENT_XP(valid_flit, 0x4), + CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT), + CCN_EVENT_SBAS(rdata_beats_p0, 0x1), + CCN_EVENT_SBAS(rxdat_flits, 0x4), + CCN_EVENT_SBAS(txdat_flits, 0x5), + CCN_EVENT_SBAS(txreq_flits, 0x6), + CCN_EVENT_SBAS(txreq_flits_retried, 0x7), + CCN_EVENT_SBAS(rrt_full, 0x8), + CCN_EVENT_SBAS(wrt_full, 0x9), + CCN_EVENT_SBAS(txreq_flits_replayed, 0xa), + CCN_EVENT_CYCLES(cycles), +}; + +/* Populated in arm_ccn_init() */ +static struct attribute + *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1]; + +static struct attribute_group arm_ccn_pmu_events_attr_group = { + .name = "events", + .is_visible = arm_ccn_pmu_events_is_visible, + .attrs = arm_ccn_pmu_events_attrs, +}; + + +static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name) +{ + unsigned long i; + + if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1])) + return NULL; + i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a'; + + switch (name[1]) { + case 'l': + return &ccn->dt.cmp_mask[i].l; + case 'h': + return &ccn->dt.cmp_mask[i].h; + default: + return NULL; + } +} + +static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); + + return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL; +} + +static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); + int err = -EINVAL; + + if (mask) + err = kstrtoull(buf, 0, mask); + + return err ? 
err : count; +} + +#define CCN_CMP_MASK_ATTR(_name) \ + struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ + __ATTR(_name, S_IRUGO | S_IWUSR, \ + arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store) + +#define CCN_CMP_MASK_ATTR_RO(_name) \ + struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ + __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL) + +static CCN_CMP_MASK_ATTR(0l); +static CCN_CMP_MASK_ATTR(0h); +static CCN_CMP_MASK_ATTR(1l); +static CCN_CMP_MASK_ATTR(1h); +static CCN_CMP_MASK_ATTR(2l); +static CCN_CMP_MASK_ATTR(2h); +static CCN_CMP_MASK_ATTR(3l); +static CCN_CMP_MASK_ATTR(3h); +static CCN_CMP_MASK_ATTR(4l); +static CCN_CMP_MASK_ATTR(4h); +static CCN_CMP_MASK_ATTR(5l); +static CCN_CMP_MASK_ATTR(5h); +static CCN_CMP_MASK_ATTR(6l); +static CCN_CMP_MASK_ATTR(6h); +static CCN_CMP_MASK_ATTR(7l); +static CCN_CMP_MASK_ATTR(7h); +static CCN_CMP_MASK_ATTR_RO(8l); +static CCN_CMP_MASK_ATTR_RO(8h); +static CCN_CMP_MASK_ATTR_RO(9l); +static CCN_CMP_MASK_ATTR_RO(9h); +static CCN_CMP_MASK_ATTR_RO(al); +static CCN_CMP_MASK_ATTR_RO(ah); +static CCN_CMP_MASK_ATTR_RO(bl); +static CCN_CMP_MASK_ATTR_RO(bh); + +static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = { + &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr, + &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr, + &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr, + &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr, + &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr, + &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr, + &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr, + &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr, + &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr, + &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr, + &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr, + &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr, + NULL +}; + +static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { + .name = "cmp_mask", + .attrs = arm_ccn_pmu_cmp_mask_attrs, +}; + + +/* + * Default poll period is 10ms, which is way over the top anyway, + * as in the worst case scenario (an event every cycle), with 1GHz + * clocked bus, the smallest, 32 bit counter will overflow in + * more than 4s. 
+ */ +static unsigned int arm_ccn_pmu_poll_period_us = 10000; +module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint, + S_IRUGO | S_IWUSR); + +static ktime_t arm_ccn_pmu_timer_period(void) +{ + return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000); +} + + +static const struct attribute_group *arm_ccn_pmu_attr_groups[] = { + &arm_ccn_pmu_events_attr_group, + &arm_ccn_pmu_format_attr_group, + &arm_ccn_pmu_cmp_mask_attr_group, + NULL +}; + + +static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size) +{ + int bit; + + do { + bit = find_first_zero_bit(bitmap, size); + if (bit >= size) + return -EAGAIN; + } while (test_and_set_bit(bit, bitmap)); + + return bit; +} + +/* All RN-I and RN-D nodes have identical PMUs */ +static int arm_ccn_pmu_type_eq(u32 a, u32 b) +{ + if (a == b) + return 1; + + switch (a) { + case CCN_TYPE_RNI_1P: + case CCN_TYPE_RNI_2P: + case CCN_TYPE_RNI_3P: + case CCN_TYPE_RND_1P: + case CCN_TYPE_RND_2P: + case CCN_TYPE_RND_3P: + switch (b) { + case CCN_TYPE_RNI_1P: + case CCN_TYPE_RNI_2P: + case CCN_TYPE_RNI_3P: + case CCN_TYPE_RND_1P: + case CCN_TYPE_RND_2P: + case CCN_TYPE_RND_3P: + return 1; + } + break; + } + + return 0; +} + +static void arm_ccn_pmu_event_destroy(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) { + clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask); + } else { + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && + CCN_CONFIG_EVENT(event->attr.config) == + CCN_EVENT_WATCHPOINT) + clear_bit(hw->config_base, source->xp.dt_cmp_mask); + else + clear_bit(hw->config_base, source->pmu_events_mask); + clear_bit(hw->idx, ccn->dt.pmu_counters_mask); + } + + ccn->dt.pmu_counters[hw->idx].source = NULL; + ccn->dt.pmu_counters[hw->idx].event = NULL; +} + +static int arm_ccn_pmu_event_init(struct perf_event *event) +{ + struct arm_ccn *ccn; + struct hw_perf_event *hw = &event->hw; + u32 node_xp, type, event_id; + int valid, bit; + struct arm_ccn_component *source; + int i; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + ccn = pmu_to_arm_ccn(event->pmu); + event->destroy = arm_ccn_pmu_event_destroy; + + if (hw->sample_period) { + dev_warn(ccn->dev, "Sampling not supported!\n"); + return -EOPNOTSUPP; + } + + if (has_branch_stack(event) || event->attr.exclude_user || + event->attr.exclude_kernel || event->attr.exclude_hv || + event->attr.exclude_idle) { + dev_warn(ccn->dev, "Can't exclude execution levels!\n"); + return -EOPNOTSUPP; + } + + if (event->cpu < 0) { + dev_warn(ccn->dev, "Can't provide per-task data!\n"); + return -EOPNOTSUPP; + } + + node_xp = CCN_CONFIG_NODE(event->attr.config); + type = CCN_CONFIG_TYPE(event->attr.config); + event_id = CCN_CONFIG_EVENT(event->attr.config); + + /* Validate node/xp vs topology */ + switch (type) { + case CCN_TYPE_XP: + if (node_xp >= ccn->num_xps) { + dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp); + return -EINVAL; + } + break; + case CCN_TYPE_CYCLES: + break; + default: + if (node_xp >= ccn->num_nodes) { + dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp); + return -EINVAL; + } + if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { + dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n", + type, node_xp); + return -EINVAL; + } + break; + } + + /* Validate event ID vs available for the type */ + for (i = 0, valid = 0; i < 
ARRAY_SIZE(arm_ccn_pmu_events) && !valid; + i++) { + struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i]; + u32 port = CCN_CONFIG_PORT(event->attr.config); + u32 vc = CCN_CONFIG_VC(event->attr.config); + + if (!arm_ccn_pmu_type_eq(type, e->type)) + continue; + if (event_id != e->event) + continue; + if (e->num_ports && port >= e->num_ports) { + dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n", + port, node_xp); + return -EINVAL; + } + if (e->num_vcs && vc >= e->num_vcs) { + dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", + vc, node_xp); + return -EINVAL; + } + valid = 1; + } + if (!valid) { + dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", + event_id, node_xp); + return -EINVAL; + } + + /* Watchpoint-based event for a node is actually set on XP */ + if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) { + u32 port; + + type = CCN_TYPE_XP; + port = arm_ccn_node_to_xp_port(node_xp); + node_xp = arm_ccn_node_to_xp(node_xp); + + arm_ccn_pmu_config_set(&event->attr.config, + node_xp, type, port); + } + + /* Allocate the cycle counter */ + if (type == CCN_TYPE_CYCLES) { + if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, + ccn->dt.pmu_counters_mask)) + return -EAGAIN; + + hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; + ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; + + return 0; + } + + /* Allocate an event counter */ + hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, + CCN_NUM_PMU_EVENT_COUNTERS); + if (hw->idx < 0) { + dev_warn(ccn->dev, "No more counters available!\n"); + return -EAGAIN; + } + + if (type == CCN_TYPE_XP) + source = &ccn->xp[node_xp]; + else + source = &ccn->node[node_xp]; + ccn->dt.pmu_counters[hw->idx].source = source; + + /* Allocate an event source or a watchpoint */ + if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) + bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, + CCN_NUM_XP_WATCHPOINTS); + else + bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, + CCN_NUM_PMU_EVENTS); + if (bit < 0) { + dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", + node_xp); + clear_bit(hw->idx, ccn->dt.pmu_counters_mask); + return -EAGAIN; + } + hw->config_base = bit; + + ccn->dt.pmu_counters[hw->idx].event = event; + + return 0; +} + +static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx) +{ + u64 res; + + if (idx == CCN_IDX_PMU_CYCLE_COUNTER) { +#ifdef readq + res = readq(ccn->dt.base + CCN_DT_PMCCNTR); +#else + /* 40 bit counter, can do snapshot and read in two parts */ + writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ); + while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1)) + ; + writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); + res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff; + res <<= 32; + res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR); +#endif + } else { + res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx)); + } + + return res; +} + +static void arm_ccn_pmu_event_update(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u64 prev_count, new_count, mask; + + do { + prev_count = local64_read(&hw->prev_count); + new_count = arm_ccn_pmu_read_counter(ccn, hw->idx); + } while (local64_xchg(&hw->prev_count, new_count) != prev_count); + + mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 
40 : 32)) - 1; + + local64_add((new_count - prev_count) & mask, &event->count); +} + +static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *xp; + u32 val, dt_cfg; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) + xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; + else + xp = &ccn->xp[arm_ccn_node_to_xp( + CCN_CONFIG_NODE(event->attr.config))]; + + if (enable) + dt_cfg = hw->event_base; + else + dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH; + + spin_lock(&ccn->dt.config_lock); + + val = readl(xp->base + CCN_XP_DT_CONFIG); + val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK << + CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx)); + val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx); + writel(val, xp->base + CCN_XP_DT_CONFIG); + + spin_unlock(&ccn->dt.config_lock); +} + +static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + + local64_set(&event->hw.prev_count, + arm_ccn_pmu_read_counter(ccn, hw->idx)); + hw->state = 0; + + if (!ccn->irq_used) + hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), + HRTIMER_MODE_REL); + + /* Set the DT bus input, engaging the counter */ + arm_ccn_pmu_xp_dt_config(event, 1); +} + +static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u64 timeout; + + /* Disable counting, setting the DT bus to pass-through mode */ + arm_ccn_pmu_xp_dt_config(event, 0); + + if (!ccn->irq_used) + hrtimer_cancel(&ccn->dt.hrtimer); + + /* Let the DT bus drain */ + timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) + + ccn->num_xps; + while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) < + timeout) + cpu_relax(); + + if (flags & PERF_EF_UPDATE) + arm_ccn_pmu_event_update(event); + + hw->state |= PERF_HES_STOPPED; +} + +static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + unsigned long wp = hw->config_base; + u32 val; + u64 cmp_l = event->attr.config1; + u64 cmp_h = event->attr.config2; + u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; + u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; + + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp); + + /* Direction (RX/TX), device (port) & virtual channel */ + val = readl(source->base + CCN_XP_DT_INTERFACE_SEL); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp)); + val |= CCN_CONFIG_DIR(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp)); + val |= CCN_CONFIG_PORT(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp)); + val |= CCN_CONFIG_VC(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp); + writel(val, source->base + CCN_XP_DT_INTERFACE_SEL); + + /* Comparison values */ + writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); + writel((cmp_l >> 32) & 0xefffffff, + 
source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); + writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); + writel((cmp_h >> 32) & 0x0fffffff, + source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4); + + /* Mask */ + writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); + writel((mask_l >> 32) & 0xefffffff, + source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); + writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); + writel((mask_h >> 32) & 0x0fffffff, + source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4); +} + +static void arm_ccn_pmu_xp_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + u32 val, id; + + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base); + + id = (CCN_CONFIG_VC(event->attr.config) << 4) | + (CCN_CONFIG_PORT(event->attr.config) << 3) | + (CCN_CONFIG_EVENT(event->attr.config) << 0); + + val = readl(source->base + CCN_XP_PMU_EVENT_SEL); + val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK << + CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); + val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); + writel(val, source->base + CCN_XP_PMU_EVENT_SEL); +} + +static void arm_ccn_pmu_node_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + u32 type = CCN_CONFIG_TYPE(event->attr.config); + u32 val, port; + + port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port, + hw->config_base); + + /* These *_event_sel regs should be identical, but let's make sure... 
*/ + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL); + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) != + CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1)); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) != + CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1)); + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK != + CCN_SBAS_PMU_EVENT_SEL__ID__MASK); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK != + CCN_RNI_PMU_EVENT_SEL__ID__MASK); + if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS && + !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P))) + return; + + /* Set the event id for the pre-allocated counter */ + val = readl(source->base + CCN_HNF_PMU_EVENT_SEL); + val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK << + CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); + val |= CCN_CONFIG_EVENT(event->attr.config) << + CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); + writel(val, source->base + CCN_HNF_PMU_EVENT_SEL); +} + +static void arm_ccn_pmu_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u32 xp, offset, val; + + /* Cycle counter requires no setup */ + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) + return; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) + xp = CCN_CONFIG_XP(event->attr.config); + else + xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); + + spin_lock(&ccn->dt.config_lock); + + /* Set the DT bus "distance" register */ + offset = (hw->idx / 4) * 4; + val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); + val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK << + CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4)); + val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4); + writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { + if (CCN_CONFIG_EVENT(event->attr.config) == + CCN_EVENT_WATCHPOINT) + arm_ccn_pmu_xp_watchpoint_config(event); + else + arm_ccn_pmu_xp_event_config(event); + } else { + arm_ccn_pmu_node_event_config(event); + } + + spin_unlock(&ccn->dt.config_lock); +} + +static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) +{ + struct hw_perf_event *hw = &event->hw; + + arm_ccn_pmu_event_config(event); + + hw->state = PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + arm_ccn_pmu_event_start(event, PERF_EF_UPDATE); + + return 0; +} + +static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) +{ + arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); +} + +static void arm_ccn_pmu_event_read(struct perf_event *event) +{ + arm_ccn_pmu_event_update(event); +} + +static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) +{ + u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR); + int idx; + + if (!pmovsr) + return IRQ_NONE; + + writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR); + + BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS); + + for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) { + struct perf_event *event = dt->pmu_counters[idx].event; + int overflowed = pmovsr & BIT(idx); + + WARN_ON_ONCE(overflowed && !event); + + if (!event || !overflowed) + continue; + + arm_ccn_pmu_event_update(event); + } + + return IRQ_HANDLED; +} + +static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) +{ + struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt, + hrtimer); + unsigned long flags; + + local_irq_save(flags); + arm_ccn_pmu_overflow_handler(dt); + local_irq_restore(flags); + + 
hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period()); + return HRTIMER_RESTART; +} + + +static DEFINE_IDA(arm_ccn_pmu_ida); + +static int arm_ccn_pmu_init(struct arm_ccn *ccn) +{ + int i; + char *name; + + /* Initialize DT subsystem */ + ccn->dt.base = ccn->base + CCN_REGION_SIZE; + spin_lock_init(&ccn->dt.config_lock); + writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL); + writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN, + ccn->dt.base + CCN_DT_PMCR); + writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); + for (i = 0; i < ccn->num_xps; i++) { + writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG); + writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << + CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) | + (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << + CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) | + CCN_XP_DT_CONTROL__DT_ENABLE, + ccn->xp[i].base + CCN_XP_DT_CONTROL); + } + ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0; + ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15); + ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9); + + /* Get a convenient /sys/event_source/devices/ name */ + ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL); + if (ccn->dt.id == 0) { + name = "ccn"; + } else { + int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id); + + name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL); + snprintf(name, len + 1, "ccn_%d", ccn->dt.id); + } + + /* Perf driver registration */ + ccn->dt.pmu = (struct pmu) { + .attr_groups = arm_ccn_pmu_attr_groups, + .task_ctx_nr = perf_invalid_context, + .event_init = arm_ccn_pmu_event_init, + .add = arm_ccn_pmu_event_add, + .del = arm_ccn_pmu_event_del, + .start = arm_ccn_pmu_event_start, + .stop = arm_ccn_pmu_event_stop, + .read = arm_ccn_pmu_event_read, + }; + + /* No overflow interrupt? Have to use a timer instead. 
*/ + if (!ccn->irq_used) { + dev_info(ccn->dev, "No access to interrupts, using timer.\n"); + hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; + } + + return perf_pmu_register(&ccn->dt.pmu, name, -1); +} + +static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) +{ + int i; + + for (i = 0; i < ccn->num_xps; i++) + writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); + writel(0, ccn->dt.base + CCN_DT_PMCR); + perf_pmu_unregister(&ccn->dt.pmu); + ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); +} + + +static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, + int (*callback)(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id)) +{ + int region; + + for (region = 0; region < CCN_NUM_REGIONS; region++) { + u32 val, type, id; + void __iomem *base; + int err; + + val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 + + 4 * (region / 32)); + if (!(val & (1 << (region % 32)))) + continue; + + base = ccn->base + region * CCN_REGION_SIZE; + val = readl(base + CCN_ALL_OLY_ID); + type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) & + CCN_ALL_OLY_ID__OLY_ID__MASK; + id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) & + CCN_ALL_OLY_ID__NODE_ID__MASK; + + err = callback(ccn, region, base, type, id); + if (err) + return err; + } + + return 0; +} + +static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id) +{ + + if (type == CCN_TYPE_XP && id >= ccn->num_xps) + ccn->num_xps = id + 1; + else if (id >= ccn->num_nodes) + ccn->num_nodes = id + 1; + + return 0; +} + +static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id) +{ + struct arm_ccn_component *component; + + dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type); + + switch (type) { + case CCN_TYPE_MN: + case CCN_TYPE_DT: + return 0; + case CCN_TYPE_XP: + component = &ccn->xp[id]; + break; + case CCN_TYPE_SBSX: + ccn->sbsx_present = 1; + component = &ccn->node[id]; + break; + case CCN_TYPE_SBAS: + ccn->sbas_present = 1; + /* Fall-through */ + default: + component = &ccn->node[id]; + break; + } + + component->base = base; + component->type = type; + + return 0; +} + + +static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn, + const u32 *err_sig_val) +{ + /* This should be really handled by firmware... 
*/ + dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n", + err_sig_val[5], err_sig_val[4], err_sig_val[3], + err_sig_val[2], err_sig_val[1], err_sig_val[0]); + dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n"); + writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + + return IRQ_HANDLED; +} + + +static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id) +{ + irqreturn_t res = IRQ_NONE; + struct arm_ccn *ccn = dev_id; + u32 err_sig_val[6]; + u32 err_or; + int i; + + /* PMU overflow is a special case */ + err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0); + if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) { + err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT; + res = arm_ccn_pmu_overflow_handler(&ccn->dt); + } + + /* Have to read all err_sig_vals to clear them */ + for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) { + err_sig_val[i] = readl(ccn->base + + CCN_MN_ERR_SIG_VAL_63_0 + i * 4); + err_or |= err_sig_val[i]; + } + if (err_or) + res |= arm_ccn_error_handler(ccn, err_sig_val); + + if (res != IRQ_NONE) + writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT, + ccn->base + CCN_MN_ERRINT_STATUS); + + return res; +} + + +static int arm_ccn_probe(struct platform_device *pdev) +{ + struct arm_ccn *ccn; + struct resource *res; + int err; + + ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); + if (!ccn) + return -ENOMEM; + ccn->dev = &pdev->dev; + platform_set_drvdata(pdev, ccn); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + if (!devm_request_mem_region(ccn->dev, res->start, + resource_size(res), pdev->name)) + return -EBUSY; + + ccn->base = devm_ioremap(ccn->dev, res->start, + resource_size(res)); + if (!ccn->base) + return -EFAULT; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) + return -EINVAL; + + /* Check if we can use the interrupt */ + writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + if (readl(ccn->base + CCN_MN_ERRINT_STATUS) & + CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) { + /* Can set 'disable' bits, so can acknowledge interrupts */ + writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + err = devm_request_irq(ccn->dev, res->start, + arm_ccn_irq_handler, 0, dev_name(ccn->dev), + ccn); + if (err) + return err; + + ccn->irq_used = 1; + } + + + /* Build topology */ + + err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num); + if (err) + return err; + + ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes, + GFP_KERNEL); + ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_xps, + GFP_KERNEL); + if (!ccn->node || !ccn->xp) + return -ENOMEM; + + err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes); + if (err) + return err; + + return arm_ccn_pmu_init(ccn); +} + +static int arm_ccn_remove(struct platform_device *pdev) +{ + struct arm_ccn *ccn = platform_get_drvdata(pdev); + + arm_ccn_pmu_cleanup(ccn); + + return 0; +} + +static const struct of_device_id arm_ccn_match[] = { + { .compatible = "arm,ccn-504", }, + {}, +}; + +static struct platform_driver arm_ccn_driver = { + .driver = { + .name = "arm-ccn", + .of_match_table = arm_ccn_match, + }, + .probe = arm_ccn_probe, + .remove = arm_ccn_remove, +}; + +static int __init arm_ccn_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) + arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; + + return platform_driver_register(&arm_ccn_driver); +} + +static 
void __exit arm_ccn_exit(void) +{ + platform_driver_unregister(&arm_ccn_driver); +} + +module_init(arm_ccn_init); +module_exit(arm_ccn_exit); + +MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index f8ee13c7bf7b..75c9681f8021 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -162,7 +162,9 @@ static int __init weim_parse_dt(struct platform_device *pdev, } } - ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + ret = of_platform_populate(pdev->dev.of_node, + of_default_bus_match_table, + NULL, &pdev->dev); if (ret) dev_err(&pdev->dev, "%s fail to create devices.\n", pdev->dev.of_node->full_name); diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h index 551e01061434..95254585db86 100644 --- a/drivers/bus/omap_l3_noc.h +++ b/drivers/bus/omap_l3_noc.h @@ -188,31 +188,31 @@ static struct l3_flagmux_data omap_l3_flagmux_clk3 = { }; static struct l3_masters_data omap_l3_masters[] = { - { 0x0 , "MPU"}, - { 0x10, "CS_ADP"}, - { 0x14, "xxx"}, - { 0x20, "DSP"}, - { 0x30, "IVAHD"}, - { 0x40, "ISS"}, - { 0x44, "DucatiM3"}, - { 0x48, "FaceDetect"}, - { 0x50, "SDMA_Rd"}, - { 0x54, "SDMA_Wr"}, - { 0x58, "xxx"}, - { 0x5C, "xxx"}, - { 0x60, "SGX"}, - { 0x70, "DSS"}, - { 0x80, "C2C"}, - { 0x88, "xxx"}, - { 0x8C, "xxx"}, - { 0x90, "HSI"}, - { 0xA0, "MMC1"}, - { 0xA4, "MMC2"}, - { 0xA8, "MMC6"}, - { 0xB0, "UNIPRO1"}, - { 0xC0, "USBHOSTHS"}, - { 0xC4, "USBOTGHS"}, - { 0xC8, "USBHOSTFS"} + { 0x00, "MPU"}, + { 0x04, "CS_ADP"}, + { 0x05, "xxx"}, + { 0x08, "DSP"}, + { 0x0C, "IVAHD"}, + { 0x10, "ISS"}, + { 0x11, "DucatiM3"}, + { 0x12, "FaceDetect"}, + { 0x14, "SDMA_Rd"}, + { 0x15, "SDMA_Wr"}, + { 0x16, "xxx"}, + { 0x17, "xxx"}, + { 0x18, "SGX"}, + { 0x1C, "DSS"}, + { 0x20, "C2C"}, + { 0x22, "xxx"}, + { 0x23, "xxx"}, + { 0x24, "HSI"}, + { 0x28, "MMC1"}, + { 0x29, "MMC2"}, + { 0x2A, "MMC6"}, + { 0x2C, "UNIPRO1"}, + { 0x30, "USBHOSTHS"}, + { 0x31, "USBOTGHS"}, + { 0x32, "USBHOSTFS"} }; static struct l3_flagmux_data *omap_l3_flagmux[] = { diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index b29703324e94..09f17eb73486 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c @@ -710,19 +710,6 @@ static int agp_open(struct inode *inode, struct file *file) return 0; } - -static ssize_t agp_read(struct file *file, char __user *buf, - size_t count, loff_t * ppos) -{ - return -EINVAL; -} - -static ssize_t agp_write(struct file *file, const char __user *buf, - size_t count, loff_t * ppos) -{ - return -EINVAL; -} - static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) { struct agp_info userinfo; @@ -1047,8 +1034,6 @@ static const struct file_operations agp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .read = agp_read, - .write = agp_write, .unlocked_ioctl = agp_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_agp_ioctl, diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 6e02ec103cc7..aa30a25c8d49 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -81,12 +81,6 @@ static void add_early_randomness(struct hwrng *rng) unsigned char bytes[16]; int bytes_read; - /* - * Currently only virtio-rng cannot return data during device - * probe, and that's handled in virtio-rng.c itself. If there - * are more such devices, this call to rng_get_data can be - * made conditional here instead of doing it per-device. 
- */ bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); if (bytes_read > 0) add_device_randomness(bytes, bytes_read); diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index e9b15bc18b4d..132c9ccfdc62 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -28,17 +28,17 @@ static DEFINE_IDA(rng_index_ida); struct virtrng_info { - struct virtio_device *vdev; struct hwrng hwrng; struct virtqueue *vq; - unsigned int data_avail; struct completion have_data; - bool busy; char name[25]; + unsigned int data_avail; int index; + bool busy; + bool hwrng_register_done; + bool hwrng_removed; }; -static bool probe_done; static void random_recv_done(struct virtqueue *vq) { @@ -69,12 +69,8 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) int ret; struct virtrng_info *vi = (struct virtrng_info *)rng->priv; - /* - * Don't ask host for data till we're setup. This call can - * happen during hwrng_register(), after commit d9e7972619. - */ - if (unlikely(!probe_done)) - return 0; + if (vi->hwrng_removed) + return -ENODEV; if (!vi->busy) { vi->busy = true; @@ -124,6 +120,7 @@ static int probe_common(struct virtio_device *vdev) .cleanup = virtio_cleanup, .priv = (unsigned long)vi, .name = vi->name, + .quality = 1000, }; vdev->priv = vi; @@ -137,25 +134,20 @@ static int probe_common(struct virtio_device *vdev) return err; } - err = hwrng_register(&vi->hwrng); - if (err) { - vdev->config->del_vqs(vdev); - vi->vq = NULL; - kfree(vi); - ida_simple_remove(&rng_index_ida, index); - return err; - } - - probe_done = true; return 0; } static void remove_common(struct virtio_device *vdev) { struct virtrng_info *vi = vdev->priv; + + vi->hwrng_removed = true; + vi->data_avail = 0; + complete(&vi->have_data); vdev->config->reset(vdev); vi->busy = false; - hwrng_unregister(&vi->hwrng); + if (vi->hwrng_register_done) + hwrng_unregister(&vi->hwrng); vdev->config->del_vqs(vdev); ida_simple_remove(&rng_index_ida, vi->index); kfree(vi); @@ -171,6 +163,16 @@ static void virtrng_remove(struct virtio_device *vdev) remove_common(vdev); } +static void virtrng_scan(struct virtio_device *vdev) +{ + struct virtrng_info *vi = vdev->priv; + int err; + + err = hwrng_register(&vi->hwrng); + if (!err) + vi->hwrng_register_done = true; +} + #ifdef CONFIG_PM_SLEEP static int virtrng_freeze(struct virtio_device *vdev) { @@ -195,6 +197,7 @@ static struct virtio_driver virtio_rng_driver = { .id_table = id_table, .probe = virtrng_probe, .remove = virtrng_remove, + .scan = virtrng_scan, #ifdef CONFIG_PM_SLEEP .freeze = virtrng_freeze, .restore = virtrng_restore, diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e6db9381b2c7..f816211f062f 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2796,7 +2796,6 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) = IPMI_CHANNEL_MEDIUM_IPMB; intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; - rv = -ENOSYS; intf->curr_channel = IPMI_MAX_CHANNELS; wake_up(&intf->waitq); @@ -2821,12 +2820,12 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) if (rv) { /* Got an error somehow, just give up. 
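The virtio-rng rework above moves hwrng_register() out of .probe and into the new .scan callback, and replaces the global probe_done flag with per-device state. A minimal, hedged sketch of that probe/scan split; the my_* names are hypothetical, and virtqueue setup and error handling are omitted:

#include <linux/virtio.h>
#include <linux/hw_random.h>

struct my_info {
        struct hwrng hwrng;
        bool hwrng_register_done;
        bool hwrng_removed;
};

static int my_probe(struct virtio_device *vdev)
{
        /*
         * Allocate vdev->priv and set up the virtqueue here, but do not
         * register the hwrng yet: hwrng_register() may immediately ask
         * a device that is not fully initialized for entropy.
         */
        return 0;
}

static void my_scan(struct virtio_device *vdev)
{
        struct my_info *vi = vdev->priv;

        /* the virtio core calls .scan only once the device is ready */
        if (!hwrng_register(&vi->hwrng))
                vi->hwrng_register_done = true;
}

On removal the driver above sets hwrng_removed, zeroes data_avail and completes have_data, so a reader blocked waiting for data is woken with no data rather than left hanging, and any later read returns -ENODEV.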
*/ + printk(KERN_WARNING PFX + "Error sending channel information for channel" + " %d: %d\n", intf->curr_channel, rv); + intf->curr_channel = IPMI_MAX_CHANNELS; wake_up(&intf->waitq); - - printk(KERN_WARNING PFX - "Error sending channel information: %d\n", - rv); } } out: @@ -2964,8 +2963,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, intf->null_user_handler = channel_handler; intf->curr_channel = 0; rv = send_channel_info_cmd(intf, 0); - if (rv) + if (rv) { + printk(KERN_WARNING PFX + "Error sending channel information for channel" + " 0, %d\n", rv); goto out; + } /* Wait for the channel info to be read. */ wait_event(intf->waitq, diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5d665680ae33..5c4e1f625bbb 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -965,9 +965,9 @@ static inline int ipmi_si_is_busy(struct timespec *ts) return ts->tv_nsec != -1; } -static int ipmi_thread_busy_wait(enum si_sm_result smi_result, - const struct smi_info *smi_info, - struct timespec *busy_until) +static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result, + const struct smi_info *smi_info, + struct timespec *busy_until) { unsigned int max_busy_us = 0; @@ -2658,6 +2658,9 @@ static int ipmi_probe(struct platform_device *dev) if (!match) return -EINVAL; + if (!of_device_is_available(np)) + return -EINVAL; + ret = of_address_to_resource(np, 0, &resource); if (ret) { dev_warn(&dev->dev, PFX "invalid address from OF\n"); @@ -3655,6 +3658,9 @@ static void cleanup_one_si(struct smi_info *to_clean) if (!to_clean) return; + if (to_clean->dev) + dev_set_drvdata(to_clean->dev, NULL); + list_del(&to_clean->link); /* Tell the driver that we are shutting down. */ diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 60aafb8a1f2e..b585b4789822 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2262,8 +2262,7 @@ static int __init init(void) unregister: unregister_virtio_driver(&virtio_console); free: - if (pdrvdata.debugfs_dir) - debugfs_remove_recursive(pdrvdata.debugfs_dir); + debugfs_remove_recursive(pdrvdata.debugfs_dir); class_destroy(pdrvdata.class); return err; } @@ -2276,8 +2275,7 @@ static void __exit fini(void) unregister_virtio_driver(&virtio_rproc_serial); class_destroy(pdrvdata.class); - if (pdrvdata.debugfs_dir) - debugfs_remove_recursive(pdrvdata.debugfs_dir); + debugfs_remove_recursive(pdrvdata.debugfs_dir); } module_init(init); module_exit(fini); diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c index 0300c46ee247..32f7c1b36204 100644 --- a/drivers/clk/at91/clk-slow.c +++ b/drivers/clk/at91/clk-slow.c @@ -447,7 +447,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np, int i; num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); - if (num_parents <= 0 || num_parents > 1) + if (num_parents != 2) return; for (i = 0; i < num_parents; ++i) { diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c index bac2ddf49d02..73a8d0ff530c 100644 --- a/drivers/clk/clk-efm32gg.c +++ b/drivers/clk/clk-efm32gg.c @@ -22,7 +22,7 @@ static struct clk_onecell_data clk_data = { .clk_num = ARRAY_SIZE(clk), }; -static int __init efm32gg_cmu_init(struct device_node *np) +static void __init efm32gg_cmu_init(struct device_node *np) { int i; void __iomem *base; @@ -33,7 +33,7 @@ static int __init efm32gg_cmu_init(struct device_node *np) base = of_iomap(np, 0); if (!base) { pr_warn("Failed to map address 
range for efm32gg,cmu node\n"); - return -EADDRNOTAVAIL; + return; } clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL, @@ -76,6 +76,6 @@ static int __init efm32gg_cmu_init(struct device_node *np) clk[clk_HFPERCLKDAC0] = clk_register_gate(NULL, "HFPERCLK.DAC0", "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); - return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); } CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b76fa69b44cb..bacc06ff939b 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1467,6 +1467,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even static void clk_change_rate(struct clk *clk) { struct clk *child; + struct hlist_node *tmp; unsigned long old_rate; unsigned long best_parent_rate = 0; bool skip_set_rate = false; @@ -1502,7 +1503,11 @@ static void clk_change_rate(struct clk *clk) if (clk->notifier_count && old_rate != clk->rate) __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); - hlist_for_each_entry(child, &clk->children, child_node) { + /* + * Use safe iteration, as change_rate can actually swap parents + * for certain clock types. + */ + hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) { /* Skip children who will be reparented to another clock */ if (child->new_parent && child->new_parent != clk) continue; diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 8ebf757d29e2..3821a88077ea 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -16,10 +16,19 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/delay.h> +#include <linux/mvebu-pmsu.h> +#include <asm/smp_plat.h> -#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0 -#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC -#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F +#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0 +#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff +#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8 +#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8 +#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16 +#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC +#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F + +#define PMU_DFS_RATIO_SHIFT 16 +#define PMU_DFS_RATIO_MASK 0x3F #define MAX_CPU 4 struct cpu_clk { @@ -28,6 +37,7 @@ struct cpu_clk { const char *clk_name; const char *parent_name; void __iomem *reg_base; + void __iomem *pmu_dfs; }; static struct clk **clks; @@ -62,8 +72,9 @@ static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate, return *parent_rate / div; } -static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate, - unsigned long parent_rate) +static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long parent_rate) + { struct cpu_clk *cpuclk = to_cpu_clk(hwclk); u32 reg, div; @@ -95,6 +106,58 @@ static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate, return 0; } +static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long parent_rate) +{ + u32 reg; + unsigned long fabric_div, target_div, cur_rate; + struct cpu_clk *cpuclk = to_cpu_clk(hwclk); + + /* + * PMU DFS registers are not mapped, Device Tree does not + * describes them. We cannot change the frequency dynamically. 
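For context on the mvebu change above: when the CPU clock is enabled, the new clk_cpu_on_set_rate() path (continued just below) programs the CPU-to-fabric ratio through the per-CPU PMU DFS register, a plain read-modify-write of the 6-bit field at bit 16. A hedged sketch using the constants introduced above; the helper name is hypothetical:

#include <linux/io.h>
#include <linux/types.h>

#define PMU_DFS_RATIO_SHIFT     16
#define PMU_DFS_RATIO_MASK      0x3F

/* 'pmu_dfs' is assumed to be the ioremapped per-CPU PMU DFS register */
static void pmu_dfs_set_ratio(void __iomem *pmu_dfs, u32 ratio)
{
        u32 reg = readl(pmu_dfs);

        reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
        reg |= (ratio & PMU_DFS_RATIO_MASK) << PMU_DFS_RATIO_SHIFT;
        writel(reg, pmu_dfs);
}

The ratio itself is derived from the fabric divider read back from SYS_CTRL_CLK_DIVIDER_CTRL2: half of it when the frequency doubles, unchanged when it drops, clamped to a minimum of 1.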
+ */ + if (!cpuclk->pmu_dfs) + return -ENODEV; + + cur_rate = __clk_get_rate(hwclk->clk); + + reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET); + fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) & + SYS_CTRL_CLK_DIVIDER_MASK; + + /* Frequency is going up */ + if (rate == 2 * cur_rate) + target_div = fabric_div / 2; + /* Frequency is going down */ + else + target_div = fabric_div; + + if (target_div == 0) + target_div = 1; + + reg = readl(cpuclk->pmu_dfs); + reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT); + reg |= (target_div << PMU_DFS_RATIO_SHIFT); + writel(reg, cpuclk->pmu_dfs); + + reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET); + reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL << + SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT); + writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET); + + return mvebu_pmsu_dfs_request(cpuclk->cpu); +} + +static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate, + unsigned long parent_rate) +{ + if (__clk_is_enabled(hwclk->clk)) + return clk_cpu_on_set_rate(hwclk, rate, parent_rate); + else + return clk_cpu_off_set_rate(hwclk, rate, parent_rate); +} + static const struct clk_ops cpu_ops = { .recalc_rate = clk_cpu_recalc_rate, .round_rate = clk_cpu_round_rate, @@ -105,6 +168,7 @@ static void __init of_cpu_clk_setup(struct device_node *node) { struct cpu_clk *cpuclk; void __iomem *clock_complex_base = of_iomap(node, 0); + void __iomem *pmu_dfs_base = of_iomap(node, 1); int ncpus = 0; struct device_node *dn; @@ -114,6 +178,10 @@ static void __init of_cpu_clk_setup(struct device_node *node) return; } + if (pmu_dfs_base == NULL) + pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n", + __func__); + for_each_node_by_type(dn, "cpu") ncpus++; @@ -146,6 +214,8 @@ static void __init of_cpu_clk_setup(struct device_node *node) cpuclk[cpu].clk_name = clk_name; cpuclk[cpu].cpu = cpu; cpuclk[cpu].reg_base = clock_complex_base; + if (pmu_dfs_base) + cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu; cpuclk[cpu].hw.init = &init; init.name = cpuclk[cpu].clk_name; diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index 4032e510d9aa..3b83b7dd78c7 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -1095,7 +1095,7 @@ static struct clk_branch prng_clk = { }; static const struct freq_tbl clk_tbl_sdc[] = { - { 144000, P_PXO, 5, 18,625 }, + { 200000, P_PXO, 2, 2, 125 }, { 400000, P_PLL8, 4, 1, 240 }, { 16000000, P_PLL8, 4, 1, 6 }, { 17070000, P_PLL8, 1, 2, 45 }, diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 0d8c6c59a75e..b22a2d2f21e9 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -545,7 +545,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS), GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS), GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS), - GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), + GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS), GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS), GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS), @@ -603,7 +603,7 @@ static struct rockchip_clk_branch 
rk3288_clk_branches[] __initdata = { GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS), GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS), GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS), - GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS), + GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS), GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS), GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS), GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS), diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index 2949a556af8f..6fb4bc602e8a 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -17,3 +17,4 @@ obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o +obj-$(CONFIG_ARCH_S5PV210) += clk-s5pv210.o clk-s5pv210-audss.o diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c new file mode 100644 index 000000000000..a8053b4aca56 --- /dev/null +++ b/drivers/clk/samsung/clk-s5pv210-audss.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2014 Tomasz Figa <t.figa@samsung.com> + * + * Based on Exynos Audio Subsystem Clock Controller driver: + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Padmavathi Venna <padma.v@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs. 
+*/ + +#include <linux/clkdev.h> +#include <linux/io.h> +#include <linux/clk-provider.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <dt-bindings/clock/s5pv210-audss.h> + +static DEFINE_SPINLOCK(lock); +static struct clk **clk_table; +static void __iomem *reg_base; +static struct clk_onecell_data clk_data; + +#define ASS_CLK_SRC 0x0 +#define ASS_CLK_DIV 0x4 +#define ASS_CLK_GATE 0x8 + +#ifdef CONFIG_PM_SLEEP +static unsigned long reg_save[][2] = { + {ASS_CLK_SRC, 0}, + {ASS_CLK_DIV, 0}, + {ASS_CLK_GATE, 0}, +}; + +static int s5pv210_audss_clk_suspend(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(reg_save); i++) + reg_save[i][1] = readl(reg_base + reg_save[i][0]); + + return 0; +} + +static void s5pv210_audss_clk_resume(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(reg_save); i++) + writel(reg_save[i][1], reg_base + reg_save[i][0]); +} + +static struct syscore_ops s5pv210_audss_clk_syscore_ops = { + .suspend = s5pv210_audss_clk_suspend, + .resume = s5pv210_audss_clk_resume, +}; +#endif /* CONFIG_PM_SLEEP */ + +/* register s5pv210_audss clocks */ +static int s5pv210_audss_clk_probe(struct platform_device *pdev) +{ + int i, ret = 0; + struct resource *res; + const char *mout_audss_p[2]; + const char *mout_i2s_p[3]; + const char *hclk_p; + struct clk *hclk, *pll_ref, *pll_in, *cdclk, *sclk_audio; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg_base)) { + dev_err(&pdev->dev, "failed to map audss registers\n"); + return PTR_ERR(reg_base); + } + + clk_table = devm_kzalloc(&pdev->dev, + sizeof(struct clk *) * AUDSS_MAX_CLKS, + GFP_KERNEL); + if (!clk_table) + return -ENOMEM; + + clk_data.clks = clk_table; + clk_data.clk_num = AUDSS_MAX_CLKS; + + hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(hclk)) { + dev_err(&pdev->dev, "failed to get hclk clock\n"); + return PTR_ERR(hclk); + } + + pll_in = devm_clk_get(&pdev->dev, "fout_epll"); + if (IS_ERR(pll_in)) { + dev_err(&pdev->dev, "failed to get fout_epll clock\n"); + return PTR_ERR(pll_in); + } + + sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio0"); + if (IS_ERR(sclk_audio)) { + dev_err(&pdev->dev, "failed to get sclk_audio0 clock\n"); + return PTR_ERR(sclk_audio); + } + + /* iiscdclk0 is an optional external I2S codec clock */ + cdclk = devm_clk_get(&pdev->dev, "iiscdclk0"); + pll_ref = devm_clk_get(&pdev->dev, "xxti"); + + if (!IS_ERR(pll_ref)) + mout_audss_p[0] = __clk_get_name(pll_ref); + else + mout_audss_p[0] = "xxti"; + mout_audss_p[1] = __clk_get_name(pll_in); + clk_table[CLK_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss", + mout_audss_p, ARRAY_SIZE(mout_audss_p), + CLK_SET_RATE_NO_REPARENT, + reg_base + ASS_CLK_SRC, 0, 1, 0, &lock); + + mout_i2s_p[0] = "mout_audss"; + if (!IS_ERR(cdclk)) + mout_i2s_p[1] = __clk_get_name(cdclk); + else + mout_i2s_p[1] = "iiscdclk0"; + mout_i2s_p[2] = __clk_get_name(sclk_audio); + clk_table[CLK_MOUT_I2S_A] = clk_register_mux(NULL, "mout_i2s_audss", + mout_i2s_p, ARRAY_SIZE(mout_i2s_p), + CLK_SET_RATE_NO_REPARENT, + reg_base + ASS_CLK_SRC, 2, 2, 0, &lock); + + clk_table[CLK_DOUT_AUD_BUS] = clk_register_divider(NULL, + "dout_aud_bus", "mout_audss", 0, + reg_base + ASS_CLK_DIV, 0, 4, 0, &lock); + clk_table[CLK_DOUT_I2S_A] = clk_register_divider(NULL, "dout_i2s_audss", + "mout_i2s_audss", 0, reg_base + ASS_CLK_DIV, + 4, 4, 0, &lock); + + clk_table[CLK_I2S] = clk_register_gate(NULL, "i2s_audss", + "dout_i2s_audss", 
CLK_SET_RATE_PARENT, + reg_base + ASS_CLK_GATE, 6, 0, &lock); + + hclk_p = __clk_get_name(hclk); + + clk_table[CLK_HCLK_I2S] = clk_register_gate(NULL, "hclk_i2s_audss", + hclk_p, CLK_IGNORE_UNUSED, + reg_base + ASS_CLK_GATE, 5, 0, &lock); + clk_table[CLK_HCLK_UART] = clk_register_gate(NULL, "hclk_uart_audss", + hclk_p, CLK_IGNORE_UNUSED, + reg_base + ASS_CLK_GATE, 4, 0, &lock); + clk_table[CLK_HCLK_HWA] = clk_register_gate(NULL, "hclk_hwa_audss", + hclk_p, CLK_IGNORE_UNUSED, + reg_base + ASS_CLK_GATE, 3, 0, &lock); + clk_table[CLK_HCLK_DMA] = clk_register_gate(NULL, "hclk_dma_audss", + hclk_p, CLK_IGNORE_UNUSED, + reg_base + ASS_CLK_GATE, 2, 0, &lock); + clk_table[CLK_HCLK_BUF] = clk_register_gate(NULL, "hclk_buf_audss", + hclk_p, CLK_IGNORE_UNUSED, + reg_base + ASS_CLK_GATE, 1, 0, &lock); + clk_table[CLK_HCLK_RP] = clk_register_gate(NULL, "hclk_rp_audss", + hclk_p, CLK_IGNORE_UNUSED, + reg_base + ASS_CLK_GATE, 0, 0, &lock); + + for (i = 0; i < clk_data.clk_num; i++) { + if (IS_ERR(clk_table[i])) { + dev_err(&pdev->dev, "failed to register clock %d\n", i); + ret = PTR_ERR(clk_table[i]); + goto unregister; + } + } + + ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get, + &clk_data); + if (ret) { + dev_err(&pdev->dev, "failed to add clock provider\n"); + goto unregister; + } + +#ifdef CONFIG_PM_SLEEP + register_syscore_ops(&s5pv210_audss_clk_syscore_ops); +#endif + + return 0; + +unregister: + for (i = 0; i < clk_data.clk_num; i++) { + if (!IS_ERR(clk_table[i])) + clk_unregister(clk_table[i]); + } + + return ret; +} + +static int s5pv210_audss_clk_remove(struct platform_device *pdev) +{ + int i; + + of_clk_del_provider(pdev->dev.of_node); + + for (i = 0; i < clk_data.clk_num; i++) { + if (!IS_ERR(clk_table[i])) + clk_unregister(clk_table[i]); + } + + return 0; +} + +static const struct of_device_id s5pv210_audss_clk_of_match[] = { + { .compatible = "samsung,s5pv210-audss-clock", }, + {}, +}; + +static struct platform_driver s5pv210_audss_clk_driver = { + .driver = { + .name = "s5pv210-audss-clk", + .owner = THIS_MODULE, + .of_match_table = s5pv210_audss_clk_of_match, + }, + .probe = s5pv210_audss_clk_probe, + .remove = s5pv210_audss_clk_remove, +}; + +static int __init s5pv210_audss_clk_init(void) +{ + return platform_driver_register(&s5pv210_audss_clk_driver); +} +core_initcall(s5pv210_audss_clk_init); + +static void __exit s5pv210_audss_clk_exit(void) +{ + platform_driver_unregister(&s5pv210_audss_clk_driver); +} +module_exit(s5pv210_audss_clk_exit); + +MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>"); +MODULE_DESCRIPTION("S5PV210 Audio Subsystem Clock Controller"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:s5pv210-audss-clk"); diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c new file mode 100644 index 000000000000..d270a2084644 --- /dev/null +++ b/drivers/clk/samsung/clk-s5pv210.c @@ -0,0 +1,856 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Mateusz Krawczuk <m.krawczuk@partner.samsung.com> + * + * Based on clock drivers for S3C64xx and Exynos4 SoCs. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common Clock Framework support for all S5PC110/S5PV210 SoCs. 
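Both new Samsung clock drivers above hand their clocks to consumers through of_clk_add_provider() with of_clk_src_onecell_get(), which is why clk_data.clks and clk_data.clk_num are filled in before the provider is registered. Roughly, that helper resolves a consumer's one-cell clock specifier by indexing into the table; a hedged sketch, not the exact kernel implementation:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>

static struct clk *my_onecell_get(struct of_phandle_args *clkspec, void *data)
{
        struct clk_onecell_data *d = data;
        unsigned int idx = clkspec->args[0];

        /* the single cell in the consumer's "clocks" phandle is the index */
        if (idx >= d->clk_num)
                return ERR_PTR(-EINVAL);

        return d->clks[idx];
}

This is also why the indices from dt-bindings/clock/s5pv210-audss.h map one-to-one onto clk_table[] entries in the probe function above.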
+ */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> + +#include "clk.h" +#include "clk-pll.h" + +#include <dt-bindings/clock/s5pv210.h> + +/* S5PC110/S5PV210 clock controller register offsets */ +#define APLL_LOCK 0x0000 +#define MPLL_LOCK 0x0008 +#define EPLL_LOCK 0x0010 +#define VPLL_LOCK 0x0020 +#define APLL_CON0 0x0100 +#define APLL_CON1 0x0104 +#define MPLL_CON 0x0108 +#define EPLL_CON0 0x0110 +#define EPLL_CON1 0x0114 +#define VPLL_CON 0x0120 +#define CLK_SRC0 0x0200 +#define CLK_SRC1 0x0204 +#define CLK_SRC2 0x0208 +#define CLK_SRC3 0x020c +#define CLK_SRC4 0x0210 +#define CLK_SRC5 0x0214 +#define CLK_SRC6 0x0218 +#define CLK_SRC_MASK0 0x0280 +#define CLK_SRC_MASK1 0x0284 +#define CLK_DIV0 0x0300 +#define CLK_DIV1 0x0304 +#define CLK_DIV2 0x0308 +#define CLK_DIV3 0x030c +#define CLK_DIV4 0x0310 +#define CLK_DIV5 0x0314 +#define CLK_DIV6 0x0318 +#define CLK_DIV7 0x031c +#define CLK_GATE_MAIN0 0x0400 +#define CLK_GATE_MAIN1 0x0404 +#define CLK_GATE_MAIN2 0x0408 +#define CLK_GATE_PERI0 0x0420 +#define CLK_GATE_PERI1 0x0424 +#define CLK_GATE_SCLK0 0x0440 +#define CLK_GATE_SCLK1 0x0444 +#define CLK_GATE_IP0 0x0460 +#define CLK_GATE_IP1 0x0464 +#define CLK_GATE_IP2 0x0468 +#define CLK_GATE_IP3 0x046c +#define CLK_GATE_IP4 0x0470 +#define CLK_GATE_BLOCK 0x0480 +#define CLK_GATE_IP5 0x0484 +#define CLK_OUT 0x0500 +#define MISC 0xe000 +#define OM_STAT 0xe100 + +/* IDs of PLLs available on S5PV210/S5P6442 SoCs */ +enum { + apll, + mpll, + epll, + vpll, +}; + +/* IDs of external clocks (used for legacy boards) */ +enum { + xxti, + xusbxti, +}; + +static void __iomem *reg_base; + +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *s5pv210_clk_dump; + +/* List of registers that need to be preserved across suspend/resume. */ +static unsigned long s5pv210_clk_regs[] __initdata = { + CLK_SRC0, + CLK_SRC1, + CLK_SRC2, + CLK_SRC3, + CLK_SRC4, + CLK_SRC5, + CLK_SRC6, + CLK_SRC_MASK0, + CLK_SRC_MASK1, + CLK_DIV0, + CLK_DIV1, + CLK_DIV2, + CLK_DIV3, + CLK_DIV4, + CLK_DIV5, + CLK_DIV6, + CLK_DIV7, + CLK_GATE_MAIN0, + CLK_GATE_MAIN1, + CLK_GATE_MAIN2, + CLK_GATE_PERI0, + CLK_GATE_PERI1, + CLK_GATE_SCLK0, + CLK_GATE_SCLK1, + CLK_GATE_IP0, + CLK_GATE_IP1, + CLK_GATE_IP2, + CLK_GATE_IP3, + CLK_GATE_IP4, + CLK_GATE_IP5, + CLK_GATE_BLOCK, + APLL_LOCK, + MPLL_LOCK, + EPLL_LOCK, + VPLL_LOCK, + APLL_CON0, + APLL_CON1, + MPLL_CON, + EPLL_CON0, + EPLL_CON1, + VPLL_CON, + CLK_OUT, +}; + +static int s5pv210_clk_suspend(void) +{ + samsung_clk_save(reg_base, s5pv210_clk_dump, + ARRAY_SIZE(s5pv210_clk_regs)); + return 0; +} + +static void s5pv210_clk_resume(void) +{ + samsung_clk_restore(reg_base, s5pv210_clk_dump, + ARRAY_SIZE(s5pv210_clk_regs)); +} + +static struct syscore_ops s5pv210_clk_syscore_ops = { + .suspend = s5pv210_clk_suspend, + .resume = s5pv210_clk_resume, +}; + +static void s5pv210_clk_sleep_init(void) +{ + s5pv210_clk_dump = + samsung_clk_alloc_reg_dump(s5pv210_clk_regs, + ARRAY_SIZE(s5pv210_clk_regs)); + if (!s5pv210_clk_dump) { + pr_warn("%s: Failed to allocate sleep save data\n", __func__); + return; + } + + register_syscore_ops(&s5pv210_clk_syscore_ops); +} +#else +static inline void s5pv210_clk_sleep_init(void) { } +#endif + +/* Mux parent lists. 
*/ +static const char *fin_pll_p[] __initconst = { + "xxti", + "xusbxti" +}; + +static const char *mout_apll_p[] __initconst = { + "fin_pll", + "fout_apll" +}; + +static const char *mout_mpll_p[] __initconst = { + "fin_pll", + "fout_mpll" +}; + +static const char *mout_epll_p[] __initconst = { + "fin_pll", + "fout_epll" +}; + +static const char *mout_vpllsrc_p[] __initconst = { + "fin_pll", + "sclk_hdmi27m" +}; + +static const char *mout_vpll_p[] __initconst = { + "mout_vpllsrc", + "fout_vpll" +}; + +static const char *mout_group1_p[] __initconst = { + "dout_a2m", + "mout_mpll", + "mout_epll", + "mout_vpll" +}; + +static const char *mout_group2_p[] __initconst = { + "xxti", + "xusbxti", + "sclk_hdmi27m", + "sclk_usbphy0", + "sclk_usbphy1", + "sclk_hdmiphy", + "mout_mpll", + "mout_epll", + "mout_vpll", +}; + +static const char *mout_audio0_p[] __initconst = { + "xxti", + "pcmcdclk0", + "sclk_hdmi27m", + "sclk_usbphy0", + "sclk_usbphy1", + "sclk_hdmiphy", + "mout_mpll", + "mout_epll", + "mout_vpll", +}; + +static const char *mout_audio1_p[] __initconst = { + "i2scdclk1", + "pcmcdclk1", + "sclk_hdmi27m", + "sclk_usbphy0", + "sclk_usbphy1", + "sclk_hdmiphy", + "mout_mpll", + "mout_epll", + "mout_vpll", +}; + +static const char *mout_audio2_p[] __initconst = { + "i2scdclk2", + "pcmcdclk2", + "sclk_hdmi27m", + "sclk_usbphy0", + "sclk_usbphy1", + "sclk_hdmiphy", + "mout_mpll", + "mout_epll", + "mout_vpll", +}; + +static const char *mout_spdif_p[] __initconst = { + "dout_audio0", + "dout_audio1", + "dout_audio3", +}; + +static const char *mout_group3_p[] __initconst = { + "mout_apll", + "mout_mpll" +}; + +static const char *mout_group4_p[] __initconst = { + "mout_mpll", + "dout_a2m" +}; + +static const char *mout_flash_p[] __initconst = { + "dout_hclkd", + "dout_hclkp" +}; + +static const char *mout_dac_p[] __initconst = { + "mout_vpll", + "sclk_hdmiphy" +}; + +static const char *mout_hdmi_p[] __initconst = { + "sclk_hdmiphy", + "dout_tblk" +}; + +static const char *mout_mixer_p[] __initconst = { + "mout_dac", + "mout_hdmi" +}; + +static const char *mout_vpll_6442_p[] __initconst = { + "fin_pll", + "fout_vpll" +}; + +static const char *mout_mixer_6442_p[] __initconst = { + "mout_vpll", + "dout_mixer" +}; + +static const char *mout_d0sync_6442_p[] __initconst = { + "mout_dsys", + "div_apll" +}; + +static const char *mout_d1sync_6442_p[] __initconst = { + "mout_psys", + "div_apll" +}; + +static const char *mout_group2_6442_p[] __initconst = { + "fin_pll", + "none", + "none", + "sclk_usbphy0", + "none", + "none", + "mout_mpll", + "mout_epll", + "mout_vpll", +}; + +static const char *mout_audio0_6442_p[] __initconst = { + "fin_pll", + "pcmcdclk0", + "none", + "sclk_usbphy0", + "none", + "none", + "mout_mpll", + "mout_epll", + "mout_vpll", +}; + +static const char *mout_audio1_6442_p[] __initconst = { + "i2scdclk1", + "pcmcdclk1", + "none", + "sclk_usbphy0", + "none", + "none", + "mout_mpll", + "mout_epll", + "mout_vpll", + "fin_pll", +}; + +static const char *mout_clksel_p[] __initconst = { + "fout_apll_clkout", + "fout_mpll_clkout", + "fout_epll", + "fout_vpll", + "sclk_usbphy0", + "sclk_usbphy1", + "sclk_hdmiphy", + "rtc", + "rtc_tick", + "dout_hclkm", + "dout_pclkm", + "dout_hclkd", + "dout_pclkd", + "dout_hclkp", + "dout_pclkp", + "dout_apll_clkout", + "dout_hpm", + "xxti", + "xusbxti", + "div_dclk" +}; + +static const char *mout_clksel_6442_p[] __initconst = { + "fout_apll_clkout", + "fout_mpll_clkout", + "fout_epll", + "fout_vpll", + "sclk_usbphy0", + "none", + "none", + "rtc", + "rtc_tick", + 
"none", + "none", + "dout_hclkd", + "dout_pclkd", + "dout_hclkp", + "dout_pclkp", + "dout_apll_clkout", + "none", + "fin_pll", + "none", + "div_dclk" +}; + +static const char *mout_clkout_p[] __initconst = { + "dout_clkout", + "none", + "xxti", + "xusbxti" +}; + +/* Common fixed factor clocks. */ +static struct samsung_fixed_factor_clock ffactor_clks[] __initdata = { + FFACTOR(FOUT_APLL_CLKOUT, "fout_apll_clkout", "fout_apll", 1, 4, 0), + FFACTOR(FOUT_MPLL_CLKOUT, "fout_mpll_clkout", "fout_mpll", 1, 2, 0), + FFACTOR(DOUT_APLL_CLKOUT, "dout_apll_clkout", "dout_apll", 1, 4, 0), +}; + +/* PLL input mux (fin_pll), which needs to be registered before PLLs. */ +static struct samsung_mux_clock early_mux_clks[] __initdata = { + MUX_F(FIN_PLL, "fin_pll", fin_pll_p, OM_STAT, 0, 1, + CLK_MUX_READ_ONLY, 0), +}; + +/* Common clock muxes. */ +static struct samsung_mux_clock mux_clks[] __initdata = { + MUX(MOUT_FLASH, "mout_flash", mout_flash_p, CLK_SRC0, 28, 1), + MUX(MOUT_PSYS, "mout_psys", mout_group4_p, CLK_SRC0, 24, 1), + MUX(MOUT_DSYS, "mout_dsys", mout_group4_p, CLK_SRC0, 20, 1), + MUX(MOUT_MSYS, "mout_msys", mout_group3_p, CLK_SRC0, 16, 1), + MUX(MOUT_EPLL, "mout_epll", mout_epll_p, CLK_SRC0, 8, 1), + MUX(MOUT_MPLL, "mout_mpll", mout_mpll_p, CLK_SRC0, 4, 1), + MUX(MOUT_APLL, "mout_apll", mout_apll_p, CLK_SRC0, 0, 1), + + MUX(MOUT_CLKOUT, "mout_clkout", mout_clkout_p, MISC, 8, 2), +}; + +/* S5PV210-specific clock muxes. */ +static struct samsung_mux_clock s5pv210_mux_clks[] __initdata = { + MUX(MOUT_VPLL, "mout_vpll", mout_vpll_p, CLK_SRC0, 12, 1), + + MUX(MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, CLK_SRC1, 28, 1), + MUX(MOUT_CSIS, "mout_csis", mout_group2_p, CLK_SRC1, 24, 4), + MUX(MOUT_FIMD, "mout_fimd", mout_group2_p, CLK_SRC1, 20, 4), + MUX(MOUT_CAM1, "mout_cam1", mout_group2_p, CLK_SRC1, 16, 4), + MUX(MOUT_CAM0, "mout_cam0", mout_group2_p, CLK_SRC1, 12, 4), + MUX(MOUT_DAC, "mout_dac", mout_dac_p, CLK_SRC1, 8, 1), + MUX(MOUT_MIXER, "mout_mixer", mout_mixer_p, CLK_SRC1, 4, 1), + MUX(MOUT_HDMI, "mout_hdmi", mout_hdmi_p, CLK_SRC1, 0, 1), + + MUX(MOUT_G2D, "mout_g2d", mout_group1_p, CLK_SRC2, 8, 2), + MUX(MOUT_MFC, "mout_mfc", mout_group1_p, CLK_SRC2, 4, 2), + MUX(MOUT_G3D, "mout_g3d", mout_group1_p, CLK_SRC2, 0, 2), + + MUX(MOUT_FIMC2, "mout_fimc2", mout_group2_p, CLK_SRC3, 20, 4), + MUX(MOUT_FIMC1, "mout_fimc1", mout_group2_p, CLK_SRC3, 16, 4), + MUX(MOUT_FIMC0, "mout_fimc0", mout_group2_p, CLK_SRC3, 12, 4), + + MUX(MOUT_UART3, "mout_uart3", mout_group2_p, CLK_SRC4, 28, 4), + MUX(MOUT_UART2, "mout_uart2", mout_group2_p, CLK_SRC4, 24, 4), + MUX(MOUT_UART1, "mout_uart1", mout_group2_p, CLK_SRC4, 20, 4), + MUX(MOUT_UART0, "mout_uart0", mout_group2_p, CLK_SRC4, 16, 4), + MUX(MOUT_MMC3, "mout_mmc3", mout_group2_p, CLK_SRC4, 12, 4), + MUX(MOUT_MMC2, "mout_mmc2", mout_group2_p, CLK_SRC4, 8, 4), + MUX(MOUT_MMC1, "mout_mmc1", mout_group2_p, CLK_SRC4, 4, 4), + MUX(MOUT_MMC0, "mout_mmc0", mout_group2_p, CLK_SRC4, 0, 4), + + MUX(MOUT_PWM, "mout_pwm", mout_group2_p, CLK_SRC5, 12, 4), + MUX(MOUT_SPI1, "mout_spi1", mout_group2_p, CLK_SRC5, 4, 4), + MUX(MOUT_SPI0, "mout_spi0", mout_group2_p, CLK_SRC5, 0, 4), + + MUX(MOUT_DMC0, "mout_dmc0", mout_group1_p, CLK_SRC6, 24, 2), + MUX(MOUT_PWI, "mout_pwi", mout_group2_p, CLK_SRC6, 20, 4), + MUX(MOUT_HPM, "mout_hpm", mout_group3_p, CLK_SRC6, 16, 1), + MUX(MOUT_SPDIF, "mout_spdif", mout_spdif_p, CLK_SRC6, 12, 2), + MUX(MOUT_AUDIO2, "mout_audio2", mout_audio2_p, CLK_SRC6, 8, 4), + MUX(MOUT_AUDIO1, "mout_audio1", mout_audio1_p, CLK_SRC6, 4, 4), + MUX(MOUT_AUDIO0, 
"mout_audio0", mout_audio0_p, CLK_SRC6, 0, 4), + + MUX(MOUT_CLKSEL, "mout_clksel", mout_clksel_p, CLK_OUT, 12, 5), +}; + +/* S5P6442-specific clock muxes. */ +static struct samsung_mux_clock s5p6442_mux_clks[] __initdata = { + MUX(MOUT_VPLL, "mout_vpll", mout_vpll_6442_p, CLK_SRC0, 12, 1), + + MUX(MOUT_FIMD, "mout_fimd", mout_group2_6442_p, CLK_SRC1, 20, 4), + MUX(MOUT_CAM1, "mout_cam1", mout_group2_6442_p, CLK_SRC1, 16, 4), + MUX(MOUT_CAM0, "mout_cam0", mout_group2_6442_p, CLK_SRC1, 12, 4), + MUX(MOUT_MIXER, "mout_mixer", mout_mixer_6442_p, CLK_SRC1, 4, 1), + + MUX(MOUT_D0SYNC, "mout_d0sync", mout_d0sync_6442_p, CLK_SRC2, 28, 1), + MUX(MOUT_D1SYNC, "mout_d1sync", mout_d1sync_6442_p, CLK_SRC2, 24, 1), + + MUX(MOUT_FIMC2, "mout_fimc2", mout_group2_6442_p, CLK_SRC3, 20, 4), + MUX(MOUT_FIMC1, "mout_fimc1", mout_group2_6442_p, CLK_SRC3, 16, 4), + MUX(MOUT_FIMC0, "mout_fimc0", mout_group2_6442_p, CLK_SRC3, 12, 4), + + MUX(MOUT_UART2, "mout_uart2", mout_group2_6442_p, CLK_SRC4, 24, 4), + MUX(MOUT_UART1, "mout_uart1", mout_group2_6442_p, CLK_SRC4, 20, 4), + MUX(MOUT_UART0, "mout_uart0", mout_group2_6442_p, CLK_SRC4, 16, 4), + MUX(MOUT_MMC2, "mout_mmc2", mout_group2_6442_p, CLK_SRC4, 8, 4), + MUX(MOUT_MMC1, "mout_mmc1", mout_group2_6442_p, CLK_SRC4, 4, 4), + MUX(MOUT_MMC0, "mout_mmc0", mout_group2_6442_p, CLK_SRC4, 0, 4), + + MUX(MOUT_PWM, "mout_pwm", mout_group2_6442_p, CLK_SRC5, 12, 4), + MUX(MOUT_SPI0, "mout_spi0", mout_group2_6442_p, CLK_SRC5, 0, 4), + + MUX(MOUT_AUDIO1, "mout_audio1", mout_audio1_6442_p, CLK_SRC6, 4, 4), + MUX(MOUT_AUDIO0, "mout_audio0", mout_audio0_6442_p, CLK_SRC6, 0, 4), + + MUX(MOUT_CLKSEL, "mout_clksel", mout_clksel_6442_p, CLK_OUT, 12, 5), +}; + +/* S5PV210-specific fixed rate clocks generated inside the SoC. */ +static struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initdata = { + FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000), + FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000), + FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000), + FRATE(SCLK_USBPHY1, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000), +}; + +/* S5P6442-specific fixed rate clocks generated inside the SoC. */ +static struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initdata = { + FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 30000000), +}; + +/* Common clock dividers. 
*/ +static struct samsung_div_clock div_clks[] __initdata = { + DIV(DOUT_PCLKP, "dout_pclkp", "dout_hclkp", CLK_DIV0, 28, 3), + DIV(DOUT_PCLKD, "dout_pclkd", "dout_hclkd", CLK_DIV0, 20, 3), + DIV(DOUT_A2M, "dout_a2m", "mout_apll", CLK_DIV0, 4, 3), + DIV(DOUT_APLL, "dout_apll", "mout_msys", CLK_DIV0, 0, 3), + + DIV(DOUT_FIMD, "dout_fimd", "mout_fimd", CLK_DIV1, 20, 4), + DIV(DOUT_CAM1, "dout_cam1", "mout_cam1", CLK_DIV1, 16, 4), + DIV(DOUT_CAM0, "dout_cam0", "mout_cam0", CLK_DIV1, 12, 4), + + DIV(DOUT_FIMC2, "dout_fimc2", "mout_fimc2", CLK_DIV3, 20, 4), + DIV(DOUT_FIMC1, "dout_fimc1", "mout_fimc1", CLK_DIV3, 16, 4), + DIV(DOUT_FIMC0, "dout_fimc0", "mout_fimc0", CLK_DIV3, 12, 4), + + DIV(DOUT_UART2, "dout_uart2", "mout_uart2", CLK_DIV4, 24, 4), + DIV(DOUT_UART1, "dout_uart1", "mout_uart1", CLK_DIV4, 20, 4), + DIV(DOUT_UART0, "dout_uart0", "mout_uart0", CLK_DIV4, 16, 4), + DIV(DOUT_MMC2, "dout_mmc2", "mout_mmc2", CLK_DIV4, 8, 4), + DIV(DOUT_MMC1, "dout_mmc1", "mout_mmc1", CLK_DIV4, 4, 4), + DIV(DOUT_MMC0, "dout_mmc0", "mout_mmc0", CLK_DIV4, 0, 4), + + DIV(DOUT_PWM, "dout_pwm", "mout_pwm", CLK_DIV5, 12, 4), + DIV(DOUT_SPI0, "dout_spi0", "mout_spi0", CLK_DIV5, 0, 4), + + DIV(DOUT_FLASH, "dout_flash", "mout_flash", CLK_DIV6, 12, 3), + DIV(DOUT_AUDIO1, "dout_audio1", "mout_audio1", CLK_DIV6, 4, 4), + DIV(DOUT_AUDIO0, "dout_audio0", "mout_audio0", CLK_DIV6, 0, 4), + + DIV(DOUT_CLKOUT, "dout_clkout", "mout_clksel", CLK_OUT, 20, 4), +}; + +/* S5PV210-specific clock dividers. */ +static struct samsung_div_clock s5pv210_div_clks[] __initdata = { + DIV(DOUT_HCLKP, "dout_hclkp", "mout_psys", CLK_DIV0, 24, 4), + DIV(DOUT_HCLKD, "dout_hclkd", "mout_dsys", CLK_DIV0, 16, 4), + DIV(DOUT_PCLKM, "dout_pclkm", "dout_hclkm", CLK_DIV0, 12, 3), + DIV(DOUT_HCLKM, "dout_hclkm", "dout_apll", CLK_DIV0, 8, 3), + + DIV(DOUT_CSIS, "dout_csis", "mout_csis", CLK_DIV1, 28, 4), + DIV(DOUT_TBLK, "dout_tblk", "mout_vpll", CLK_DIV1, 0, 4), + + DIV(DOUT_G2D, "dout_g2d", "mout_g2d", CLK_DIV2, 8, 4), + DIV(DOUT_MFC, "dout_mfc", "mout_mfc", CLK_DIV2, 4, 4), + DIV(DOUT_G3D, "dout_g3d", "mout_g3d", CLK_DIV2, 0, 4), + + DIV(DOUT_UART3, "dout_uart3", "mout_uart3", CLK_DIV4, 28, 4), + DIV(DOUT_MMC3, "dout_mmc3", "mout_mmc3", CLK_DIV4, 12, 4), + + DIV(DOUT_SPI1, "dout_spi1", "mout_spi1", CLK_DIV5, 4, 4), + + DIV(DOUT_DMC0, "dout_dmc0", "mout_dmc0", CLK_DIV6, 28, 4), + DIV(DOUT_PWI, "dout_pwi", "mout_pwi", CLK_DIV6, 24, 4), + DIV(DOUT_HPM, "dout_hpm", "dout_copy", CLK_DIV6, 20, 3), + DIV(DOUT_COPY, "dout_copy", "mout_hpm", CLK_DIV6, 16, 3), + DIV(DOUT_AUDIO2, "dout_audio2", "mout_audio2", CLK_DIV6, 8, 4), + + DIV(DOUT_DPM, "dout_dpm", "dout_pclkp", CLK_DIV7, 8, 7), + DIV(DOUT_DVSEM, "dout_dvsem", "dout_pclkp", CLK_DIV7, 0, 7), +}; + +/* S5P6442-specific clock dividers. */ +static struct samsung_div_clock s5p6442_div_clks[] __initdata = { + DIV(DOUT_HCLKP, "dout_hclkp", "mout_d1sync", CLK_DIV0, 24, 4), + DIV(DOUT_HCLKD, "dout_hclkd", "mout_d0sync", CLK_DIV0, 16, 4), + + DIV(DOUT_MIXER, "dout_mixer", "mout_vpll", CLK_DIV1, 0, 4), +}; + +/* Common clock gates. 
*/ +static struct samsung_gate_clock gate_clks[] __initdata = { + GATE(CLK_ROTATOR, "rotator", "dout_hclkd", CLK_GATE_IP0, 29, 0, 0), + GATE(CLK_FIMC2, "fimc2", "dout_hclkd", CLK_GATE_IP0, 26, 0, 0), + GATE(CLK_FIMC1, "fimc1", "dout_hclkd", CLK_GATE_IP0, 25, 0, 0), + GATE(CLK_FIMC0, "fimc0", "dout_hclkd", CLK_GATE_IP0, 24, 0, 0), + GATE(CLK_PDMA0, "pdma0", "dout_hclkp", CLK_GATE_IP0, 3, 0, 0), + GATE(CLK_MDMA, "mdma", "dout_hclkd", CLK_GATE_IP0, 2, 0, 0), + + GATE(CLK_SROMC, "sromc", "dout_hclkp", CLK_GATE_IP1, 26, 0, 0), + GATE(CLK_NANDXL, "nandxl", "dout_hclkp", CLK_GATE_IP1, 24, 0, 0), + GATE(CLK_USB_OTG, "usb_otg", "dout_hclkp", CLK_GATE_IP1, 16, 0, 0), + GATE(CLK_TVENC, "tvenc", "dout_hclkd", CLK_GATE_IP1, 10, 0, 0), + GATE(CLK_MIXER, "mixer", "dout_hclkd", CLK_GATE_IP1, 9, 0, 0), + GATE(CLK_VP, "vp", "dout_hclkd", CLK_GATE_IP1, 8, 0, 0), + GATE(CLK_FIMD, "fimd", "dout_hclkd", CLK_GATE_IP1, 0, 0, 0), + + GATE(CLK_HSMMC2, "hsmmc2", "dout_hclkp", CLK_GATE_IP2, 18, 0, 0), + GATE(CLK_HSMMC1, "hsmmc1", "dout_hclkp", CLK_GATE_IP2, 17, 0, 0), + GATE(CLK_HSMMC0, "hsmmc0", "dout_hclkp", CLK_GATE_IP2, 16, 0, 0), + GATE(CLK_MODEMIF, "modemif", "dout_hclkp", CLK_GATE_IP2, 9, 0, 0), + GATE(CLK_SECSS, "secss", "dout_hclkp", CLK_GATE_IP2, 0, 0, 0), + + GATE(CLK_PCM1, "pcm1", "dout_pclkp", CLK_GATE_IP3, 29, 0, 0), + GATE(CLK_PCM0, "pcm0", "dout_pclkp", CLK_GATE_IP3, 28, 0, 0), + GATE(CLK_TSADC, "tsadc", "dout_pclkp", CLK_GATE_IP3, 24, 0, 0), + GATE(CLK_PWM, "pwm", "dout_pclkp", CLK_GATE_IP3, 23, 0, 0), + GATE(CLK_WDT, "watchdog", "dout_pclkp", CLK_GATE_IP3, 22, 0, 0), + GATE(CLK_KEYIF, "keyif", "dout_pclkp", CLK_GATE_IP3, 21, 0, 0), + GATE(CLK_UART2, "uart2", "dout_pclkp", CLK_GATE_IP3, 19, 0, 0), + GATE(CLK_UART1, "uart1", "dout_pclkp", CLK_GATE_IP3, 18, 0, 0), + GATE(CLK_UART0, "uart0", "dout_pclkp", CLK_GATE_IP3, 17, 0, 0), + GATE(CLK_SYSTIMER, "systimer", "dout_pclkp", CLK_GATE_IP3, 16, 0, 0), + GATE(CLK_RTC, "rtc", "dout_pclkp", CLK_GATE_IP3, 15, 0, 0), + GATE(CLK_SPI0, "spi0", "dout_pclkp", CLK_GATE_IP3, 12, 0, 0), + GATE(CLK_I2C2, "i2c2", "dout_pclkp", CLK_GATE_IP3, 9, 0, 0), + GATE(CLK_I2C0, "i2c0", "dout_pclkp", CLK_GATE_IP3, 7, 0, 0), + GATE(CLK_I2S1, "i2s1", "dout_pclkp", CLK_GATE_IP3, 5, 0, 0), + GATE(CLK_I2S0, "i2s0", "dout_pclkp", CLK_GATE_IP3, 4, 0, 0), + + GATE(CLK_SECKEY, "seckey", "dout_pclkp", CLK_GATE_IP4, 3, 0, 0), + GATE(CLK_CHIPID, "chipid", "dout_pclkp", CLK_GATE_IP4, 0, 0, 0), + + GATE(SCLK_AUDIO1, "sclk_audio1", "dout_audio1", CLK_SRC_MASK0, 25, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_AUDIO0, "sclk_audio0", "dout_audio0", CLK_SRC_MASK0, 24, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_PWM, "sclk_pwm", "dout_pwm", CLK_SRC_MASK0, 19, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_SPI0, "sclk_spi0", "dout_spi0", CLK_SRC_MASK0, 16, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_UART2, "sclk_uart2", "dout_uart2", CLK_SRC_MASK0, 14, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_UART1, "sclk_uart1", "dout_uart1", CLK_SRC_MASK0, 13, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_UART0, "sclk_uart0", "dout_uart0", CLK_SRC_MASK0, 12, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_MMC2, "sclk_mmc2", "dout_mmc2", CLK_SRC_MASK0, 10, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_MMC1, "sclk_mmc1", "dout_mmc1", CLK_SRC_MASK0, 9, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_MMC0, "sclk_mmc0", "dout_mmc0", CLK_SRC_MASK0, 8, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_FIMD, "sclk_fimd", "dout_fimd", CLK_SRC_MASK0, 5, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_CAM1, "sclk_cam1", "dout_cam1", CLK_SRC_MASK0, 4, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_CAM0, 
"sclk_cam0", "dout_cam0", CLK_SRC_MASK0, 3, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_MIXER, "sclk_mixer", "mout_mixer", CLK_SRC_MASK0, 1, + CLK_SET_RATE_PARENT, 0), + + GATE(SCLK_FIMC2, "sclk_fimc2", "dout_fimc2", CLK_SRC_MASK1, 4, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_FIMC1, "sclk_fimc1", "dout_fimc1", CLK_SRC_MASK1, 3, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_FIMC0, "sclk_fimc0", "dout_fimc0", CLK_SRC_MASK1, 2, + CLK_SET_RATE_PARENT, 0), +}; + +/* S5PV210-specific clock gates. */ +static struct samsung_gate_clock s5pv210_gate_clks[] __initdata = { + GATE(CLK_CSIS, "clk_csis", "dout_hclkd", CLK_GATE_IP0, 31, 0, 0), + GATE(CLK_MFC, "mfc", "dout_hclkm", CLK_GATE_IP0, 16, 0, 0), + GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0), + GATE(CLK_G3D, "g3d", "dout_hclkm", CLK_GATE_IP0, 8, 0, 0), + GATE(CLK_IMEM, "imem", "dout_hclkm", CLK_GATE_IP0, 5, 0, 0), + GATE(CLK_PDMA1, "pdma1", "dout_hclkp", CLK_GATE_IP0, 4, 0, 0), + + GATE(CLK_NFCON, "nfcon", "dout_hclkp", CLK_GATE_IP1, 28, 0, 0), + GATE(CLK_CFCON, "cfcon", "dout_hclkp", CLK_GATE_IP1, 25, 0, 0), + GATE(CLK_USB_HOST, "usb_host", "dout_hclkp", CLK_GATE_IP1, 17, 0, 0), + GATE(CLK_HDMI, "hdmi", "dout_hclkd", CLK_GATE_IP1, 11, 0, 0), + GATE(CLK_DSIM, "dsim", "dout_pclkd", CLK_GATE_IP1, 2, 0, 0), + + GATE(CLK_TZIC3, "tzic3", "dout_hclkm", CLK_GATE_IP2, 31, 0, 0), + GATE(CLK_TZIC2, "tzic2", "dout_hclkm", CLK_GATE_IP2, 30, 0, 0), + GATE(CLK_TZIC1, "tzic1", "dout_hclkm", CLK_GATE_IP2, 29, 0, 0), + GATE(CLK_TZIC0, "tzic0", "dout_hclkm", CLK_GATE_IP2, 28, 0, 0), + GATE(CLK_TSI, "tsi", "dout_hclkd", CLK_GATE_IP2, 20, 0, 0), + GATE(CLK_HSMMC3, "hsmmc3", "dout_hclkp", CLK_GATE_IP2, 19, 0, 0), + GATE(CLK_JTAG, "jtag", "dout_hclkp", CLK_GATE_IP2, 11, 0, 0), + GATE(CLK_CORESIGHT, "coresight", "dout_pclkp", CLK_GATE_IP2, 8, 0, 0), + GATE(CLK_SDM, "sdm", "dout_pclkm", CLK_GATE_IP2, 1, 0, 0), + + GATE(CLK_PCM2, "pcm2", "dout_pclkp", CLK_GATE_IP3, 30, 0, 0), + GATE(CLK_UART3, "uart3", "dout_pclkp", CLK_GATE_IP3, 20, 0, 0), + GATE(CLK_SPI1, "spi1", "dout_pclkp", CLK_GATE_IP3, 13, 0, 0), + GATE(CLK_I2C_HDMI_PHY, "i2c_hdmi_phy", "dout_pclkd", + CLK_GATE_IP3, 11, 0, 0), + GATE(CLK_I2C1, "i2c1", "dout_pclkd", CLK_GATE_IP3, 10, 0, 0), + GATE(CLK_I2S2, "i2s2", "dout_pclkp", CLK_GATE_IP3, 6, 0, 0), + GATE(CLK_AC97, "ac97", "dout_pclkp", CLK_GATE_IP3, 1, 0, 0), + GATE(CLK_SPDIF, "spdif", "dout_pclkp", CLK_GATE_IP3, 0, 0, 0), + + GATE(CLK_TZPC3, "tzpc.3", "dout_pclkd", CLK_GATE_IP4, 8, 0, 0), + GATE(CLK_TZPC2, "tzpc.2", "dout_pclkd", CLK_GATE_IP4, 7, 0, 0), + GATE(CLK_TZPC1, "tzpc.1", "dout_pclkp", CLK_GATE_IP4, 6, 0, 0), + GATE(CLK_TZPC0, "tzpc.0", "dout_pclkm", CLK_GATE_IP4, 5, 0, 0), + GATE(CLK_IEM_APC, "iem_apc", "dout_pclkp", CLK_GATE_IP4, 2, 0, 0), + GATE(CLK_IEM_IEC, "iem_iec", "dout_pclkp", CLK_GATE_IP4, 1, 0, 0), + + GATE(CLK_JPEG, "jpeg", "dout_hclkd", CLK_GATE_IP5, 29, 0, 0), + + GATE(SCLK_SPDIF, "sclk_spdif", "mout_spdif", CLK_SRC_MASK0, 27, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_AUDIO2, "sclk_audio2", "dout_audio2", CLK_SRC_MASK0, 26, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_SPI1, "sclk_spi1", "dout_spi1", CLK_SRC_MASK0, 17, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_UART3, "sclk_uart3", "dout_uart3", CLK_SRC_MASK0, 15, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_MMC3, "sclk_mmc3", "dout_mmc3", CLK_SRC_MASK0, 11, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_CSIS, "sclk_csis", "dout_csis", CLK_SRC_MASK0, 6, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_DAC, "sclk_dac", "mout_dac", CLK_SRC_MASK0, 2, + CLK_SET_RATE_PARENT, 0), + GATE(SCLK_HDMI, "sclk_hdmi", "mout_hdmi", 
CLK_SRC_MASK0, 0, + CLK_SET_RATE_PARENT, 0), +}; + +/* S5P6442-specific clock gates. */ +static struct samsung_gate_clock s5p6442_gate_clks[] __initdata = { + GATE(CLK_JPEG, "jpeg", "dout_hclkd", CLK_GATE_IP0, 28, 0, 0), + GATE(CLK_MFC, "mfc", "dout_hclkd", CLK_GATE_IP0, 16, 0, 0), + GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0), + GATE(CLK_G3D, "g3d", "dout_hclkd", CLK_GATE_IP0, 8, 0, 0), + GATE(CLK_IMEM, "imem", "dout_hclkd", CLK_GATE_IP0, 5, 0, 0), + + GATE(CLK_ETB, "etb", "dout_hclkd", CLK_GATE_IP1, 31, 0, 0), + GATE(CLK_ETM, "etm", "dout_hclkd", CLK_GATE_IP1, 30, 0, 0), + + GATE(CLK_I2C1, "i2c1", "dout_pclkp", CLK_GATE_IP3, 8, 0, 0), + + GATE(SCLK_DAC, "sclk_dac", "mout_vpll", CLK_SRC_MASK0, 2, + CLK_SET_RATE_PARENT, 0), +}; + +/* + * Clock aliases for legacy clkdev look-up. + * NOTE: Needed only to support legacy board files. + */ +static struct samsung_clock_alias s5pv210_aliases[] = { + ALIAS(DOUT_APLL, NULL, "armclk"), + ALIAS(DOUT_HCLKM, NULL, "hclk_msys"), + ALIAS(MOUT_DMC0, NULL, "sclk_dmc0"), +}; + +/* S5PV210-specific PLLs. */ +static struct samsung_pll_clock s5pv210_pll_clks[] __initdata = { + [apll] = PLL(pll_4508, FOUT_APLL, "fout_apll", "fin_pll", + APLL_LOCK, APLL_CON0, NULL), + [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll", + MPLL_LOCK, MPLL_CON, NULL), + [epll] = PLL(pll_4600, FOUT_EPLL, "fout_epll", "fin_pll", + EPLL_LOCK, EPLL_CON0, NULL), + [vpll] = PLL(pll_4502, FOUT_VPLL, "fout_vpll", "mout_vpllsrc", + VPLL_LOCK, VPLL_CON, NULL), +}; + +/* S5P6442-specific PLLs. */ +static struct samsung_pll_clock s5p6442_pll_clks[] __initdata = { + [apll] = PLL(pll_4502, FOUT_APLL, "fout_apll", "fin_pll", + APLL_LOCK, APLL_CON0, NULL), + [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll", + MPLL_LOCK, MPLL_CON, NULL), + [epll] = PLL(pll_4500, FOUT_EPLL, "fout_epll", "fin_pll", + EPLL_LOCK, EPLL_CON0, NULL), + [vpll] = PLL(pll_4500, FOUT_VPLL, "fout_vpll", "fin_pll", + VPLL_LOCK, VPLL_CON, NULL), +}; + +static void __init __s5pv210_clk_init(struct device_node *np, + unsigned long xxti_f, + unsigned long xusbxti_f, + bool is_s5p6442) +{ + struct samsung_clk_provider *ctx; + + ctx = samsung_clk_init(np, reg_base, NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + + samsung_clk_register_mux(ctx, early_mux_clks, + ARRAY_SIZE(early_mux_clks)); + + if (is_s5p6442) { + samsung_clk_register_fixed_rate(ctx, s5p6442_frate_clks, + ARRAY_SIZE(s5p6442_frate_clks)); + samsung_clk_register_pll(ctx, s5p6442_pll_clks, + ARRAY_SIZE(s5p6442_pll_clks), reg_base); + samsung_clk_register_mux(ctx, s5p6442_mux_clks, + ARRAY_SIZE(s5p6442_mux_clks)); + samsung_clk_register_div(ctx, s5p6442_div_clks, + ARRAY_SIZE(s5p6442_div_clks)); + samsung_clk_register_gate(ctx, s5p6442_gate_clks, + ARRAY_SIZE(s5p6442_gate_clks)); + } else { + samsung_clk_register_fixed_rate(ctx, s5pv210_frate_clks, + ARRAY_SIZE(s5pv210_frate_clks)); + samsung_clk_register_pll(ctx, s5pv210_pll_clks, + ARRAY_SIZE(s5pv210_pll_clks), reg_base); + samsung_clk_register_mux(ctx, s5pv210_mux_clks, + ARRAY_SIZE(s5pv210_mux_clks)); + samsung_clk_register_div(ctx, s5pv210_div_clks, + ARRAY_SIZE(s5pv210_div_clks)); + samsung_clk_register_gate(ctx, s5pv210_gate_clks, + ARRAY_SIZE(s5pv210_gate_clks)); + } + + samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks)); + samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks)); + samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks)); + + samsung_clk_register_fixed_factor(ctx, ffactor_clks, + ARRAY_SIZE(ffactor_clks)); + + 
samsung_clk_register_alias(ctx, s5pv210_aliases, + ARRAY_SIZE(s5pv210_aliases)); + + s5pv210_clk_sleep_init(); + + pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n" + "\tmout_epll = %ld, mout_vpll = %ld\n", + is_s5p6442 ? "S5P6442" : "S5PV210", + _get_rate("mout_apll"), _get_rate("mout_mpll"), + _get_rate("mout_epll"), _get_rate("mout_vpll")); +} + +static void __init s5pv210_clk_dt_init(struct device_node *np) +{ + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + + __s5pv210_clk_init(np, 0, 0, false); +} +CLK_OF_DECLARE(s5pv210_clk, "samsung,s5pv210-clock", s5pv210_clk_dt_init); + +static void __init s5p6442_clk_dt_init(struct device_node *np) +{ + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + + __s5pv210_clk_init(np, 0, 0, true); +} +CLK_OF_DECLARE(s5p6442_clk, "samsung,s5p6442-clock", s5p6442_clk_dt_init); diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c index 507015314827..0aa8830ae7cc 100644 --- a/drivers/clk/tegra/clk-periph-gate.c +++ b/drivers/clk/tegra/clk-periph-gate.c @@ -20,7 +20,8 @@ #include <linux/io.h> #include <linux/delay.h> #include <linux/err.h> -#include <linux/tegra-soc.h> + +#include <soc/tegra/fuse.h> #include "clk.h" diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 8b10c38b6e3c..5bbacd01094f 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -22,8 +22,11 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/clk/tegra.h> -#include <linux/tegra-powergate.h> + +#include <soc/tegra/pmc.h> + #include <dt-bindings/clock/tegra30-car.h> + #include "clk.h" #include "clk-id.h" diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index bf452b62beb8..f87c609e8f72 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -19,7 +19,8 @@ #include <linux/of.h> #include <linux/clk/tegra.h> #include <linux/reset-controller.h> -#include <linux/tegra-soc.h> + +#include <soc/tegra/fuse.h> #include "clk.h" diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index 4a65b410e4d5..af29359677da 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -139,9 +139,13 @@ static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate, static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { - struct dra7_atl_desc *cdesc = to_atl_desc(hw); + struct dra7_atl_desc *cdesc; u32 divider; + if (!hw || !rate) + return -EINVAL; + + cdesc = to_atl_desc(hw); divider = ((parent_rate + rate / 2) / rate) - 1; if (divider > DRA7_ATL_DIVIDER_MASK) divider = DRA7_ATL_DIVIDER_MASK; diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index e6aa10db7bba..a837f703be65 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -211,11 +211,16 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { - struct clk_divider *divider = to_clk_divider(hw); + struct clk_divider *divider; unsigned int div, value; unsigned long flags = 0; u32 val; + if (!hw || !rate) + return -EINVAL; + + divider = to_clk_divider(hw); + div = DIV_ROUND_UP(parent_rate, rate); value = _get_val(divider, div); diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile index fd449f9b006d..162e519cb0f9 100644 --- 
a/drivers/clk/versatile/Makefile +++ b/drivers/clk/versatile/Makefile @@ -1,6 +1,5 @@ # Makefile for Versatile-specific clocks -obj-$(CONFIG_ICST) += clk-icst.o -obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o +obj-$(CONFIG_ICST) += clk-icst.o clk-versatile.o obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-versatile.c index 734c4b8fe6ab..a76981e88cb6 100644 --- a/drivers/clk/versatile/clk-integrator.c +++ b/drivers/clk/versatile/clk-versatile.c @@ -1,5 +1,6 @@ /* - * Clock driver for the ARM Integrator/AP and Integrator/CP boards + * Clock driver for the ARM Integrator/AP, Integrator/CP, Versatile AB and + * Versatile PB boards. * Copyright (C) 2012 Linus Walleij * * This program is free software; you can redistribute it and/or modify @@ -17,6 +18,9 @@ #define INTEGRATOR_HDR_LOCK_OFFSET 0x14 +#define VERSATILE_SYS_OSCCLCD_OFFSET 0x1c +#define VERSATILE_SYS_LOCK_OFFSET 0x20 + /* Base offset for the core module */ static void __iomem *cm_base; @@ -37,11 +41,27 @@ static const struct clk_icst_desc __initdata cm_auxosc_desc = { .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET, }; -static void __init of_integrator_cm_osc_setup(struct device_node *np) +static const struct icst_params versatile_auxosc_params = { + .vco_max = ICST307_VCO_MAX, + .vco_min = ICST307_VCO_MIN, + .vd_min = 4 + 8, + .vd_max = 511 + 8, + .rd_min = 1 + 2, + .rd_max = 127 + 2, + .s2div = icst307_s2div, + .idx2s = icst307_idx2s, +}; + +static const struct clk_icst_desc versatile_auxosc_desc __initconst = { + .params = &versatile_auxosc_params, + .vco_offset = VERSATILE_SYS_OSCCLCD_OFFSET, + .lock_offset = VERSATILE_SYS_LOCK_OFFSET, +}; +static void __init cm_osc_setup(struct device_node *np, + const struct clk_icst_desc *desc) { struct clk *clk = ERR_PTR(-EINVAL); const char *clk_name = np->name; - const struct clk_icst_desc *desc = &cm_auxosc_desc; const char *parent_name; if (!cm_base) { @@ -65,5 +85,17 @@ static void __init of_integrator_cm_osc_setup(struct device_node *np) if (!IS_ERR(clk)) of_clk_add_provider(np, of_clk_src_simple_get, clk); } + +static void __init of_integrator_cm_osc_setup(struct device_node *np) +{ + cm_osc_setup(np, &cm_auxosc_desc); +} CLK_OF_DECLARE(integrator_cm_auxosc_clk, "arm,integrator-cm-auxosc", of_integrator_cm_osc_setup); + +static void __init of_versatile_cm_osc_setup(struct device_node *np) +{ + cm_osc_setup(np, &versatile_auxosc_desc); +} +CLK_OF_DECLARE(versatile_cm_auxosc_clk, + "arm,versatile-cm-auxosc", of_versatile_cm_osc_setup); diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c index d1869f02051c..d2616ef16770 100644 --- a/drivers/clocksource/tegra20_timer.c +++ b/drivers/clocksource/tegra20_timer.c @@ -27,6 +27,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/sched_clock.h> +#include <linux/delay.h> #include <asm/mach/time.h> #include <asm/smp_twd.h> @@ -53,6 +54,8 @@ static void __iomem *rtc_base; static struct timespec persistent_ts; static u64 persistent_ms, last_persistent_ms; +static struct delay_timer tegra_delay_timer; + #define timer_writel(value, reg) \ __raw_writel(value, timer_reg_base + (reg)) #define timer_readl(reg) \ @@ -139,6 +142,11 @@ static void tegra_read_persistent_clock(struct timespec *ts) *ts = *tsp; } +static unsigned long tegra_delay_timer_read_counter_long(void) +{ + return readl(timer_reg_base + TIMERUS_CNTR_1US); +} + static 
irqreturn_t tegra_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = (struct clock_event_device *)dev_id; @@ -206,6 +214,11 @@ static void __init tegra20_init_timer(struct device_node *np) BUG(); } + tegra_delay_timer.read_current_timer = + tegra_delay_timer_read_counter_long; + tegra_delay_timer.freq = 1000000; + register_current_timer_delay(&tegra_delay_timer); + ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq); if (ret) { pr_err("Failed to register timer IRQ: %d\n", ret); diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 1f4d4e315057..a46c223c2506 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -24,6 +24,7 @@ #include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/export.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/pm_opp.h> @@ -593,3 +594,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) arm_bL_ops = NULL; } EXPORT_SYMBOL_GPL(bL_cpufreq_unregister); + +MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); +MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c index 8d9d59108906..4550f6976768 100644 --- a/drivers/cpufreq/arm_big_little_dt.c +++ b/drivers/cpufreq/arm_big_little_dt.c @@ -114,4 +114,4 @@ module_platform_driver(generic_bL_platdrv); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 86beda9f950b..0d2172b07765 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -137,7 +137,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) * not yet registered, we should try defering probe. 
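The cpufreq-cpu0 hunk above only lowers the log level, but the pattern it sits in is the interesting part: when a required supply's provider has not probed yet, the driver returns -EPROBE_DEFER so the driver core retries the probe later instead of failing outright. A hedged sketch of that pattern; the helper name is hypothetical and not the driver's exact code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int my_get_cpu_regulator(struct device *dev, struct regulator **out)
{
        struct regulator *reg = regulator_get(dev, "cpu0");

        if (IS_ERR(reg)) {
                if (PTR_ERR(reg) == -EPROBE_DEFER)
                        /* provider not ready yet: ask the core to retry */
                        dev_dbg(dev, "cpu0 regulator not ready, retry\n");
                return PTR_ERR(reg);
        }

        *out = reg;
        return 0;
}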
*/ if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { - dev_err(cpu_dev, "cpu0 regulator not ready, retry\n"); + dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n"); ret = -EPROBE_DEFER; goto out_put_node; } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index d9fdeddcef96..61190f6b4829 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1289,6 +1289,8 @@ err_get_freq: per_cpu(cpufreq_cpu_data, j) = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); + up_write(&policy->rwsem); + if (cpufreq_driver->exit) cpufreq_driver->exit(policy); err_set_policy_cpu: @@ -1657,7 +1659,7 @@ void cpufreq_suspend(void) return; if (!has_target()) - return; + goto suspend; pr_debug("%s: Suspending Governors\n", __func__); @@ -1671,6 +1673,7 @@ void cpufreq_suspend(void) policy); } +suspend: cpufreq_suspended = true; } @@ -1687,13 +1690,13 @@ void cpufreq_resume(void) if (!cpufreq_driver) return; + cpufreq_suspended = false; + if (!has_target()) return; pr_debug("%s: Resuming Governors\n", __func__); - cpufreq_suspended = false; - list_for_each_entry(policy, &cpufreq_policy_list, policy_list) { if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) pr_err("%s: Failed to resume driver: %p\n", __func__, diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c index c0c6f4a4eccf..773bcde893c0 100644 --- a/drivers/cpufreq/cpufreq_opp.c +++ b/drivers/cpufreq/cpufreq_opp.c @@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, goto out; } - freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL); + freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); if (!freq_table) { ret = -ENOMEM; goto out; diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c index e5122f1bfe78..6bd69adc3c5e 100644 --- a/drivers/cpufreq/integrator-cpufreq.c +++ b/drivers/cpufreq/integrator-cpufreq.c @@ -92,7 +92,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, * Bind to the specified CPU. When this call returns, * we should be running on the right CPU. */ - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); BUG_ON(cpu != smp_processor_id()); /* get current setting */ @@ -118,7 +118,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, freqs.new = icst_hz(&cclk_params, vco) / 1000; if (freqs.old == freqs.new) { - set_cpus_allowed(current, cpus_allowed); + set_cpus_allowed_ptr(current, &cpus_allowed); return 0; } @@ -141,7 +141,7 @@ static int integrator_set_target(struct cpufreq_policy *policy, /* * Restore the CPUs allowed mask. */ - set_cpus_allowed(current, cpus_allowed); + set_cpus_allowed_ptr(current, &cpus_allowed); cpufreq_freq_transition_end(policy, &freqs, 0); @@ -157,7 +157,7 @@ static unsigned int integrator_get(unsigned int cpu) cpus_allowed = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(cpu)); + set_cpus_allowed_ptr(current, cpumask_of(cpu)); BUG_ON(cpu != smp_processor_id()); /* detect memory etc. 
*/ @@ -173,7 +173,7 @@ static unsigned int integrator_get(unsigned int cpu) current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ - set_cpus_allowed(current, cpus_allowed); + set_cpus_allowed_ptr(current, &cpus_allowed); return current_freq; } @@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev) return cpufreq_register_driver(&integrator_driver); } -static void __exit integrator_cpufreq_remove(struct platform_device *pdev) +static int __exit integrator_cpufreq_remove(struct platform_device *pdev) { - cpufreq_unregister_driver(&integrator_driver); + return cpufreq_unregister_driver(&integrator_driver); } static const struct of_device_id integrator_cpufreq_match[] = { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index c5eac949760d..0668b389c516 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -660,6 +660,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(0x3f, core_params), ICPU(0x45, core_params), ICPU(0x46, core_params), + ICPU(0x4c, byt_params), ICPU(0x4f, core_params), ICPU(0x56, core_params), {} @@ -688,7 +689,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) add_timer_on(&cpu->timer, cpunum); - pr_info("Intel pstate controlling: cpu %d\n", cpunum); + pr_debug("Intel pstate controlling: cpu %d\n", cpunum); return 0; } @@ -707,10 +708,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) static int intel_pstate_set_policy(struct cpufreq_policy *policy) { - struct cpudata *cpu; - - cpu = all_cpu_data[policy->cpu]; - if (!policy->cpuinfo.max_freq) return -ENODEV; diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index d4add8621944..9fa177206032 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -148,9 +148,9 @@ static void loongson2_cpu_wait(void) u32 cpu_freq; spin_lock_irqsave(&loongson2_wait_lock, flags); - cpu_freq = LOONGSON_CHIPCFG0; - LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ + cpu_freq = LOONGSON_CHIPCFG(0); + LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */ + LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */ spin_unlock_irqrestore(&loongson2_wait_lock, flags); local_irq_enable(); } diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 728a2d879499..4d2c8e861089 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, u32 input_buffer; int cpu; - spin_lock(&pcc_lock); cpu = policy->cpu; pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); @@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy, freqs.old = policy->cur; freqs.new = target_freq; cpufreq_freq_transition_begin(policy, &freqs); + spin_lock(&pcc_lock); input_buffer = 0x1 | (((target_freq * 100) / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 8bc422977b5b..4ff86878727f 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c @@ -499,8 +499,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) } /* Lookup the i2c hwclock */ - for (hwclock = NULL; - (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){ + for_each_node_by_name(hwclock, "i2c-hwclock") { const char *loc = of_get_property(hwclock, "hwctrl-location", 
NULL); if (loc == NULL) diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index bb1d08dc8cc8..379c0837f5a9 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -28,6 +28,7 @@ #include <linux/of.h> #include <asm/cputhreads.h> +#include <asm/firmware.h> #include <asm/reg.h> #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */ @@ -98,7 +99,11 @@ static int init_powernv_pstates(void) return -ENODEV; } - WARN_ON(len_ids != len_freqs); + if (len_ids != len_freqs) { + pr_warn("Entries in ibm,pstate-ids and " + "ibm,pstate-frequencies-mhz does not match\n"); + } + nr_pstates = min(len_ids, len_freqs) / sizeof(u32); if (!nr_pstates) { pr_warn("No PStates found\n"); @@ -131,7 +136,12 @@ static unsigned int pstate_id_to_freq(int pstate_id) int i; i = powernv_pstate_info.max - pstate_id; - BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0); + if (i >= powernv_pstate_info.nr_pstates || i < 0) { + pr_warn("PState id %d outside of PState table, " + "reporting nominal id %d instead\n", + pstate_id, powernv_pstate_info.nominal); + i = powernv_pstate_info.max - powernv_pstate_info.nominal; + } return powernv_freqs[i].frequency; } @@ -321,6 +331,10 @@ static int __init powernv_cpufreq_init(void) { int rc = 0; + /* Don't probe on pseries (guest) platforms */ + if (!firmware_has_feature(FW_FEATURE_OPALv3)) + return -ENODEV; + /* Discover pstates from device tree and init */ rc = init_powernv_pstates(); if (rc) { diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c index cfa0dd8723ec..b8e5da8e188b 100644 --- a/drivers/cpufreq/s3c2410-cpufreq.c +++ b/drivers/cpufreq/s3c2410-cpufreq.c @@ -26,7 +26,6 @@ #include <mach/regs-clock.h> #include <plat/cpu.h> -#include <plat/clock.h> #include <plat/cpu-freq-core.h> /* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */ @@ -104,7 +103,6 @@ static struct s3c_cpufreq_info s3c2410_cpufreq_info = { .calc_iotiming = s3c2410_iotiming_calc, .set_iotiming = s3c2410_iotiming_set, .get_iotiming = s3c2410_iotiming_get, - .resume_clocks = s3c2410_setup_clocks, .set_fvco = s3c2410_set_fvco, .set_refresh = s3c2410_cpufreq_setrefresh, diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c index 4645b4898996..eb262133fef2 100644 --- a/drivers/cpufreq/s3c2412-cpufreq.c +++ b/drivers/cpufreq/s3c2412-cpufreq.c @@ -28,7 +28,6 @@ #include <mach/s3c2412.h> #include <plat/cpu.h> -#include <plat/clock.h> #include <plat/cpu-freq-core.h> /* our clock resources. 
*/ @@ -188,8 +187,6 @@ static struct s3c_cpufreq_info s3c2412_cpufreq_info = { .set_iotiming = s3c2412_iotiming_set, .get_iotiming = s3c2412_iotiming_get, - .resume_clocks = s3c2412_setup_clocks, - .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs), }; diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c index f84ed10755b5..0129f5c70a61 100644 --- a/drivers/cpufreq/s3c2440-cpufreq.c +++ b/drivers/cpufreq/s3c2440-cpufreq.c @@ -29,7 +29,6 @@ #include <plat/cpu.h> #include <plat/cpu-freq-core.h> -#include <plat/clock.h> static struct clk *xtal; static struct clk *fclk; @@ -262,8 +261,6 @@ static struct s3c_cpufreq_info s3c2440_cpufreq_info = { .calc_divs = s3c2440_cpufreq_calcdivs, .calc_freqtable = s3c2440_cpufreq_calctable, - .resume_clocks = s3c244x_setup_clocks, - .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), }; diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 227ebf7c1eea..d00f1cee4509 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -27,7 +27,6 @@ #include <asm/mach/map.h> #include <plat/cpu.h> -#include <plat/clock.h> #include <plat/cpu-freq-core.h> #include <mach/regs-clock.h> diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index 19a10b89fef7..3f9791f07b8e 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -16,11 +16,70 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/cpufreq.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/regulator/consumer.h> -#include <mach/map.h> -#include <mach/regs-clock.h> +static void __iomem *clk_base; +static void __iomem *dmc_base[2]; + +#define S5P_CLKREG(x) (clk_base + (x)) + +#define S5P_APLL_LOCK S5P_CLKREG(0x00) +#define S5P_APLL_CON S5P_CLKREG(0x100) +#define S5P_CLK_SRC0 S5P_CLKREG(0x200) +#define S5P_CLK_SRC2 S5P_CLKREG(0x208) +#define S5P_CLK_DIV0 S5P_CLKREG(0x300) +#define S5P_CLK_DIV2 S5P_CLKREG(0x308) +#define S5P_CLK_DIV6 S5P_CLKREG(0x318) +#define S5P_CLKDIV_STAT0 S5P_CLKREG(0x1000) +#define S5P_CLKDIV_STAT1 S5P_CLKREG(0x1004) +#define S5P_CLKMUX_STAT0 S5P_CLKREG(0x1100) +#define S5P_CLKMUX_STAT1 S5P_CLKREG(0x1104) + +#define S5P_ARM_MCS_CON S5P_CLKREG(0x6100) + +/* CLKSRC0 */ +#define S5P_CLKSRC0_MUX200_SHIFT (16) +#define S5P_CLKSRC0_MUX200_MASK (0x1 << S5P_CLKSRC0_MUX200_SHIFT) +#define S5P_CLKSRC0_MUX166_MASK (0x1<<20) +#define S5P_CLKSRC0_MUX133_MASK (0x1<<24) + +/* CLKSRC2 */ +#define S5P_CLKSRC2_G3D_SHIFT (0) +#define S5P_CLKSRC2_G3D_MASK (0x3 << S5P_CLKSRC2_G3D_SHIFT) +#define S5P_CLKSRC2_MFC_SHIFT (4) +#define S5P_CLKSRC2_MFC_MASK (0x3 << S5P_CLKSRC2_MFC_SHIFT) + +/* CLKDIV0 */ +#define S5P_CLKDIV0_APLL_SHIFT (0) +#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT) +#define S5P_CLKDIV0_A2M_SHIFT (4) +#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT) +#define S5P_CLKDIV0_HCLK200_SHIFT (8) +#define S5P_CLKDIV0_HCLK200_MASK (0x7 << S5P_CLKDIV0_HCLK200_SHIFT) +#define S5P_CLKDIV0_PCLK100_SHIFT (12) +#define S5P_CLKDIV0_PCLK100_MASK (0x7 << S5P_CLKDIV0_PCLK100_SHIFT) +#define S5P_CLKDIV0_HCLK166_SHIFT (16) +#define S5P_CLKDIV0_HCLK166_MASK (0xF << S5P_CLKDIV0_HCLK166_SHIFT) +#define S5P_CLKDIV0_PCLK83_SHIFT (20) +#define S5P_CLKDIV0_PCLK83_MASK (0x7 << S5P_CLKDIV0_PCLK83_SHIFT) +#define S5P_CLKDIV0_HCLK133_SHIFT (24) +#define S5P_CLKDIV0_HCLK133_MASK (0xF << S5P_CLKDIV0_HCLK133_SHIFT) +#define 
S5P_CLKDIV0_PCLK66_SHIFT (28) +#define S5P_CLKDIV0_PCLK66_MASK (0x7 << S5P_CLKDIV0_PCLK66_SHIFT) + +/* CLKDIV2 */ +#define S5P_CLKDIV2_G3D_SHIFT (0) +#define S5P_CLKDIV2_G3D_MASK (0xF << S5P_CLKDIV2_G3D_SHIFT) +#define S5P_CLKDIV2_MFC_SHIFT (4) +#define S5P_CLKDIV2_MFC_MASK (0xF << S5P_CLKDIV2_MFC_SHIFT) + +/* CLKDIV6 */ +#define S5P_CLKDIV6_ONEDRAM_SHIFT (28) +#define S5P_CLKDIV6_ONEDRAM_MASK (0xF << S5P_CLKDIV6_ONEDRAM_SHIFT) static struct clk *dmc0_clk; static struct clk *dmc1_clk; @@ -142,9 +201,9 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq) void __iomem *reg = NULL; if (ch == DMC0) { - reg = (S5P_VA_DMC0 + 0x30); + reg = (dmc_base[0] + 0x30); } else if (ch == DMC1) { - reg = (S5P_VA_DMC1 + 0x30); + reg = (dmc_base[1] + 0x30); } else { printk(KERN_ERR "Cannot find DMC port\n"); return; @@ -442,7 +501,7 @@ static int check_mem_type(void __iomem *dmc_reg) return val >> 8; } -static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) +static int s5pv210_cpu_init(struct cpufreq_policy *policy) { unsigned long mem_type; int ret; @@ -472,7 +531,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) * check_mem_type : This driver only support LPDDR & LPDDR2. * other memory type is not supported. */ - mem_type = check_mem_type(S5P_VA_DMC0); + mem_type = check_mem_type(dmc_base[0]); if ((mem_type != LPDDR) && (mem_type != LPDDR2)) { printk(KERN_ERR "CPUFreq doesn't support this memory type\n"); @@ -481,10 +540,10 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) } /* Find current refresh counter and frequency each DMC */ - s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000); + s5pv210_dram_conf[0].refresh = (__raw_readl(dmc_base[0] + 0x30) * 1000); s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk); - s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000); + s5pv210_dram_conf[1].refresh = (__raw_readl(dmc_base[1] + 0x30) * 1000); s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); policy->suspend_freq = SLEEP_FREQ; @@ -527,8 +586,55 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = { .notifier_call = s5pv210_cpufreq_reboot_notifier_event, }; -static int __init s5pv210_cpufreq_init(void) +static int s5pv210_cpufreq_probe(struct platform_device *pdev) { + struct device_node *np; + int id; + + /* + * HACK: This is a temporary workaround to get access to clock + * and DMC controller registers directly and remove static mappings + * and dependencies on platform headers. It is necessary to enable + * S5PV210 multi-platform support and will be removed together with + * this whole driver as soon as S5PV210 gets migrated to use + * cpufreq-cpu0 driver. 
+ */ + np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + clk_base = of_iomap(np, 0); + if (!clk_base) { + pr_err("%s: failed to map clock registers\n", __func__); + return -EFAULT; + } + + for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") { + id = of_alias_get_id(np, "dmc"); + if (id < 0 || id >= ARRAY_SIZE(dmc_base)) { + pr_err("%s: failed to get alias of dmc node '%s'\n", + __func__, np->name); + return id; + } + + dmc_base[id] = of_iomap(np, 0); + if (!dmc_base[id]) { + pr_err("%s: failed to map dmc%d registers\n", + __func__, id); + return -EFAULT; + } + } + + for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) { + if (!dmc_base[id]) { + pr_err("%s: failed to find dmc%d node\n", __func__, id); + return -ENODEV; + } + } + arm_regulator = regulator_get(NULL, "vddarm"); if (IS_ERR(arm_regulator)) { pr_err("failed to get regulator vddarm"); @@ -547,4 +653,11 @@ static int __init s5pv210_cpufreq_init(void) return cpufreq_register_driver(&s5pv210_driver); } -late_initcall(s5pv210_cpufreq_init); +static struct platform_driver s5pv210_cpufreq_platdrv = { + .driver = { + .name = "s5pv210-cpufreq", + .owner = THIS_MODULE, + }, + .probe = s5pv210_cpufreq_probe, +}; +module_platform_driver(s5pv210_cpufreq_platdrv); diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 8635eec96da5..5fc96d5d656b 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c @@ -324,8 +324,8 @@ static int __init speedstep_init(void) return -ENODEV; } - pr_debug("signature:0x%.8ulx, command:0x%.8ulx, " - "event:0x%.8ulx, perf_level:0x%.8ulx.\n", + pr_debug("signature:0x%.8x, command:0x%.8x, " + "event:0x%.8x, perf_level:0x%.8x.\n", ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level); diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index a186dec8e5df..38cff69ffe06 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -1,15 +1,9 @@ # # ARM CPU Idle drivers # -config ARM_ARMADA_370_XP_CPUIDLE - bool "CPU Idle Driver for Armada 370/XP family processors" - depends on ARCH_MVEBU - help - Select this to enable cpuidle on Armada 370/XP processors. - config ARM_BIG_LITTLE_CPUIDLE bool "Support for ARM big.LITTLE processors" - depends on ARCH_VEXPRESS_TC2_PM + depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS depends on MCPM select ARM_CPU_SUSPEND select CPU_IDLE_MULTIPLE_DRIVERS @@ -62,3 +56,9 @@ config ARM_EXYNOS_CPUIDLE depends on ARCH_EXYNOS help Select this to enable cpuidle for Exynos processors + +config ARM_MVEBU_V7_CPUIDLE + bool "CPU Idle Driver for mvebu v7 family processors" + depends on ARCH_MVEBU + help + Select this to enable cpuidle on Armada 370, 38x and XP processors. 
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index d8bb1ff72561..11edb31c55e9 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o ################################################################################## # ARM SoC drivers -obj-$(CONFIG_ARM_ARMADA_370_XP_CPUIDLE) += cpuidle-armada-370-xp.o +obj-$(CONFIG_ARM_MVEBU_V7_CPUIDLE) += cpuidle-mvebu-v7.o obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o obj-$(CONFIG_ARM_CLPS711X_CPUIDLE) += cpuidle-clps711x.o obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o diff --git a/drivers/cpuidle/cpuidle-armada-370-xp.c b/drivers/cpuidle/cpuidle-armada-370-xp.c deleted file mode 100644 index a5fba0287bfb..000000000000 --- a/drivers/cpuidle/cpuidle-armada-370-xp.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Marvell Armada 370 and Armada XP SoC cpuidle driver - * - * Copyright (C) 2014 Marvell - * - * Nadav Haklai <nadavh@marvell.com> - * Gregory CLEMENT <gregory.clement@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - * - * Maintainer: Gregory CLEMENT <gregory.clement@free-electrons.com> - */ - -#include <linux/cpu_pm.h> -#include <linux/cpuidle.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/suspend.h> -#include <linux/platform_device.h> -#include <asm/cpuidle.h> - -#define ARMADA_370_XP_MAX_STATES 3 -#define ARMADA_370_XP_FLAG_DEEP_IDLE 0x10000 - -static int (*armada_370_xp_cpu_suspend)(int); - -static int armada_370_xp_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - int ret; - bool deepidle = false; - cpu_pm_enter(); - - if (drv->states[index].flags & ARMADA_370_XP_FLAG_DEEP_IDLE) - deepidle = true; - - ret = armada_370_xp_cpu_suspend(deepidle); - if (ret) - return ret; - - cpu_pm_exit(); - - return index; -} - -static struct cpuidle_driver armada_370_xp_idle_driver = { - .name = "armada_370_xp_idle", - .states[0] = ARM_CPUIDLE_WFI_STATE, - .states[1] = { - .enter = armada_370_xp_enter_idle, - .exit_latency = 10, - .power_usage = 50, - .target_residency = 100, - .flags = CPUIDLE_FLAG_TIME_VALID, - .name = "Idle", - .desc = "CPU power down", - }, - .states[2] = { - .enter = armada_370_xp_enter_idle, - .exit_latency = 100, - .power_usage = 5, - .target_residency = 1000, - .flags = CPUIDLE_FLAG_TIME_VALID | - ARMADA_370_XP_FLAG_DEEP_IDLE, - .name = "Deep idle", - .desc = "CPU and L2 Fabric power down", - }, - .state_count = ARMADA_370_XP_MAX_STATES, -}; - -static int armada_370_xp_cpuidle_probe(struct platform_device *pdev) -{ - - armada_370_xp_cpu_suspend = (void *)(pdev->dev.platform_data); - return cpuidle_register(&armada_370_xp_idle_driver, NULL); -} - -static struct platform_driver armada_370_xp_cpuidle_plat_driver = { - .driver = { - .name = "cpuidle-armada-370-xp", - .owner = THIS_MODULE, - }, - .probe = armada_370_xp_cpuidle_probe, -}; - -module_platform_driver(armada_370_xp_cpuidle_plat_driver); - -MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>"); -MODULE_DESCRIPTION("Armada 370/XP cpu idle driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c index b45fc6249041..ef94c3b81f18 100644 --- a/drivers/cpuidle/cpuidle-big_little.c +++ b/drivers/cpuidle/cpuidle-big_little.c @@ -138,39 +138,42 @@ static int 
bl_enter_powerdown(struct cpuidle_device *dev, return idx; } -static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id) +static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id) { - struct cpuinfo_arm *cpu_info; struct cpumask *cpumask; - unsigned long cpuid; int cpu; cpumask = kzalloc(cpumask_size(), GFP_KERNEL); if (!cpumask) return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_info = &per_cpu(cpu_data, cpu); - cpuid = is_smp() ? cpu_info->cpuid : read_cpuid_id(); - - /* read cpu id part number */ - if ((cpuid & 0xFFF0) == cpu_id) + for_each_possible_cpu(cpu) + if (smp_cpuid_part(cpu) == part_id) cpumask_set_cpu(cpu, cpumask); - } drv->cpumask = cpumask; return 0; } +static const struct of_device_id compatible_machine_match[] = { + { .compatible = "arm,vexpress,v2p-ca15_a7" }, + { .compatible = "samsung,exynos5420" }, + {}, +}; + static int __init bl_idle_init(void) { int ret; + struct device_node *root = of_find_node_by_path("/"); + + if (!root) + return -ENODEV; /* * Initialize the driver just for a compliant set of machines */ - if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7")) + if (!of_match_node(compatible_machine_match, root)) return -ENODEV; /* * For now the differentiation between little and big cores diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c index 7c0151263828..ba9b34b579f3 100644 --- a/drivers/cpuidle/cpuidle-exynos.c +++ b/drivers/cpuidle/cpuidle-exynos.c @@ -20,25 +20,6 @@ static void (*exynos_enter_aftr)(void); -static int idle_finisher(unsigned long flags) -{ - exynos_enter_aftr(); - cpu_do_idle(); - - return 1; -} - -static int exynos_enter_core0_aftr(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - cpu_pm_enter(); - cpu_suspend(0, idle_finisher); - cpu_pm_exit(); - - return index; -} - static int exynos_enter_lowpower(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -51,8 +32,10 @@ static int exynos_enter_lowpower(struct cpuidle_device *dev, if (new_index == 0) return arm_cpuidle_simple_enter(dev, drv, new_index); - else - return exynos_enter_core0_aftr(dev, drv, new_index); + + exynos_enter_aftr(); + + return new_index; } static struct cpuidle_driver exynos_idle_driver = { diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c new file mode 100644 index 000000000000..45371bb16214 --- /dev/null +++ b/drivers/cpuidle/cpuidle-mvebu-v7.c @@ -0,0 +1,150 @@ +/* + * Marvell Armada 370, 38x and XP SoC cpuidle driver + * + * Copyright (C) 2014 Marvell + * + * Nadav Haklai <nadavh@marvell.com> + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ * + * Maintainer: Gregory CLEMENT <gregory.clement@free-electrons.com> + */ + +#include <linux/cpu_pm.h> +#include <linux/cpuidle.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/suspend.h> +#include <linux/platform_device.h> +#include <asm/cpuidle.h> + +#define MVEBU_V7_FLAG_DEEP_IDLE 0x10000 + +static int (*mvebu_v7_cpu_suspend)(int); + +static int mvebu_v7_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + int ret; + bool deepidle = false; + cpu_pm_enter(); + + if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE) + deepidle = true; + + ret = mvebu_v7_cpu_suspend(deepidle); + if (ret) + return ret; + + cpu_pm_exit(); + + return index; +} + +static struct cpuidle_driver armadaxp_idle_driver = { + .name = "armada_xp_idle", + .states[0] = ARM_CPUIDLE_WFI_STATE, + .states[1] = { + .enter = mvebu_v7_enter_idle, + .exit_latency = 10, + .power_usage = 50, + .target_residency = 100, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "MV CPU IDLE", + .desc = "CPU power down", + }, + .states[2] = { + .enter = mvebu_v7_enter_idle, + .exit_latency = 100, + .power_usage = 5, + .target_residency = 1000, + .flags = CPUIDLE_FLAG_TIME_VALID | + MVEBU_V7_FLAG_DEEP_IDLE, + .name = "MV CPU DEEP IDLE", + .desc = "CPU and L2 Fabric power down", + }, + .state_count = 3, +}; + +static struct cpuidle_driver armada370_idle_driver = { + .name = "armada_370_idle", + .states[0] = ARM_CPUIDLE_WFI_STATE, + .states[1] = { + .enter = mvebu_v7_enter_idle, + .exit_latency = 100, + .power_usage = 5, + .target_residency = 1000, + .flags = (CPUIDLE_FLAG_TIME_VALID | + MVEBU_V7_FLAG_DEEP_IDLE), + .name = "Deep Idle", + .desc = "CPU and L2 Fabric power down", + }, + .state_count = 2, +}; + +static struct cpuidle_driver armada38x_idle_driver = { + .name = "armada_38x_idle", + .states[0] = ARM_CPUIDLE_WFI_STATE, + .states[1] = { + .enter = mvebu_v7_enter_idle, + .exit_latency = 10, + .power_usage = 5, + .target_residency = 100, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "Idle", + .desc = "CPU and SCU power down", + }, + .state_count = 2, +}; + +static int mvebu_v7_cpuidle_probe(struct platform_device *pdev) +{ + mvebu_v7_cpu_suspend = pdev->dev.platform_data; + + if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-xp")) + return cpuidle_register(&armadaxp_idle_driver, NULL); + else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-370")) + return cpuidle_register(&armada370_idle_driver, NULL); + else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-38x")) + return cpuidle_register(&armada38x_idle_driver, NULL); + else + return -EINVAL; +} + +static struct platform_driver armadaxp_cpuidle_plat_driver = { + .driver = { + .name = "cpuidle-armada-xp", + .owner = THIS_MODULE, + }, + .probe = mvebu_v7_cpuidle_probe, +}; + +module_platform_driver(armadaxp_cpuidle_plat_driver); + +static struct platform_driver armada370_cpuidle_plat_driver = { + .driver = { + .name = "cpuidle-armada-370", + .owner = THIS_MODULE, + }, + .probe = mvebu_v7_cpuidle_probe, +}; + +module_platform_driver(armada370_cpuidle_plat_driver); + +static struct platform_driver armada38x_cpuidle_plat_driver = { + .driver = { + .name = "cpuidle-armada-38x", + .owner = THIS_MODULE, + }, + .probe = mvebu_v7_cpuidle_probe, +}; + +module_platform_driver(armada38x_cpuidle_plat_driver); + +MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>"); +MODULE_DESCRIPTION("Marvell EBU v7 cpuidle driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle-powernv.c 
b/drivers/cpuidle/cpuidle-powernv.c index 74f5788d50b1..a64be578dab2 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -160,10 +160,10 @@ static int powernv_cpuidle_driver_init(void) static int powernv_add_idle_states(void) { struct device_node *power_mgt; - struct property *prop; int nr_idle_states = 1; /* Snooze */ int dt_idle_states; - u32 *flags; + const __be32 *idle_state_flags; + u32 len_flags, flags; int i; /* Currently we have snooze statically defined */ @@ -174,18 +174,18 @@ static int powernv_add_idle_states(void) return nr_idle_states; } - prop = of_find_property(power_mgt, "ibm,cpu-idle-state-flags", NULL); - if (!prop) { + idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags); + if (!idle_state_flags) { pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n"); return nr_idle_states; } - dt_idle_states = prop->length / sizeof(u32); - flags = (u32 *) prop->value; + dt_idle_states = len_flags / sizeof(u32); for (i = 0; i < dt_idle_states; i++) { - if (flags[i] & IDLE_USE_INST_NAP) { + flags = be32_to_cpu(idle_state_flags[i]); + if (flags & IDLE_USE_INST_NAP) { /* Add NAP state */ strcpy(powernv_states[nr_idle_states].name, "Nap"); strcpy(powernv_states[nr_idle_states].desc, "Nap"); @@ -196,7 +196,7 @@ static int powernv_add_idle_states(void) nr_idle_states++; } - if (flags[i] & IDLE_USE_INST_SLEEP) { + if (flags & IDLE_USE_INST_SLEEP) { /* Add FASTSLEEP state */ strcpy(powernv_states[nr_idle_states].name, "FastSleep"); strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index ae5a42595e1c..34db2fb3ef1e 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -31,7 +31,8 @@ * The default values do not overflow. */ #define BUCKETS 12 -#define INTERVALS 8 +#define INTERVAL_SHIFT 3 +#define INTERVALS (1UL << INTERVAL_SHIFT) #define RESOLUTION 1024 #define DECAY 8 #define MAX_INTERESTING 50000 @@ -133,15 +134,12 @@ struct menu_device { #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) -static int get_loadavg(void) +static inline int get_loadavg(unsigned long load) { - unsigned long this = this_cpu_load(); - - - return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10; + return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10; } -static inline int which_bucket(unsigned int duration) +static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters) { int bucket = 0; @@ -151,7 +149,7 @@ static inline int which_bucket(unsigned int duration) * This allows us to calculate * E(duration)|iowait */ - if (nr_iowait_cpu(smp_processor_id())) + if (nr_iowaiters) bucket = BUCKETS/2; if (duration < 10) @@ -174,16 +172,16 @@ static inline int which_bucket(unsigned int duration) * to be, the higher this multiplier, and thus the higher * the barrier to go to an expensive C state. */ -static inline int performance_multiplier(void) +static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load) { int mult = 1; /* for higher loadavg, we are more reluctant */ - mult += 2 * get_loadavg(); + mult += 2 * get_loadavg(load); /* for IO wait tasks (per cpu!) 
we add 5x each */ - mult += 10 * nr_iowait_cpu(smp_processor_id()); + mult += 10 * nr_iowaiters; return mult; } @@ -227,7 +225,10 @@ again: max = value; } } - do_div(avg, divisor); + if (divisor == INTERVALS) + avg >>= INTERVAL_SHIFT; + else + do_div(avg, divisor); /* Then try to determine standard deviation */ stddev = 0; @@ -238,7 +239,11 @@ again: stddev += diff * diff; } } - do_div(stddev, divisor); + if (divisor == INTERVALS) + stddev >>= INTERVAL_SHIFT; + else + do_div(stddev, divisor); + /* * The typical interval is obtained when standard deviation is small * or standard deviation is small compared to the average interval. @@ -288,7 +293,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); int i; unsigned int interactivity_req; - struct timespec t; + unsigned long nr_iowaiters, cpu_load; if (data->needs_update) { menu_update(drv, dev); @@ -302,12 +307,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) return 0; /* determine the expected residency time, round up */ - t = ktime_to_timespec(tick_nohz_get_sleep_length()); - data->next_timer_us = - t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC; - + data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length()); - data->bucket = which_bucket(data->next_timer_us); + get_iowait_load(&nr_iowaiters, &cpu_load); + data->bucket = which_bucket(data->next_timer_us, nr_iowaiters); /* * Force the result of multiplication to be 64 bits even if both @@ -325,7 +328,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) * duration / latency ratio. Adjust the latency limit if * necessary. */ - interactivity_req = data->predicted_us / performance_multiplier(); + interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load); if (latency_req > interactivity_req) latency_req = interactivity_req; diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 20dc848481e7..4d4e016d755b 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -367,6 +367,10 @@ static int ccp_crypto_init(void) { int ret; + ret = ccp_present(); + if (ret) + return ret; + spin_lock_init(&req_queue_lock); INIT_LIST_HEAD(&req_queue.cmds); req_queue.backlog = &req_queue.cmds; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index a7d110652a74..c6e6171eb6d3 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -55,6 +55,20 @@ static inline void ccp_del_device(struct ccp_device *ccp) } /** + * ccp_present - check if a CCP device is present + * + * Returns zero if a CCP device is present, -ENODEV otherwise. 
+ */ +int ccp_present(void) +{ + if (ccp_get_device()) + return 0; + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(ccp_present); + +/** * ccp_enqueue_cmd - queue an operation for processing by the CCP * * @cmd: ccp_cmd struct to be processed diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c index 180cc87b4dbb..7f89c946adfe 100644 --- a/drivers/crypto/ccp/ccp-pci.c +++ b/drivers/crypto/ccp/ccp-pci.c @@ -320,7 +320,7 @@ static int ccp_pci_resume(struct pci_dev *pdev) } #endif -static DEFINE_PCI_DEVICE_TABLE(ccp_pci_table) = { +static const struct pci_device_id ccp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1537), }, /* Last entry must be zero */ { 0, } diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 12fea3e22348..8d2a7728434d 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -2617,14 +2617,13 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id) } } - dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma), - &dev->desc_dma); + dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma), + &dev->desc_dma); if (!dev->desc_virt) { dprintk("Failed to allocate descriptor rings.\n"); err = -ENOMEM; goto err_out_unmap_bars; } - memset(dev->desc_virt, 0, sizeof(struct hifn_dma)); dev->pdev = pdev; dev->irq = pdev->irq; diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 544f6d327ede..061407d59520 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -936,28 +936,14 @@ static int nx842_OF_upd(struct property *new_prop) goto error_out; } - /* Set ptr to new property if provided */ - if (new_prop) { - /* Single property */ - if (!strncmp(new_prop->name, "status", new_prop->length)) { - status = new_prop; - - } else if (!strncmp(new_prop->name, "ibm,max-sg-len", - new_prop->length)) { - maxsglen = new_prop; - - } else if (!strncmp(new_prop->name, "ibm,max-sync-cop", - new_prop->length)) { - maxsyncop = new_prop; - - } else { - /* - * Skip the update, the property being updated - * has no impact. - */ - goto out; - } - } + /* + * If this is a property update, there are only certain properties that + * we care about. 
Bail if it isn't in the below list + */ + if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) || + strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) || + strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) + goto out; /* Perform property updates */ ret = nx842_OF_upd_status(new_devdata, status); diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index b707f292b377..65dd1ff93d3b 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -66,7 +66,7 @@ #define ADF_DH895XCC_ETR_MAX_BANKS 32 #define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) #define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) -#define ADF_DH895XCC_SMIA0_MASK 0xFFFF +#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF #define ADF_DH895XCC_SMIA1_MASK 0x1 /* Error detection and correction */ #define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818) diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c index 4222cb2aa96a..7bb9d65d9a2c 100644 --- a/drivers/dma-buf/fence.c +++ b/drivers/dma-buf/fence.c @@ -29,7 +29,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); EXPORT_TRACEPOINT_SYMBOL(fence_emit); -/** +/* * fence context counter: each execution context should have its own * fence context, this allows checking if fences belong to the same * context or not. One device can have multiple separate contexts, diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 8f6afbf9ba54..a016490c95ae 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -393,6 +393,22 @@ config XILINX_VDMA channels, Memory Mapped to Stream (MM2S) and Stream to Memory Mapped (S2MM) for the data transfers. +config DMA_SUN6I + tristate "Allwinner A31 SoCs DMA support" + depends on MACH_SUN6I || COMPILE_TEST + depends on RESET_CONTROLLER + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support for the DMA engine for Allwinner A31 SoCs. + +config NBPFAXI_DMA + tristate "Renesas Type-AXI NBPF DMA support" + select DMA_ENGINE + depends on ARM || COMPILE_TEST + help + Support for "Type-AXI" NBPF DMA IPs from Renesas + config DMA_ENGINE bool @@ -406,22 +422,11 @@ config DMA_ACPI config DMA_OF def_bool y depends on OF + select DMA_ENGINE comment "DMA Clients" depends on DMA_ENGINE -config NET_DMA - bool "Network: TCP receive copy offload" - depends on DMA_ENGINE && NET - default (INTEL_IOATDMA || FSL_DMA) - depends on BROKEN - help - This enables the use of DMA engines in the network stack to - offload receive copy-to-user operations, freeing CPU cycles. - - Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise - say N. 
- config ASYNC_TX_DMA bool "Async_tx: Offload support for the async_tx api" depends on DMA_ENGINE diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index bd9e7fa928bd..cb626c179911 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,12 +1,11 @@ -ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG -ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG +subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG +subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o obj-$(CONFIG_DMA_ACPI) += acpi-dma.o obj-$(CONFIG_DMA_OF) += of-dma.o -obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ @@ -48,3 +47,5 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o obj-y += xilinx/ obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o +obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o +obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o diff --git a/drivers/dma/TODO b/drivers/dma/TODO index 734ed0206cd5..b8045cd42ee1 100644 --- a/drivers/dma/TODO +++ b/drivers/dma/TODO @@ -7,7 +7,6 @@ TODO for slave dma - imx-dma - imx-sdma - mxs-dma.c - - dw_dmac - intel_mid_dma 4. Check other subsystems for dma drivers and merge/move to dmaengine 5. Remove dma_slave_config's dma direction. diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 8114731a1c62..e34024b000a4 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1040,7 +1040,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, if (early_bytes) { dev_vdbg(&pl08x->adev->dev, - "%s byte width LLIs (remain 0x%08x)\n", + "%s byte width LLIs (remain 0x%08zx)\n", __func__, bd.remainder); prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, num_llis++, &total_bytes); @@ -1653,7 +1653,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; @@ -1662,7 +1662,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( dma_addr_t slave_addr; dev_dbg(&pl08x->adev->dev, - "%s prepare cyclic transaction of %d/%d bytes %s %s\n", + "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", __func__, period_len, buf_len, direction == DMA_MEM_TO_DEV ? 
"to" : "from", plchan->name); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c13a3bb0f594..ca9dd2613283 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -294,14 +294,16 @@ static int atc_get_bytes_left(struct dma_chan *chan) ret = -EINVAL; goto out; } - atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) - << (desc_first->tx_width); - if (atchan->remain_desc < 0) { + + count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) + << desc_first->tx_width; + if (atchan->remain_desc < count) { ret = -EINVAL; goto out; - } else { - ret = atchan->remain_desc; } + + atchan->remain_desc -= count; + ret = atchan->remain_desc; } else { /* * Get residual bytes when current @@ -893,12 +895,11 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, * @period_len: number of bytes for each period * @direction: transfer direction, to or from device * @flags: tx descriptor status flags - * @context: transfer context (ignored) */ static struct dma_async_tx_descriptor * atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_slave *atslave = chan->private; diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index a03602164e3e..68007974961a 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -335,7 +335,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); enum dma_slave_buswidth dev_width; diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 94c380f07538..ae2ab14e64b3 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -362,8 +362,9 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan) vchan_cyclic_callback(&chan->desc->vdesc); } else { if (chan->next_sg == chan->desc->num_sgs) { - chan->desc = NULL; + list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); + chan->desc = NULL; } } } @@ -433,7 +434,7 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg( static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dma_desc *desc; @@ -614,4 +615,4 @@ module_platform_driver(jz4740_dma_driver); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("JZ4740 DMA driver"); -MODULE_LICENSE("GPLv2"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index d5d30ed863ce..24bfaf0b92ba 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) } EXPORT_SYMBOL(dmaengine_get_unmap_data); -/** - * dma_async_memcpy_pg_to_pg - offloaded copy from page to page - * @chan: DMA channel to offload copy to - * @dest_pg: destination page - * @dest_off: offset in page to copy to - * @src_pg: source page - * @src_off: offset in page to 
copy from - * @len: length - * - * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus - * address according to the DMA mapping API rules for streaming mappings. - * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident - * (kernel memory or locked user space pages). - */ -dma_cookie_t -dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, - unsigned int dest_off, struct page *src_pg, unsigned int src_off, - size_t len) -{ - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - struct dmaengine_unmap_data *unmap; - dma_cookie_t cookie; - unsigned long flags; - - unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT); - if (!unmap) - return -ENOMEM; - - unmap->to_cnt = 1; - unmap->from_cnt = 1; - unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, - DMA_TO_DEVICE); - unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, - DMA_FROM_DEVICE); - unmap->len = len; - flags = DMA_CTRL_ACK; - tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], - len, flags); - - if (!tx) { - dmaengine_unmap_put(unmap); - return -ENOMEM; - } - - dma_set_unmap(tx, unmap); - cookie = tx->tx_submit(tx); - dmaengine_unmap_put(unmap); - - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); - - return cookie; -} -EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); - -/** - * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses - * @chan: DMA channel to offload copy to - * @dest: destination address (virtual) - * @src: source address (virtual) - * @len: length - * - * Both @dest and @src must be mappable to a bus address according to the - * DMA mapping API rules for streaming mappings. - * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages). - */ -dma_cookie_t -dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, - void *src, size_t len) -{ - return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), - (unsigned long) dest & ~PAGE_MASK, - virt_to_page(src), - (unsigned long) src & ~PAGE_MASK, len); -} -EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); - -/** - * dma_async_memcpy_buf_to_pg - offloaded copy from address to page - * @chan: DMA channel to offload copy to - * @page: destination page - * @offset: offset in page to copy to - * @kdata: source address (virtual) - * @len: length - * - * Both @page/@offset and @kdata must be mappable to a bus address according - * to the DMA mapping API rules for streaming mappings. 
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or - * locked user space pages) - */ -dma_cookie_t -dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, - unsigned int offset, void *kdata, size_t len) -{ - return dma_async_memcpy_pg_to_pg(chan, page, offset, - virt_to_page(kdata), - (unsigned long) kdata & ~PAGE_MASK, len); -} -EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); - void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan) { diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index e27cec25c59e..a8d7809e2f4c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -688,14 +688,14 @@ static int dmatest_func(void *data) runtime = ktime_us_delta(ktime_get(), ktime); ret = 0; +err_dstbuf: for (i = 0; thread->dsts[i]; i++) kfree(thread->dsts[i]); -err_dstbuf: kfree(thread->dsts); err_dsts: +err_srcbuf: for (i = 0; thread->srcs[i]; i++) kfree(thread->srcs[i]); -err_srcbuf: kfree(thread->srcs); err_srcs: kfree(pq_coefs); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index a27ded53ab4f..1af731b83b3f 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -279,6 +279,19 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) channel_set_bit(dw, CH_EN, dwc->mask); } +static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) +{ + struct dw_desc *desc; + + if (list_empty(&dwc->queue)) + return; + + list_move(dwc->queue.next, &dwc->active_list); + desc = dwc_first_active(dwc); + dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); + dwc_dostart(dwc, desc); +} + /*----------------------------------------------------------------------*/ static void @@ -335,10 +348,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) * the completed ones. */ list_splice_init(&dwc->active_list, &list); - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } + dwc_dostart_first_queued(dwc); spin_unlock_irqrestore(&dwc->lock, flags); @@ -467,10 +477,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) /* Try to continue after resetting the channel... */ dwc_chan_disable(dw, dwc); - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } + dwc_dostart_first_queued(dwc); spin_unlock_irqrestore(&dwc->lock, flags); } @@ -677,17 +684,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) * possible, perhaps even appending to those already submitted * for DMA. But this is hard to do in a race-free manner. 
*/ - if (list_empty(&dwc->active_list)) { - dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, - desc->txd.cookie); - list_add_tail(&desc->desc_node, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } else { - dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, - desc->txd.cookie); - list_add_tail(&desc->desc_node, &dwc->queue); - } + dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); + list_add_tail(&desc->desc_node, &dwc->queue); spin_unlock_irqrestore(&dwc->lock, flags); @@ -1092,9 +1091,12 @@ dwc_tx_status(struct dma_chan *chan, static void dwc_issue_pending(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + unsigned long flags; - if (!list_empty(&dwc->queue)) - dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + spin_lock_irqsave(&dwc->lock, flags); + if (list_empty(&dwc->active_list)) + dwc_dostart_first_queued(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); } static int dwc_alloc_chan_resources(struct dma_chan *chan) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index b512caf46944..7b65633f495e 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -23,6 +23,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/of.h> #include <linux/platform_data/edma.h> @@ -256,8 +257,13 @@ static int edma_terminate_all(struct edma_chan *echan) * echan->edesc is NULL and exit.) */ if (echan->edesc) { + int cyclic = echan->edesc->cyclic; echan->edesc = NULL; edma_stop(echan->ch_num); + /* Move the cyclic channel back to default queue */ + if (cyclic) + edma_assign_channel_eventq(echan->ch_num, + EVENTQ_DEFAULT); } vchan_get_all_descriptors(&echan->vchan, &head); @@ -592,7 +598,7 @@ struct dma_async_tx_descriptor *edma_prep_dma_memcpy( static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long tx_flags, void *context) + unsigned long tx_flags) { struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; @@ -718,12 +724,15 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( edesc->absync = ret; /* - * Enable interrupts for every period because callback - * has to be called for every period. 
+ * Enable period interrupt only if it is requested */ - edesc->pset[i].param.opt |= TCINTEN; + if (tx_flags & DMA_PREP_INTERRUPT) + edesc->pset[i].param.opt |= TCINTEN; } + /* Place the cyclic channel to highest priority queue */ + edma_assign_channel_eventq(echan->ch_num, EVENTQ_0); + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } @@ -993,7 +1002,7 @@ static int edma_dma_device_slave_caps(struct dma_chan *dchan, caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = true; caps->cmd_terminate = true; - caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; return 0; } @@ -1040,7 +1049,7 @@ static int edma_probe(struct platform_device *pdev) ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); - return -EIO; + return ecc->dummy_slot; } dma_cap_zero(ecc->dma_slave.cap_mask); @@ -1125,7 +1134,7 @@ static int edma_init(void) } } - if (EDMA_CTLRS == 2) { + if (!of_have_populated_dt() && EDMA_CTLRS == 2) { pdev1 = platform_device_register_full(&edma_dev_info1); if (IS_ERR(pdev1)) { platform_driver_unregister(&edma_driver); diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index cb4bf682a708..7650470196c4 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -1092,7 +1092,6 @@ fail: * @period_len: length of a single period * @dir: direction of the operation * @flags: tx descriptor status flags - * @context: operation context (ignored) * * Prepares a descriptor for cyclic DMA operation. This means that once the * descriptor is submitted, we will be submitting in a @period_len sized @@ -1105,8 +1104,7 @@ fail: static struct dma_async_tx_descriptor * ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, - enum dma_transfer_direction dir, unsigned long flags, - void *context) + enum dma_transfer_direction dir, unsigned long flags) { struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); struct ep93xx_dma_desc *desc, *first; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b396a7fb53ab..3c5711d5fe97 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -248,11 +248,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, unsigned int slot, bool enable) { u32 ch = fsl_chan->vchan.chan.chan_id; - void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR]; + void __iomem *muxaddr; unsigned chans_per_mux, ch_off; chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; if (enable) edma_writeb(fsl_chan->edma, @@ -516,7 +517,7 @@ err: static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); struct fsl_edma_desc *fsl_desc; @@ -724,6 +725,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, { struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; struct dma_chan *chan, *_chan; + unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; if (dma_spec->args_count != 2) return NULL; @@ -732,7 +734,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, 
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { if (chan->client_count) continue; - if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) { + if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) { chan = dma_get_slave_channel(chan); if (chan) { chan->device->privatecnt++; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index e0fec68aed25..d5d6885ab341 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -396,10 +396,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) struct fsldma_chan *chan = to_fsl_chan(tx->chan); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; - unsigned long flags; dma_cookie_t cookie = -EINVAL; - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); + +#ifdef CONFIG_PM + if (unlikely(chan->pm_state != RUNNING)) { + chan_dbg(chan, "cannot submit due to suspend\n"); + spin_unlock_bh(&chan->desc_lock); + return -1; + } +#endif /* * assign cookies to all of the software descriptors @@ -412,7 +419,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) /* put this transaction onto the tail of the pending queue */ append_ld_queue(chan, desc); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); return cookie; } @@ -459,6 +466,88 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) } /** + * fsldma_clean_completed_descriptor - free all descriptors which + * has been completed and acked + * @chan: Freescale DMA channel + * + * This function is used on all completed and acked descriptors. + * All descriptors should only be freed in this function. + */ +static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan) +{ + struct fsl_desc_sw *desc, *_desc; + + /* Run the callback for each descriptor, in order */ + list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) + if (async_tx_test_ack(&desc->async_tx)) + fsl_dma_free_descriptor(chan, desc); +} + +/** + * fsldma_run_tx_complete_actions - cleanup a single link descriptor + * @chan: Freescale DMA channel + * @desc: descriptor to cleanup and free + * @cookie: Freescale DMA transaction identifier + * + * This function is used on a descriptor which has been executed by the DMA + * controller. It will run any callbacks, submit any dependencies. + */ +static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, + struct fsl_desc_sw *desc, dma_cookie_t cookie) +{ + struct dma_async_tx_descriptor *txd = &desc->async_tx; + dma_cookie_t ret = cookie; + + BUG_ON(txd->cookie < 0); + + if (txd->cookie > 0) { + ret = txd->cookie; + + /* Run the link descriptor callback function */ + if (txd->callback) { + chan_dbg(chan, "LD %p callback\n", desc); + txd->callback(txd->callback_param); + } + } + + /* Run any dependencies */ + dma_run_dependencies(txd); + + return ret; +} + +/** + * fsldma_clean_running_descriptor - move the completed descriptor from + * ld_running to ld_completed + * @chan: Freescale DMA channel + * @desc: the descriptor which is completed + * + * Free the descriptor directly if acked by async_tx api, or move it to + * queue ld_completed. 
+ */ +static void fsldma_clean_running_descriptor(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + /* Remove from the list of transactions */ + list_del(&desc->node); + + /* + * the client is allowed to attach dependent operations + * until 'ack' is set + */ + if (!async_tx_test_ack(&desc->async_tx)) { + /* + * Move this descriptor to the list of descriptors which is + * completed, but still awaiting the 'ack' bit to be set. + */ + list_add_tail(&desc->node, &chan->ld_completed); + return; + } + + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); +} + +/** * fsl_chan_xfer_ld_queue - transfer any pending transactions * @chan : Freescale DMA channel * @@ -526,31 +615,58 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) } /** - * fsldma_cleanup_descriptor - cleanup and free a single link descriptor + * fsldma_cleanup_descriptors - cleanup link descriptors which are completed + * and move them to ld_completed to free until flag 'ack' is set * @chan: Freescale DMA channel - * @desc: descriptor to cleanup and free * - * This function is used on a descriptor which has been executed by the DMA - * controller. It will run any callbacks, submit any dependencies, and then - * free the descriptor. + * This function is used on descriptors which have been executed by the DMA + * controller. It will run any callbacks, submit any dependencies, then + * free these descriptors if flag 'ack' is set. */ -static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) +static void fsldma_cleanup_descriptors(struct fsldma_chan *chan) { - struct dma_async_tx_descriptor *txd = &desc->async_tx; + struct fsl_desc_sw *desc, *_desc; + dma_cookie_t cookie = 0; + dma_addr_t curr_phys = get_cdar(chan); + int seen_current = 0; + + fsldma_clean_completed_descriptor(chan); + + /* Run the callback for each descriptor, in order */ + list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { + /* + * do not advance past the current descriptor loaded into the + * hardware channel, subsequent descriptors are either in + * process or have not been submitted + */ + if (seen_current) + break; + + /* + * stop the search if we reach the current descriptor and the + * channel is busy + */ + if (desc->async_tx.phys == curr_phys) { + seen_current = 1; + if (!dma_is_idle(chan)) + break; + } + + cookie = fsldma_run_tx_complete_actions(chan, desc, cookie); - /* Run the link descriptor callback function */ - if (txd->callback) { - chan_dbg(chan, "LD %p callback\n", desc); - txd->callback(txd->callback_param); + fsldma_clean_running_descriptor(chan, desc); } - /* Run any dependencies */ - dma_run_dependencies(txd); + /* + * Start any pending transactions automatically + * + * In the ideal case, we keep the DMA controller busy while we go + * ahead and free the descriptors below. 
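[Editor's sketch, not part of the fsldma patch: the reworked cleanup above only frees a descriptor once the client has acked it, either by passing DMA_CTRL_ACK at prep time or by calling async_tx_ack() later; until then it parks on ld_completed. A minimal client-side illustration, assuming a memcpy-capable channel; the function name is a placeholder.]

#include <linux/dmaengine.h>

static int example_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
				dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Pre-ack the descriptor so the driver may free it after completion */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}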
+ */ + fsl_chan_xfer_ld_queue(chan); - dma_descriptor_unmap(txd); - chan_dbg(chan, "LD %p free\n", desc); - dma_pool_free(chan->desc_pool, desc, txd->phys); + if (cookie > 0) + chan->common.completed_cookie = cookie; } /** @@ -617,13 +733,14 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, static void fsl_dma_free_chan_resources(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); - unsigned long flags; chan_dbg(chan, "free all channel resources\n"); - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); + fsldma_cleanup_descriptors(chan); fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); - spin_unlock_irqrestore(&chan->desc_lock, flags); + fsldma_free_desc_list(chan, &chan->ld_completed); + spin_unlock_bh(&chan->desc_lock); dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; @@ -842,7 +959,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan, { struct dma_slave_config *config; struct fsldma_chan *chan; - unsigned long flags; int size; if (!dchan) @@ -852,7 +968,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, switch (cmd) { case DMA_TERMINATE_ALL: - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); /* Halt the DMA engine */ dma_halt(chan); @@ -860,9 +976,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan, /* Remove and free all of the descriptors in the LD queue */ fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); + fsldma_free_desc_list(chan, &chan->ld_completed); chan->idle = true; - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); return 0; case DMA_SLAVE_CONFIG: @@ -904,11 +1021,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan, static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); - unsigned long flags; - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); fsl_chan_xfer_ld_queue(chan); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); } /** @@ -919,6 +1035,17 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { + struct fsldma_chan *chan = to_fsl_chan(dchan); + enum dma_status ret; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + spin_lock_bh(&chan->desc_lock); + fsldma_cleanup_descriptors(chan); + spin_unlock_bh(&chan->desc_lock); + return dma_cookie_status(dchan, cookie, txstate); } @@ -996,52 +1123,18 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) static void dma_do_tasklet(unsigned long data) { struct fsldma_chan *chan = (struct fsldma_chan *)data; - struct fsl_desc_sw *desc, *_desc; - LIST_HEAD(ld_cleanup); - unsigned long flags; chan_dbg(chan, "tasklet entry\n"); - spin_lock_irqsave(&chan->desc_lock, flags); - - /* update the cookie if we have some descriptors to cleanup */ - if (!list_empty(&chan->ld_running)) { - dma_cookie_t cookie; - - desc = to_fsl_desc(chan->ld_running.prev); - cookie = desc->async_tx.cookie; - dma_cookie_complete(&desc->async_tx); - - chan_dbg(chan, "completed_cookie=%d\n", cookie); - } - - /* - * move the descriptors to a temporary list so we can drop the lock - * during the entire cleanup operation - */ - list_splice_tail_init(&chan->ld_running, &ld_cleanup); + spin_lock_bh(&chan->desc_lock); /* the hardware is now idle and ready for 
more */ chan->idle = true; - /* - * Start any pending transactions automatically - * - * In the ideal case, we keep the DMA controller busy while we go - * ahead and free the descriptors below. - */ - fsl_chan_xfer_ld_queue(chan); - spin_unlock_irqrestore(&chan->desc_lock, flags); - - /* Run the callback for each descriptor, in order */ - list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { - - /* Remove from the list of transactions */ - list_del(&desc->node); + /* Run all cleanup for descriptors which have been completed */ + fsldma_cleanup_descriptors(chan); - /* Run all cleanup for this descriptor */ - fsldma_cleanup_descriptor(chan, desc); - } + spin_unlock_bh(&chan->desc_lock); chan_dbg(chan, "tasklet exit\n"); } @@ -1225,7 +1318,11 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, spin_lock_init(&chan->desc_lock); INIT_LIST_HEAD(&chan->ld_pending); INIT_LIST_HEAD(&chan->ld_running); + INIT_LIST_HEAD(&chan->ld_completed); chan->idle = true; +#ifdef CONFIG_PM + chan->pm_state = RUNNING; +#endif chan->common.device = &fdev->common; dma_cookie_init(&chan->common); @@ -1365,6 +1462,69 @@ static int fsldma_of_remove(struct platform_device *op) return 0; } +#ifdef CONFIG_PM +static int fsldma_suspend_late(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fsldma_device *fdev = platform_get_drvdata(pdev); + struct fsldma_chan *chan; + int i; + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + spin_lock_bh(&chan->desc_lock); + if (unlikely(!chan->idle)) + goto out; + chan->regs_save.mr = get_mr(chan); + chan->pm_state = SUSPENDED; + spin_unlock_bh(&chan->desc_lock); + } + return 0; + +out: + for (; i >= 0; i--) { + chan = fdev->chan[i]; + if (!chan) + continue; + chan->pm_state = RUNNING; + spin_unlock_bh(&chan->desc_lock); + } + return -EBUSY; +} + +static int fsldma_resume_early(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fsldma_device *fdev = platform_get_drvdata(pdev); + struct fsldma_chan *chan; + u32 mode; + int i; + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + spin_lock_bh(&chan->desc_lock); + mode = chan->regs_save.mr + & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA; + set_mr(chan, mode); + chan->pm_state = RUNNING; + spin_unlock_bh(&chan->desc_lock); + } + + return 0; +} + +static const struct dev_pm_ops fsldma_pm_ops = { + .suspend_late = fsldma_suspend_late, + .resume_early = fsldma_resume_early, +}; +#endif + static const struct of_device_id fsldma_of_ids[] = { { .compatible = "fsl,elo3-dma", }, { .compatible = "fsl,eloplus-dma", }, @@ -1377,6 +1537,9 @@ static struct platform_driver fsldma_of_driver = { .name = "fsl-elo-dma", .owner = THIS_MODULE, .of_match_table = fsldma_of_ids, +#ifdef CONFIG_PM + .pm = &fsldma_pm_ops, +#endif }, .probe = fsldma_of_probe, .remove = fsldma_of_remove, diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index d56e83599825..239c20c84382 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -134,12 +134,36 @@ struct fsldma_device { #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 #define FSL_DMA_CHAN_START_EXT 0x00002000 +#ifdef CONFIG_PM +struct fsldma_chan_regs_save { + u32 mr; +}; + +enum fsldma_pm_state { + RUNNING = 0, + SUSPENDED, +}; +#endif + struct fsldma_chan { char name[8]; /* Channel name */ struct fsldma_chan_regs __iomem *regs; spinlock_t desc_lock; /* Descriptor operation lock */ - struct list_head 
ld_pending; /* Link descriptors queue */ - struct list_head ld_running; /* Link descriptors queue */ + /* + * Descriptors which are queued to run, but have not yet been + * submitted to the hardware for execution + */ + struct list_head ld_pending; + /* + * Descriptors which are currently being executed by the hardware + */ + struct list_head ld_running; + /* + * Descriptors which have finished execution by the hardware. These + * descriptors have already had their cleanup actions run. They are + * waiting for the ACK bit to be set by the async_tx API. + */ + struct list_head ld_completed; /* Link descriptors queue */ struct dma_chan common; /* DMA common channel */ struct dma_pool *desc_pool; /* Descriptors pool */ struct device *dev; /* Channel device */ @@ -148,6 +172,10 @@ struct fsldma_chan { struct tasklet_struct tasklet; u32 feature; bool idle; /* DMA controller is idle */ +#ifdef CONFIG_PM + struct fsldma_chan_regs_save regs_save; + enum fsldma_pm_state pm_state; +#endif void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 286660a12cc6..9d2c9e7374dc 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -866,7 +866,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_engine *imxdma = imxdmac->imxdma; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 14867e3ac8ff..f7626e37d0b8 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -271,6 +271,7 @@ struct sdma_channel { unsigned int chn_count; unsigned int chn_real_count; struct tasklet_struct tasklet; + struct imx_dma_data data; }; #define IMX_DMA_SG_LOOP BIT(0) @@ -749,6 +750,11 @@ static void sdma_get_pc(struct sdma_channel *sdmac, emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; per_2_per = sdma->script_addrs->per_2_per_addr; break; + case IMX_DMATYPE_ASRC_SP: + per_2_emi = sdma->script_addrs->shp_2_mcu_addr; + emi_2_per = sdma->script_addrs->mcu_2_shp_addr; + per_2_per = sdma->script_addrs->per_2_per_addr; + break; case IMX_DMATYPE_MSHC: per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; @@ -911,14 +917,13 @@ static int sdma_request_channel(struct sdma_channel *sdmac) int channel = sdmac->channel; int ret = -EBUSY; - sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); + sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, + GFP_KERNEL); if (!sdmac->bd) { ret = -ENOMEM; goto out; } - memset(sdmac->bd, 0, PAGE_SIZE); - sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; @@ -1120,7 +1125,7 @@ err_out: static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; @@ -1414,12 +1419,14 @@ err_dma_alloc: static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) { + struct sdma_channel *sdmac = 
to_sdma_chan(chan); struct imx_dma_data *data = fn_param; if (!imx_dma_is_general_purpose(chan)) return false; - chan->private = data; + sdmac->data = *data; + chan->private = &sdmac->data; return true; } diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index 9e84d5bc9307..3b55bb8d969a 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -35,6 +35,7 @@ #include "dma.h" #include "registers.h" +#include "dma_v2.h" /* * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 @@ -147,7 +148,7 @@ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) u16 id; /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) + if (!dev_is_pci(dev)) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); @@ -179,7 +180,7 @@ static int ioat_dca_remove_requester(struct dca_provider *dca, int i; /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) + if (!dev_is_pci(dev)) return -ENODEV; pdev = to_pci_dev(dev); @@ -320,7 +321,7 @@ static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) u16 global_req_table; /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) + if (!dev_is_pci(dev)) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); @@ -354,7 +355,7 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca, u16 global_req_table; /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) + if (!dev_is_pci(dev)) return -ENODEV; pdev = to_pci_dev(dev); @@ -496,7 +497,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) u16 global_req_table; /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) + if (!dev_is_pci(dev)) return -ENODEV; pdev = to_pci_dev(dev); id = dcaid_from_pcidev(pdev); @@ -530,7 +531,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca, u16 global_req_table; /* This implementation only supports PCI-Express */ - if (dev->bus != &pci_bus_type) + if (!dev_is_pci(dev)) return -ENODEV; pdev = to_pci_dev(dev); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 4e3549a16132..940c1502a8b5 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -947,7 +947,7 @@ msix: for (i = 0; i < msixcnt; i++) device->msix_entries[i].entry = i; - err = pci_enable_msix(pdev, device->msix_entries, msixcnt); + err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt); if (err) goto msi; @@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca) err = ioat_probe(device); if (err) return err; - ioat_set_tcp_copy_break(4096); err = ioat_register(device); if (err) return err; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index e982f00a9843..d63f68b1aa35 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, #define dump_desc_dbg(c, d) \ ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) -static inline void ioat_set_tcp_copy_break(unsigned long copybreak) -{ - #ifdef CONFIG_NET_DMA - sysctl_tcp_dma_copybreak = copybreak; - #endif -} - static inline struct ioat_chan_common * ioat_chan_by_index(struct ioatdma_device *device, int index) { diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 8d1058085eeb..695483e6be32 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c 
@@ -735,7 +735,8 @@ int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs) * called under bh_disabled so we need to trigger the timer * event directly */ - if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) { + if (time_is_before_jiffies(chan->timer.expires) + && timer_pending(&chan->timer)) { struct ioatdma_device *device = chan->device; mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); @@ -899,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca) err = ioat_probe(device); if (err) return err; - ioat_set_tcp_copy_break(2048); list_for_each_entry(c, &dma->channels, device_node) { chan = to_chan_common(c); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index b9b38a1cf92f..895f869d6c2c 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -740,7 +740,7 @@ ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); } -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) @@ -1091,7 +1091,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, } } -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) @@ -1133,7 +1133,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, flags); } -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) @@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) err = ioat_probe(device); if (err) return err; - ioat_set_tcp_copy_break(262144); list_for_each_entry(c, &dma->channels, device_node) { chan = to_chan_common(c); diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c deleted file mode 100644 index bb48a57c2fc1..000000000000 --- a/drivers/dma/iovlock.c +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. - * Portions based on net/core/datagram.c and copyrighted by their authors. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - */ - -/* - * This code allows the net stack to make use of a DMA engine for - * skb to iovec copies. 
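[Editor's sketch, relating to the ioat2_check_space_lock hunk above: comparing a deadline against jiffies with a plain ">" misbehaves when the counter wraps, so the wrap-safe helpers are used instead. Minimal illustration; the function name is a placeholder, not from the patch.]

#include <linux/jiffies.h>

/* Wrap-safe "has this deadline passed?" check */
static bool example_deadline_passed(unsigned long expires)
{
	/* time_is_before_jiffies(a) expands to time_after(jiffies, a) */
	return time_is_before_jiffies(expires);
}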
- */ - -#include <linux/dmaengine.h> -#include <linux/pagemap.h> -#include <linux/slab.h> -#include <net/tcp.h> /* for memcpy_toiovec */ -#include <asm/io.h> -#include <asm/uaccess.h> - -static int num_pages_spanned(struct iovec *iov) -{ - return - ((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) - - ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT); -} - -/* - * Pin down all the iovec pages needed for len bytes. - * Return a struct dma_pinned_list to keep track of pages pinned down. - * - * We are allocating a single chunk of memory, and then carving it up into - * 3 sections, the latter 2 whose size depends on the number of iovecs and the - * total number of pages, respectively. - */ -struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len) -{ - struct dma_pinned_list *local_list; - struct page **pages; - int i; - int ret; - int nr_iovecs = 0; - int iovec_len_used = 0; - int iovec_pages_used = 0; - - /* don't pin down non-user-based iovecs */ - if (segment_eq(get_fs(), KERNEL_DS)) - return NULL; - - /* determine how many iovecs/pages there are, up front */ - do { - iovec_len_used += iov[nr_iovecs].iov_len; - iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]); - nr_iovecs++; - } while (iovec_len_used < len); - - /* single kmalloc for pinned list, page_list[], and the page arrays */ - local_list = kmalloc(sizeof(*local_list) - + (nr_iovecs * sizeof (struct dma_page_list)) - + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL); - if (!local_list) - goto out; - - /* list of pages starts right after the page list array */ - pages = (struct page **) &local_list->page_list[nr_iovecs]; - - local_list->nr_iovecs = 0; - - for (i = 0; i < nr_iovecs; i++) { - struct dma_page_list *page_list = &local_list->page_list[i]; - - len -= iov[i].iov_len; - - if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) - goto unpin; - - page_list->nr_pages = num_pages_spanned(&iov[i]); - page_list->base_address = iov[i].iov_base; - - page_list->pages = pages; - pages += page_list->nr_pages; - - /* pin pages down */ - down_read(&current->mm->mmap_sem); - ret = get_user_pages( - current, - current->mm, - (unsigned long) iov[i].iov_base, - page_list->nr_pages, - 1, /* write */ - 0, /* force */ - page_list->pages, - NULL); - up_read(&current->mm->mmap_sem); - - if (ret != page_list->nr_pages) - goto unpin; - - local_list->nr_iovecs = i + 1; - } - - return local_list; - -unpin: - dma_unpin_iovec_pages(local_list); -out: - return NULL; -} - -void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list) -{ - int i, j; - - if (!pinned_list) - return; - - for (i = 0; i < pinned_list->nr_iovecs; i++) { - struct dma_page_list *page_list = &pinned_list->page_list[i]; - for (j = 0; j < page_list->nr_pages; j++) { - set_page_dirty_lock(page_list->pages[j]); - page_cache_release(page_list->pages[j]); - } - } - - kfree(pinned_list); -} - - -/* - * We have already pinned down the pages we will be using in the iovecs. - * Each entry in iov array has corresponding entry in pinned_list->page_list. - * Using array indexing to keep iov[] and page_list[] in sync. - * Initial elements in iov array's iov->iov_len will be 0 if already copied into - * by another call. - * iov array length remaining guaranteed to be bigger than len.
- */ -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, - struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len) -{ - int iov_byte_offset; - int copy; - dma_cookie_t dma_cookie = 0; - int iovec_idx; - int page_idx; - - if (!chan) - return memcpy_toiovec(iov, kdata, len); - - iovec_idx = 0; - while (iovec_idx < pinned_list->nr_iovecs) { - struct dma_page_list *page_list; - - /* skip already used-up iovecs */ - while (!iov[iovec_idx].iov_len) - iovec_idx++; - - page_list = &pinned_list->page_list[iovec_idx]; - - iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK); - page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK) - - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT; - - /* break up copies to not cross page boundary */ - while (iov[iovec_idx].iov_len) { - copy = min_t(int, PAGE_SIZE - iov_byte_offset, len); - copy = min_t(int, copy, iov[iovec_idx].iov_len); - - dma_cookie = dma_async_memcpy_buf_to_pg(chan, - page_list->pages[page_idx], - iov_byte_offset, - kdata, - copy); - /* poll for a descriptor slot */ - if (unlikely(dma_cookie < 0)) { - dma_async_issue_pending(chan); - continue; - } - - len -= copy; - iov[iovec_idx].iov_len -= copy; - iov[iovec_idx].iov_base += copy; - - if (!len) - return dma_cookie; - - kdata += copy; - iov_byte_offset = 0; - page_idx++; - } - iovec_idx++; - } - - /* really bad if we ever run out of iovecs */ - BUG(); - return -EFAULT; -} - -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, - struct dma_pinned_list *pinned_list, struct page *page, - unsigned int offset, size_t len) -{ - int iov_byte_offset; - int copy; - dma_cookie_t dma_cookie = 0; - int iovec_idx; - int page_idx; - int err; - - /* this needs as-yet-unimplemented buf-to-buff, so punt. 
*/ - /* TODO: use dma for this */ - if (!chan || !pinned_list) { - u8 *vaddr = kmap(page); - err = memcpy_toiovec(iov, vaddr + offset, len); - kunmap(page); - return err; - } - - iovec_idx = 0; - while (iovec_idx < pinned_list->nr_iovecs) { - struct dma_page_list *page_list; - - /* skip already used-up iovecs */ - while (!iov[iovec_idx].iov_len) - iovec_idx++; - - page_list = &pinned_list->page_list[iovec_idx]; - - iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK); - page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK) - - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT; - - /* break up copies to not cross page boundary */ - while (iov[iovec_idx].iov_len) { - copy = min_t(int, PAGE_SIZE - iov_byte_offset, len); - copy = min_t(int, copy, iov[iovec_idx].iov_len); - - dma_cookie = dma_async_memcpy_pg_to_pg(chan, - page_list->pages[page_idx], - iov_byte_offset, - page, - offset, - copy); - /* poll for a descriptor slot */ - if (unlikely(dma_cookie < 0)) { - dma_async_issue_pending(chan); - continue; - } - - len -= copy; - iov[iovec_idx].iov_len -= copy; - iov[iovec_idx].iov_base += copy; - - if (!len) - return dma_cookie; - - offset += copy; - iov_byte_offset = 0; - page_idx++; - } - iovec_idx++; - } - - /* really bad if we ever run out of iovecs */ - BUG(); - return -EFAULT; -} diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 128ca143486d..bbf62927bd72 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1532,11 +1532,17 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) #ifdef DEBUG if (chan->chan_id == IDMAC_IC_7) { ic_sof = ipu_irq_map(69); - if (ic_sof > 0) - request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); + if (ic_sof > 0) { + ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); + if (ret) + dev_err(&chan->dev->device, "request irq failed for IC SOF"); + } ic_eof = ipu_irq_map(70); - if (ic_eof > 0) - request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); + if (ic_eof > 0) { + ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); + if (ret) + dev_err(&chan->dev->device, "request irq failed for IC EOF"); + } } #endif diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index a7b186d536b3..a1a4db5721b8 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -601,7 +601,7 @@ static struct dma_async_tx_descriptor * mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr, size_t len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct mmp_pdma_chan *chan; struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 724f7f4c9720..6ad30e2c5038 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -389,7 +389,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); struct mmp_tdma_desc *desc; diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 2ad43738ac8b..881db2bcb48b 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -53,6 +53,7 @@ #include <linux/of_address.h> #include <linux/of_device.h> #include 
<linux/of_irq.h> +#include <linux/of_dma.h> #include <linux/of_platform.h> #include <linux/random.h> @@ -1036,7 +1037,15 @@ static int mpc_dma_probe(struct platform_device *op) if (retval) goto err_free2; - return retval; + /* Register with OF helpers for DMA lookups (nonfatal) */ + if (dev->of_node) { + retval = of_dma_controller_register(dev->of_node, + of_dma_xlate_by_chan_id, mdma); + if (retval) + dev_warn(dev, "Could not register for OF lookup\n"); + } + + return 0; err_free2: if (mdma->is_mpc8308) @@ -1057,6 +1066,8 @@ static int mpc_dma_remove(struct platform_device *op) struct device *dev = &op->dev; struct mpc_dma *mdma = dev_get_drvdata(dev); + if (dev->of_node) + of_dma_controller_free(dev->of_node); dma_async_device_unregister(&mdma->dma); if (mdma->is_mpc8308) { free_irq(mdma->irq2, mdma); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 394cbc5c93e3..7938272f2edf 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc, return 0; } -static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) +/* This function must be called with the mv_xor_chan spinlock held */ +static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) { struct mv_xor_desc_slot *iter, *_iter; dma_cookie_t cookie = 0; @@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) mv_chan->dmachan.completed_cookie = cookie; } -static void -mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) -{ - spin_lock_bh(&mv_chan->lock); - __mv_xor_slot_cleanup(mv_chan); - spin_unlock_bh(&mv_chan->lock); -} - static void mv_xor_tasklet(unsigned long data) { struct mv_xor_chan *chan = (struct mv_xor_chan *) data; + + spin_lock_bh(&chan->lock); mv_xor_slot_cleanup(chan); + spin_unlock_bh(&chan->lock); } static struct mv_xor_desc_slot * @@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan) struct mv_xor_desc_slot *iter, *_iter; int in_use_descs = 0; + spin_lock_bh(&mv_chan->lock); + mv_xor_slot_cleanup(mv_chan); - spin_lock_bh(&mv_chan->lock); list_for_each_entry_safe(iter, _iter, &mv_chan->chain, chain_node) { in_use_descs++; @@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_COMPLETE) { - mv_xor_clean_completed_slots(mv_chan); + if (ret == DMA_COMPLETE) return ret; - } + + spin_lock_bh(&mv_chan->lock); mv_xor_slot_cleanup(mv_chan); + spin_unlock_bh(&mv_chan->lock); return dma_cookie_status(chan, cookie, txstate); } @@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan) static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) { - int i; + int i, ret; void *src, *dest; dma_addr_t src_dma, dest_dma; struct dma_chan *dma_chan; @@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, PAGE_SIZE, DMA_TO_DEVICE); - unmap->to_cnt = 1; unmap->addr[0] = src_dma; + ret = dma_mapping_error(dma_chan->device->dev, src_dma); + if (ret) { + err = -ENOMEM; + goto free_resources; + } + unmap->to_cnt = 1; + dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, PAGE_SIZE, DMA_FROM_DEVICE); - unmap->from_cnt = 1; unmap->addr[1] = dest_dma; + ret = dma_mapping_error(dma_chan->device->dev, dest_dma); + if (ret) { + err = -ENOMEM; + goto free_resources; + } + unmap->from_cnt = 1; unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, 
src_dma, PAGE_SIZE, 0); + if (!tx) { + dev_err(dma_chan->device->dev, + "Self-test cannot prepare operation, disabling\n"); + err = -ENODEV; + goto free_resources; + } + cookie = mv_xor_tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(dma_chan->device->dev, + "Self-test submit error, disabling\n"); + err = -ENODEV; + goto free_resources; + } + mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(1); @@ -866,7 +889,7 @@ out: static int mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) { - int i, src_idx; + int i, src_idx, ret; struct page *dest; struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; @@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0, PAGE_SIZE, DMA_TO_DEVICE); dma_srcs[i] = unmap->addr[i]; + ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]); + if (ret) { + err = -ENOMEM; + goto free_resources; + } unmap->to_cnt++; } unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); dest_dma = unmap->addr[src_count]; + ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]); + if (ret) { + err = -ENOMEM; + goto free_resources; + } unmap->from_cnt = 1; unmap->len = PAGE_SIZE; tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, src_count, PAGE_SIZE, 0); + if (!tx) { + dev_err(dma_chan->device->dev, + "Self-test cannot prepare operation, disabling\n"); + err = -ENODEV; + goto free_resources; + } cookie = mv_xor_tx_submit(tx); + if (dma_submit_error(cookie)) { + dev_err(dma_chan->device->dev, + "Self-test submit error, disabling\n"); + err = -ENODEV; + goto free_resources; + } + mv_xor_issue_pending(dma_chan); async_tx_ack(tx); msleep(8); diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ead491346da7..5ea61201dbf0 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -413,16 +413,14 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int ret; - mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, - CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, - GFP_KERNEL); + mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, + CCW_BLOCK_SIZE, + &mxs_chan->ccw_phys, GFP_KERNEL); if (!mxs_chan->ccw) { ret = -ENOMEM; goto err_alloc; } - memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); - if (mxs_chan->chan_irq != NO_IRQ) { ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, 0, "mxs-dma", mxs_dma); @@ -591,7 +589,7 @@ err_out: static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c new file mode 100644 index 000000000000..5aeada56a442 --- /dev/null +++ b/drivers/dma/nbpfaxi.c @@ -0,0 +1,1517 @@ +/* + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. + * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
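[Editor's sketch: several hunks in this section (ep93xx, fsl-edma, imx-dma, imx-sdma, mmp_pdma, mmp_tdma, mxs-dma) drop the trailing "void *context" argument from device_prep_dma_cyclic() implementations. A minimal illustration of the resulting callback shape; foo_* is a placeholder and the body is intentionally elided, not taken from any of the patches.]

static struct dma_async_tx_descriptor *foo_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	/*
	 * A real implementation builds one descriptor covering buf_len bytes,
	 * split into buf_len / period_len periods, and returns it (NULL on
	 * failure). It is wired up via dma_dev->device_prep_dma_cyclic.
	 */
	return NULL;
}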
+ */ + +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <dt-bindings/dma/nbpfaxi.h> + +#include "dmaengine.h" + +#define NBPF_REG_CHAN_OFFSET 0 +#define NBPF_REG_CHAN_SIZE 0x40 + +/* Channel Current Transaction Byte register */ +#define NBPF_CHAN_CUR_TR_BYTE 0x20 + +/* Channel Status register */ +#define NBPF_CHAN_STAT 0x24 +#define NBPF_CHAN_STAT_EN 1 +#define NBPF_CHAN_STAT_TACT 4 +#define NBPF_CHAN_STAT_ERR 0x10 +#define NBPF_CHAN_STAT_END 0x20 +#define NBPF_CHAN_STAT_TC 0x40 +#define NBPF_CHAN_STAT_DER 0x400 + +/* Channel Control register */ +#define NBPF_CHAN_CTRL 0x28 +#define NBPF_CHAN_CTRL_SETEN 1 +#define NBPF_CHAN_CTRL_CLREN 2 +#define NBPF_CHAN_CTRL_STG 4 +#define NBPF_CHAN_CTRL_SWRST 8 +#define NBPF_CHAN_CTRL_CLRRQ 0x10 +#define NBPF_CHAN_CTRL_CLREND 0x20 +#define NBPF_CHAN_CTRL_CLRTC 0x40 +#define NBPF_CHAN_CTRL_SETSUS 0x100 +#define NBPF_CHAN_CTRL_CLRSUS 0x200 + +/* Channel Configuration register */ +#define NBPF_CHAN_CFG 0x2c +#define NBPF_CHAN_CFG_SEL 7 /* terminal SELect: 0..7 */ +#define NBPF_CHAN_CFG_REQD 8 /* REQuest Direction: DMAREQ is 0: input, 1: output */ +#define NBPF_CHAN_CFG_LOEN 0x10 /* LOw ENable: low DMA request line is: 0: inactive, 1: active */ +#define NBPF_CHAN_CFG_HIEN 0x20 /* HIgh ENable: high DMA request line is: 0: inactive, 1: active */ +#define NBPF_CHAN_CFG_LVL 0x40 /* LeVeL: DMA request line is sensed as 0: edge, 1: level */ +#define NBPF_CHAN_CFG_AM 0x700 /* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */ +#define NBPF_CHAN_CFG_SDS 0xf000 /* Source Data Size: 0: 8 bits,... 
, 7: 1024 bits */ +#define NBPF_CHAN_CFG_DDS 0xf0000 /* Destination Data Size: as above */ +#define NBPF_CHAN_CFG_SAD 0x100000 /* Source ADdress counting: 0: increment, 1: fixed */ +#define NBPF_CHAN_CFG_DAD 0x200000 /* Destination ADdress counting: 0: increment, 1: fixed */ +#define NBPF_CHAN_CFG_TM 0x400000 /* Transfer Mode: 0: single, 1: block TM */ +#define NBPF_CHAN_CFG_DEM 0x1000000 /* DMAEND interrupt Mask */ +#define NBPF_CHAN_CFG_TCM 0x2000000 /* DMATCO interrupt Mask */ +#define NBPF_CHAN_CFG_SBE 0x8000000 /* Sweep Buffer Enable */ +#define NBPF_CHAN_CFG_RSEL 0x10000000 /* RM: Register Set sELect */ +#define NBPF_CHAN_CFG_RSW 0x20000000 /* RM: Register Select sWitch */ +#define NBPF_CHAN_CFG_REN 0x40000000 /* RM: Register Set Enable */ +#define NBPF_CHAN_CFG_DMS 0x80000000 /* 0: register mode (RM), 1: link mode (LM) */ + +#define NBPF_CHAN_NXLA 0x38 +#define NBPF_CHAN_CRLA 0x3c + +/* Link Header field */ +#define NBPF_HEADER_LV 1 +#define NBPF_HEADER_LE 2 +#define NBPF_HEADER_WBD 4 +#define NBPF_HEADER_DIM 8 + +#define NBPF_CTRL 0x300 +#define NBPF_CTRL_PR 1 /* 0: fixed priority, 1: round robin */ +#define NBPF_CTRL_LVINT 2 /* DMAEND and DMAERR signalling: 0: pulse, 1: level */ + +#define NBPF_DSTAT_ER 0x314 +#define NBPF_DSTAT_END 0x318 + +#define NBPF_DMA_BUSWIDTHS \ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +struct nbpf_config { + int num_channels; + int buffer_size; +}; + +/* + * We've got 3 types of objects, used to describe DMA transfers: + * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object + * in it, used to communicate with the user + * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer + * queuing, these must be DMAable, using either the streaming DMA API or + * allocated from coherent memory - one per SG segment + * 3. one per SG segment descriptors, used to manage HW link descriptors from + * (2). They do not have to be DMAable. They can either be (a) allocated + * together with link descriptors as mixed (DMA / CPU) objects, or (b) + * separately. Even if allocated separately it would be best to link them + * to link descriptors once during channel resource allocation and always + * use them as a single object. + * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be + * treated as a single SG segment descriptor. 
+ */ + +struct nbpf_link_reg { + u32 header; + u32 src_addr; + u32 dst_addr; + u32 transaction_size; + u32 config; + u32 interval; + u32 extension; + u32 next; +} __packed; + +struct nbpf_device; +struct nbpf_channel; +struct nbpf_desc; + +struct nbpf_link_desc { + struct nbpf_link_reg *hwdesc; + dma_addr_t hwdesc_dma_addr; + struct nbpf_desc *desc; + struct list_head node; +}; + +/** + * struct nbpf_desc - DMA transfer descriptor + * @async_tx: dmaengine object + * @user_wait: waiting for a user ack + * @length: total transfer length + * @sg: list of hardware descriptors, represented by struct nbpf_link_desc + * @node: member in channel descriptor lists + */ +struct nbpf_desc { + struct dma_async_tx_descriptor async_tx; + bool user_wait; + size_t length; + struct nbpf_channel *chan; + struct list_head sg; + struct list_head node; +}; + +/* Take a wild guess: allocate 4 segments per descriptor */ +#define NBPF_SEGMENTS_PER_DESC 4 +#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \ + (sizeof(struct nbpf_desc) + \ + NBPF_SEGMENTS_PER_DESC * \ + (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg)))) +#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE) + +struct nbpf_desc_page { + struct list_head node; + struct nbpf_desc desc[NBPF_DESCS_PER_PAGE]; + struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE]; + struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE]; +}; + +/** + * struct nbpf_channel - one DMAC channel + * @dma_chan: standard dmaengine channel object + * @base: register address base + * @nbpf: DMAC + * @name: IRQ name + * @irq: IRQ number + * @slave_addr: address for slave DMA + * @slave_width:slave data size in bytes + * @slave_burst:maximum slave burst size in bytes + * @terminal: DMA terminal, assigned to this channel + * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG + * @flags: configuration flags from DT + * @lock: protect descriptor lists + * @free_links: list of free link descriptors + * @free: list of free descriptors + * @queued: list of queued descriptors + * @active: list of descriptors, scheduled for processing + * @done: list of completed descriptors, waiting post-processing + * @desc_page: list of additionally allocated descriptor pages - if any + */ +struct nbpf_channel { + struct dma_chan dma_chan; + struct tasklet_struct tasklet; + void __iomem *base; + struct nbpf_device *nbpf; + char name[16]; + int irq; + dma_addr_t slave_src_addr; + size_t slave_src_width; + size_t slave_src_burst; + dma_addr_t slave_dst_addr; + size_t slave_dst_width; + size_t slave_dst_burst; + unsigned int terminal; + u32 dmarq_cfg; + unsigned long flags; + spinlock_t lock; + struct list_head free_links; + struct list_head free; + struct list_head queued; + struct list_head active; + struct list_head done; + struct list_head desc_page; + struct nbpf_desc *running; + bool paused; +}; + +struct nbpf_device { + struct dma_device dma_dev; + void __iomem *base; + struct clk *clk; + const struct nbpf_config *config; + struct nbpf_channel chan[]; +}; + +enum nbpf_model { + NBPF1B4, + NBPF1B8, + NBPF1B16, + NBPF4B4, + NBPF4B8, + NBPF4B16, + NBPF8B4, + NBPF8B8, + NBPF8B16, +}; + +static struct nbpf_config nbpf_cfg[] = { + [NBPF1B4] = { + .num_channels = 1, + .buffer_size = 4, + }, + [NBPF1B8] = { + .num_channels = 1, + .buffer_size = 8, + }, + [NBPF1B16] = { + .num_channels = 1, + .buffer_size = 16, + }, + [NBPF4B4] = { + .num_channels = 4, + .buffer_size = 4, + }, + [NBPF4B8] = { + .num_channels = 4, + 
.buffer_size = 8, + }, + [NBPF4B16] = { + .num_channels = 4, + .buffer_size = 16, + }, + [NBPF8B4] = { + .num_channels = 8, + .buffer_size = 4, + }, + [NBPF8B8] = { + .num_channels = 8, + .buffer_size = 8, + }, + [NBPF8B16] = { + .num_channels = 8, + .buffer_size = 16, + }, +}; + +#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan) + +/* + * dmaengine drivers seem to have a lot in common and instead of sharing more + * code, they reimplement those common algorithms independently. In this driver + * we try to separate the hardware-specific part from the (largely) generic + * part. This improves code readability and makes it possible in the future to + * reuse the generic code in form of a helper library. That generic code should + * be suitable for various DMA controllers, using transfer descriptors in RAM + * and pushing one SG list at a time to the DMA controller. + */ + +/* Hardware-specific part */ + +static inline u32 nbpf_chan_read(struct nbpf_channel *chan, + unsigned int offset) +{ + u32 data = ioread32(chan->base + offset); + dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, chan->base, offset, data); + return data; +} + +static inline void nbpf_chan_write(struct nbpf_channel *chan, + unsigned int offset, u32 data) +{ + iowrite32(data, chan->base + offset); + dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, chan->base, offset, data); +} + +static inline u32 nbpf_read(struct nbpf_device *nbpf, + unsigned int offset) +{ + u32 data = ioread32(nbpf->base + offset); + dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, nbpf->base, offset, data); + return data; +} + +static inline void nbpf_write(struct nbpf_device *nbpf, + unsigned int offset, u32 data) +{ + iowrite32(data, nbpf->base + offset); + dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, nbpf->base, offset, data); +} + +static void nbpf_chan_halt(struct nbpf_channel *chan) +{ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); +} + +static bool nbpf_status_get(struct nbpf_channel *chan) +{ + u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); + + return status & BIT(chan - chan->nbpf->chan); +} + +static void nbpf_status_ack(struct nbpf_channel *chan) +{ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); +} + +static u32 nbpf_error_get(struct nbpf_device *nbpf) +{ + return nbpf_read(nbpf, NBPF_DSTAT_ER); +} + +static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error) +{ + return nbpf->chan + __ffs(error); +} + +static void nbpf_error_clear(struct nbpf_channel *chan) +{ + u32 status; + int i; + + /* Stop the channel, make sure DMA has been aborted */ + nbpf_chan_halt(chan); + + for (i = 1000; i; i--) { + status = nbpf_chan_read(chan, NBPF_CHAN_STAT); + if (!(status & NBPF_CHAN_STAT_TACT)) + break; + cpu_relax(); + } + + if (!i) + dev_err(chan->dma_chan.device->dev, + "%s(): abort timeout, channel status 0x%x\n", __func__, status); + + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); +} + +static int nbpf_start(struct nbpf_desc *desc) +{ + struct nbpf_channel *chan = desc->chan; + struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node); + + nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); + chan->paused = false; + + /* Software trigger MEMCPY - only MEMCPY uses the block mode */ + if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM) + 
nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); + + dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, + nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); + + return 0; +} + +static void nbpf_chan_prepare(struct nbpf_channel *chan) +{ + chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | + (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | + (chan->flags & NBPF_SLAVE_RQ_LEVEL ? + NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) | + chan->terminal; +} + +static void nbpf_chan_prepare_default(struct nbpf_channel *chan) +{ + /* Don't output DMAACK */ + chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; + chan->terminal = 0; + chan->flags = 0; +} + +static void nbpf_chan_configure(struct nbpf_channel *chan) +{ + /* + * We assume, that only the link mode and DMA request line configuration + * have to be set in the configuration register manually. Dynamic + * per-transfer configuration will be loaded from transfer descriptors. + */ + nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); +} + +static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size) +{ + /* Maximum supported bursts depend on the buffer size */ + return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8)); +} + +static size_t nbpf_xfer_size(struct nbpf_device *nbpf, + enum dma_slave_buswidth width, u32 burst) +{ + size_t size; + + if (!burst) + burst = 1; + + switch (width) { + case DMA_SLAVE_BUSWIDTH_8_BYTES: + size = 8 * burst; + break; + + case DMA_SLAVE_BUSWIDTH_4_BYTES: + size = 4 * burst; + break; + + case DMA_SLAVE_BUSWIDTH_2_BYTES: + size = 2 * burst; + break; + + default: + pr_warn("%s(): invalid bus width %u\n", __func__, width); + case DMA_SLAVE_BUSWIDTH_1_BYTE: + size = burst; + } + + return nbpf_xfer_ds(nbpf, size); +} + +/* + * We need a way to recognise slaves, whose data is sent "raw" over the bus, + * i.e. it isn't known in advance how many bytes will be received. Therefore + * the slave driver has to provide a "large enough" buffer and either read the + * buffer, when it is full, or detect, that some data has arrived, then wait for + * a timeout, if no more data arrives - receive what's already there. We want to + * handle such slaves in a special way to allow an optimised mode for other + * users, for whom the amount of data is known in advance. So far there's no way + * to recognise such slaves. We use a data-width check to distinguish between + * the SD host and the PL011 UART. + */ + +static int nbpf_prep_one(struct nbpf_link_desc *ldesc, + enum dma_transfer_direction direction, + dma_addr_t src, dma_addr_t dst, size_t size, bool last) +{ + struct nbpf_link_reg *hwdesc = ldesc->hwdesc; + struct nbpf_desc *desc = ldesc->desc; + struct nbpf_channel *chan = desc->chan; + struct device *dev = chan->dma_chan.device->dev; + size_t mem_xfer, slave_xfer; + bool can_burst; + + hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV | + (last ? NBPF_HEADER_LE : 0); + + hwdesc->src_addr = src; + hwdesc->dst_addr = dst; + hwdesc->transaction_size = size; + + /* + * set config: SAD, DAD, DDS, SDS, etc. + * Note on transfer sizes: the DMAC can perform unaligned DMA transfers, + * but it is important to have transaction size a multiple of both + * receiver and transmitter transfer sizes. It is also possible to use + * different RAM and device transfer sizes, and it does work well with + * some devices, e.g. with V08R07S01E SD host controllers, which can use + * 128 byte transfers. 
But this doesn't work with other devices, + * especially when the transaction size is unknown. This is the case, + * e.g. with serial drivers like amba-pl011.c. For reception it sets up + * the transaction size of 4K and if fewer bytes are received, it + * pauses DMA and reads out data received via DMA as well as those left + * in the Rx FIFO. For this to work with the RAM side using burst + * transfers we enable the SBE bit and terminate the transfer in our + * DMA_PAUSE handler. + */ + mem_xfer = nbpf_xfer_ds(chan->nbpf, size); + + switch (direction) { + case DMA_DEV_TO_MEM: + can_burst = chan->slave_src_width >= 3; + slave_xfer = min(mem_xfer, can_burst ? + chan->slave_src_burst : chan->slave_src_width); + /* + * Is the slave narrower than 64 bits, i.e. isn't using the full + * bus width and cannot use bursts? + */ + if (mem_xfer > chan->slave_src_burst && !can_burst) + mem_xfer = chan->slave_src_burst; + /* Device-to-RAM DMA is unreliable without REQD set */ + hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) | + (NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD | + NBPF_CHAN_CFG_SBE; + break; + + case DMA_MEM_TO_DEV: + slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? + chan->slave_dst_burst : chan->slave_dst_width); + hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | + (NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD; + break; + + case DMA_MEM_TO_MEM: + hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM | + (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | + (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)); + break; + + default: + return -EINVAL; + } + + hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) | + NBPF_CHAN_CFG_DMS; + + dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n", + __func__, &ldesc->hwdesc_dma_addr, hwdesc->header, + hwdesc->config, size, &src, &dst); + + dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc), + DMA_TO_DEVICE); + + return 0; +} + +static size_t nbpf_bytes_left(struct nbpf_channel *chan) +{ + return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); +} + +static void nbpf_configure(struct nbpf_device *nbpf) +{ + nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); +} + +static void nbpf_pause(struct nbpf_channel *chan) +{ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); + /* See comment in nbpf_prep_one() */ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); +} + +/* Generic part */ + +/* DMA ENGINE functions */ +static void nbpf_issue_pending(struct dma_chan *dchan) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + unsigned long flags; + + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); + + spin_lock_irqsave(&chan->lock, flags); + if (list_empty(&chan->queued)) + goto unlock; + + list_splice_tail_init(&chan->queued, &chan->active); + + if (!chan->running) { + struct nbpf_desc *desc = list_first_entry(&chan->active, + struct nbpf_desc, node); + if (!nbpf_start(desc)) + chan->running = desc; + } + +unlock: + spin_unlock_irqrestore(&chan->lock, flags); +} + +static enum dma_status nbpf_tx_status(struct dma_chan *dchan, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + enum dma_status status = dma_cookie_status(dchan, cookie, state); + + if (state) { + dma_cookie_t running; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + running = chan->running ? 
chan->running->async_tx.cookie : -EINVAL; + + if (cookie == running) { + state->residue = nbpf_bytes_left(chan); + dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__, + state->residue); + } else if (status == DMA_IN_PROGRESS) { + struct nbpf_desc *desc; + bool found = false; + + list_for_each_entry(desc, &chan->active, node) + if (desc->async_tx.cookie == cookie) { + found = true; + break; + } + + if (!found) + list_for_each_entry(desc, &chan->queued, node) + if (desc->async_tx.cookie == cookie) { + found = true; + break; + + } + + state->residue = found ? desc->length : 0; + } + + spin_unlock_irqrestore(&chan->lock, flags); + } + + if (chan->paused) + status = DMA_PAUSED; + + return status; +} + +static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx); + struct nbpf_channel *chan = desc->chan; + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&chan->lock, flags); + cookie = dma_cookie_assign(tx); + list_add_tail(&desc->node, &chan->queued); + spin_unlock_irqrestore(&chan->lock, flags); + + dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); + + return cookie; +} + +static int nbpf_desc_page_alloc(struct nbpf_channel *chan) +{ + struct dma_chan *dchan = &chan->dma_chan; + struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + struct nbpf_link_desc *ldesc; + struct nbpf_link_reg *hwdesc; + struct nbpf_desc *desc; + LIST_HEAD(head); + LIST_HEAD(lhead); + int i; + struct device *dev = dchan->device->dev; + + if (!dpage) + return -ENOMEM; + + dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n", + __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage)); + + for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc; + i < ARRAY_SIZE(dpage->ldesc); + i++, ldesc++, hwdesc++) { + ldesc->hwdesc = hwdesc; + list_add_tail(&ldesc->node, &lhead); + ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev, + hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE); + + dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__, + hwdesc, &ldesc->hwdesc_dma_addr); + } + + for (i = 0, desc = dpage->desc; + i < ARRAY_SIZE(dpage->desc); + i++, desc++) { + dma_async_tx_descriptor_init(&desc->async_tx, dchan); + desc->async_tx.tx_submit = nbpf_tx_submit; + desc->chan = chan; + INIT_LIST_HEAD(&desc->sg); + list_add_tail(&desc->node, &head); + } + + /* + * This function cannot be called from interrupt context, so, no need to + * save flags + */ + spin_lock_irq(&chan->lock); + list_splice_tail(&lhead, &chan->free_links); + list_splice_tail(&head, &chan->free); + list_add(&dpage->node, &chan->desc_page); + spin_unlock_irq(&chan->lock); + + return ARRAY_SIZE(dpage->desc); +} + +static void nbpf_desc_put(struct nbpf_desc *desc) +{ + struct nbpf_channel *chan = desc->chan; + struct nbpf_link_desc *ldesc, *tmp; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + list_for_each_entry_safe(ldesc, tmp, &desc->sg, node) + list_move(&ldesc->node, &chan->free_links); + + list_add(&desc->node, &chan->free); + spin_unlock_irqrestore(&chan->lock, flags); +} + +static void nbpf_scan_acked(struct nbpf_channel *chan) +{ + struct nbpf_desc *desc, *tmp; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->lock, flags); + list_for_each_entry_safe(desc, tmp, &chan->done, node) + if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) { + list_move(&desc->node, &head); + desc->user_wait = false; + } + 
spin_unlock_irqrestore(&chan->lock, flags); + + list_for_each_entry_safe(desc, tmp, &head, node) { + list_del(&desc->node); + nbpf_desc_put(desc); + } +} + +/* + * We have to allocate descriptors with the channel lock dropped. This means, + * before we re-acquire the lock buffers can be taken already, so we have to + * re-check after re-acquiring the lock and possibly retry, if buffers are gone + * again. + */ +static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) +{ + struct nbpf_desc *desc = NULL; + struct nbpf_link_desc *ldesc, *prev = NULL; + + nbpf_scan_acked(chan); + + spin_lock_irq(&chan->lock); + + do { + int i = 0, ret; + + if (list_empty(&chan->free)) { + /* No more free descriptors */ + spin_unlock_irq(&chan->lock); + ret = nbpf_desc_page_alloc(chan); + if (ret < 0) + return NULL; + spin_lock_irq(&chan->lock); + continue; + } + desc = list_first_entry(&chan->free, struct nbpf_desc, node); + list_del(&desc->node); + + do { + if (list_empty(&chan->free_links)) { + /* No more free link descriptors */ + spin_unlock_irq(&chan->lock); + ret = nbpf_desc_page_alloc(chan); + if (ret < 0) { + nbpf_desc_put(desc); + return NULL; + } + spin_lock_irq(&chan->lock); + continue; + } + + ldesc = list_first_entry(&chan->free_links, + struct nbpf_link_desc, node); + ldesc->desc = desc; + if (prev) + prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr; + + prev = ldesc; + list_move_tail(&ldesc->node, &desc->sg); + + i++; + } while (i < len); + } while (!desc); + + prev->hwdesc->next = 0; + + spin_unlock_irq(&chan->lock); + + return desc; +} + +static void nbpf_chan_idle(struct nbpf_channel *chan) +{ + struct nbpf_desc *desc, *tmp; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->lock, flags); + + list_splice_init(&chan->done, &head); + list_splice_init(&chan->active, &head); + list_splice_init(&chan->queued, &head); + + chan->running = NULL; + + spin_unlock_irqrestore(&chan->lock, flags); + + list_for_each_entry_safe(desc, tmp, &head, node) { + dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", + __func__, desc, desc->async_tx.cookie); + list_del(&desc->node); + nbpf_desc_put(desc); + } +} + +static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct dma_slave_config *config; + + dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); + + switch (cmd) { + case DMA_TERMINATE_ALL: + dev_dbg(dchan->device->dev, "Terminating\n"); + nbpf_chan_halt(chan); + nbpf_chan_idle(chan); + break; + + case DMA_SLAVE_CONFIG: + if (!arg) + return -EINVAL; + config = (struct dma_slave_config *)arg; + + /* + * We could check config->slave_id to match chan->terminal here, + * but with DT they would be coming from the same source, so + * such a check would be superflous + */ + + chan->slave_dst_addr = config->dst_addr; + chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, + config->dst_addr_width, 1); + chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, + config->dst_addr_width, + config->dst_maxburst); + chan->slave_src_addr = config->src_addr; + chan->slave_src_width = nbpf_xfer_size(chan->nbpf, + config->src_addr_width, 1); + chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, + config->src_addr_width, + config->src_maxburst); + break; + + case DMA_PAUSE: + chan->paused = true; + nbpf_pause(chan); + break; + + default: + return -ENXIO; + } + + return 0; +} + +static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, + struct 
scatterlist *src_sg, struct scatterlist *dst_sg, + size_t len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct nbpf_link_desc *ldesc; + struct scatterlist *mem_sg; + struct nbpf_desc *desc; + bool inc_src, inc_dst; + size_t data_len = 0; + int i = 0; + + switch (direction) { + case DMA_DEV_TO_MEM: + mem_sg = dst_sg; + inc_src = false; + inc_dst = true; + break; + + case DMA_MEM_TO_DEV: + mem_sg = src_sg; + inc_src = true; + inc_dst = false; + break; + + default: + case DMA_MEM_TO_MEM: + mem_sg = src_sg; + inc_src = true; + inc_dst = true; + } + + desc = nbpf_desc_get(chan, len); + if (!desc) + return NULL; + + desc->async_tx.flags = flags; + desc->async_tx.cookie = -EBUSY; + desc->user_wait = false; + + /* + * This is a private descriptor list, and we own the descriptor. No need + * to lock. + */ + list_for_each_entry(ldesc, &desc->sg, node) { + int ret = nbpf_prep_one(ldesc, direction, + sg_dma_address(src_sg), + sg_dma_address(dst_sg), + sg_dma_len(mem_sg), + i == len - 1); + if (ret < 0) { + nbpf_desc_put(desc); + return NULL; + } + data_len += sg_dma_len(mem_sg); + if (inc_src) + src_sg = sg_next(src_sg); + if (inc_dst) + dst_sg = sg_next(dst_sg); + mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg; + i++; + } + + desc->length = data_len; + + /* The user has to return the descriptor to us ASAP via .tx_submit() */ + return &desc->async_tx; +} + +static struct dma_async_tx_descriptor *nbpf_prep_memcpy( + struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct scatterlist dst_sg; + struct scatterlist src_sg; + + sg_init_table(&dst_sg, 1); + sg_init_table(&src_sg, 1); + + sg_dma_address(&dst_sg) = dst; + sg_dma_address(&src_sg) = src; + + sg_dma_len(&dst_sg) = len; + sg_dma_len(&src_sg) = len; + + dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n", + __func__, len, &src, &dst); + + return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1, + DMA_MEM_TO_MEM, flags); +} + +static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg( + struct dma_chan *dchan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long flags) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + + if (dst_nents != src_nents) + return NULL; + + return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents, + DMA_MEM_TO_MEM, flags); +} + +static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction direction, unsigned long flags, void *context) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct scatterlist slave_sg; + + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); + + sg_init_table(&slave_sg, 1); + + switch (direction) { + case DMA_MEM_TO_DEV: + sg_dma_address(&slave_sg) = chan->slave_dst_addr; + return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len, + direction, flags); + + case DMA_DEV_TO_MEM: + sg_dma_address(&slave_sg) = chan->slave_src_addr; + return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len, + direction, flags); + + default: + return NULL; + } +} + +static int nbpf_alloc_chan_resources(struct dma_chan *dchan) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + int ret; + + INIT_LIST_HEAD(&chan->free); + INIT_LIST_HEAD(&chan->free_links); + INIT_LIST_HEAD(&chan->queued); + INIT_LIST_HEAD(&chan->active); + INIT_LIST_HEAD(&chan->done); + + ret = nbpf_desc_page_alloc(chan); + if (ret < 0) + return ret; + + 
dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__, + chan->terminal); + + nbpf_chan_configure(chan); + + return ret; +} + +static void nbpf_free_chan_resources(struct dma_chan *dchan) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct nbpf_desc_page *dpage, *tmp; + + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); + + nbpf_chan_halt(chan); + nbpf_chan_idle(chan); + /* Clean up for if a channel is re-used for MEMCPY after slave DMA */ + nbpf_chan_prepare_default(chan); + + list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { + struct nbpf_link_desc *ldesc; + int i; + list_del(&dpage->node); + for (i = 0, ldesc = dpage->ldesc; + i < ARRAY_SIZE(dpage->ldesc); + i++, ldesc++) + dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, + sizeof(*ldesc->hwdesc), DMA_TO_DEVICE); + free_page((unsigned long)dpage); + } +} + +static int nbpf_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + caps->src_addr_widths = NBPF_DMA_BUSWIDTHS; + caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS; + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = false; + caps->cmd_terminate = true; + + return 0; +} + +static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct nbpf_device *nbpf = ofdma->of_dma_data; + struct dma_chan *dchan; + struct nbpf_channel *chan; + + if (dma_spec->args_count != 2) + return NULL; + + dchan = dma_get_any_slave_channel(&nbpf->dma_dev); + if (!dchan) + return NULL; + + dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__, + dma_spec->np->name); + + chan = nbpf_to_chan(dchan); + + chan->terminal = dma_spec->args[0]; + chan->flags = dma_spec->args[1]; + + nbpf_chan_prepare(chan); + nbpf_chan_configure(chan); + + return dchan; +} + +static void nbpf_chan_tasklet(unsigned long data) +{ + struct nbpf_channel *chan = (struct nbpf_channel *)data; + struct nbpf_desc *desc, *tmp; + dma_async_tx_callback callback; + void *param; + + while (!list_empty(&chan->done)) { + bool found = false, must_put, recycling = false; + + spin_lock_irq(&chan->lock); + + list_for_each_entry_safe(desc, tmp, &chan->done, node) { + if (!desc->user_wait) { + /* Newly completed descriptor, have to process */ + found = true; + break; + } else if (async_tx_test_ack(&desc->async_tx)) { + /* + * This descriptor was waiting for a user ACK, + * it can be recycled now. 
+ */ + list_del(&desc->node); + spin_unlock_irq(&chan->lock); + nbpf_desc_put(desc); + recycling = true; + break; + } + } + + if (recycling) + continue; + + if (!found) { + /* This can happen if TERMINATE_ALL has been called */ + spin_unlock_irq(&chan->lock); + break; + } + + dma_cookie_complete(&desc->async_tx); + + /* + * With released lock we cannot dereference desc, maybe it's + * still on the "done" list + */ + if (async_tx_test_ack(&desc->async_tx)) { + list_del(&desc->node); + must_put = true; + } else { + desc->user_wait = true; + must_put = false; + } + + callback = desc->async_tx.callback; + param = desc->async_tx.callback_param; + + /* ack and callback completed descriptor */ + spin_unlock_irq(&chan->lock); + + if (callback) + callback(param); + + if (must_put) + nbpf_desc_put(desc); + } +} + +static irqreturn_t nbpf_chan_irq(int irq, void *dev) +{ + struct nbpf_channel *chan = dev; + bool done = nbpf_status_get(chan); + struct nbpf_desc *desc; + irqreturn_t ret; + bool bh = false; + + if (!done) + return IRQ_NONE; + + nbpf_status_ack(chan); + + dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__); + + spin_lock(&chan->lock); + desc = chan->running; + if (WARN_ON(!desc)) { + ret = IRQ_NONE; + goto unlock; + } else { + ret = IRQ_HANDLED; + bh = true; + } + + list_move_tail(&desc->node, &chan->done); + chan->running = NULL; + + if (!list_empty(&chan->active)) { + desc = list_first_entry(&chan->active, + struct nbpf_desc, node); + if (!nbpf_start(desc)) + chan->running = desc; + } + +unlock: + spin_unlock(&chan->lock); + + if (bh) + tasklet_schedule(&chan->tasklet); + + return ret; +} + +static irqreturn_t nbpf_err_irq(int irq, void *dev) +{ + struct nbpf_device *nbpf = dev; + u32 error = nbpf_error_get(nbpf); + + dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq); + + if (!error) + return IRQ_NONE; + + do { + struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error); + /* On error: abort all queued transfers, no callback */ + nbpf_error_clear(chan); + nbpf_chan_idle(chan); + error = nbpf_error_get(nbpf); + } while (error); + + return IRQ_HANDLED; +} + +static int nbpf_chan_probe(struct nbpf_device *nbpf, int n) +{ + struct dma_device *dma_dev = &nbpf->dma_dev; + struct nbpf_channel *chan = nbpf->chan + n; + int ret; + + chan->nbpf = nbpf; + chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; + INIT_LIST_HEAD(&chan->desc_page); + spin_lock_init(&chan->lock); + chan->dma_chan.device = dma_dev; + dma_cookie_init(&chan->dma_chan); + nbpf_chan_prepare_default(chan); + + dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); + + snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); + + tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan); + ret = devm_request_irq(dma_dev->dev, chan->irq, + nbpf_chan_irq, IRQF_SHARED, + chan->name, chan); + if (ret < 0) + return ret; + + /* Add the channel to DMA device channel list */ + list_add_tail(&chan->dma_chan.device_node, + &dma_dev->channels); + + return 0; +} + +static const struct of_device_id nbpf_match[] = { + {.compatible = "renesas,nbpfaxi64dmac1b4", .data = &nbpf_cfg[NBPF1B4]}, + {.compatible = "renesas,nbpfaxi64dmac1b8", .data = &nbpf_cfg[NBPF1B8]}, + {.compatible = "renesas,nbpfaxi64dmac1b16", .data = &nbpf_cfg[NBPF1B16]}, + {.compatible = "renesas,nbpfaxi64dmac4b4", .data = &nbpf_cfg[NBPF4B4]}, + {.compatible = "renesas,nbpfaxi64dmac4b8", .data = &nbpf_cfg[NBPF4B8]}, + {.compatible = "renesas,nbpfaxi64dmac4b16", .data = &nbpf_cfg[NBPF4B16]}, + {.compatible = 
"renesas,nbpfaxi64dmac8b4", .data = &nbpf_cfg[NBPF8B4]}, + {.compatible = "renesas,nbpfaxi64dmac8b8", .data = &nbpf_cfg[NBPF8B8]}, + {.compatible = "renesas,nbpfaxi64dmac8b16", .data = &nbpf_cfg[NBPF8B16]}, + {} +}; +MODULE_DEVICE_TABLE(of, nbpf_match); + +static int nbpf_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct of_device_id *of_id = of_match_device(nbpf_match, dev); + struct device_node *np = dev->of_node; + struct nbpf_device *nbpf; + struct dma_device *dma_dev; + struct resource *iomem, *irq_res; + const struct nbpf_config *cfg; + int num_channels; + int ret, irq, eirq, i; + int irqbuf[9] /* maximum 8 channels + error IRQ */; + unsigned int irqs = 0; + + BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE); + + /* DT only */ + if (!np || !of_id || !of_id->data) + return -ENODEV; + + cfg = of_id->data; + num_channels = cfg->num_channels; + + nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * + sizeof(nbpf->chan[0]), GFP_KERNEL); + if (!nbpf) { + dev_err(dev, "Memory allocation failed\n"); + return -ENOMEM; + } + dma_dev = &nbpf->dma_dev; + dma_dev->dev = dev; + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nbpf->base = devm_ioremap_resource(dev, iomem); + if (IS_ERR(nbpf->base)) + return PTR_ERR(nbpf->base); + + nbpf->clk = devm_clk_get(dev, NULL); + if (IS_ERR(nbpf->clk)) + return PTR_ERR(nbpf->clk); + + nbpf->config = cfg; + + for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (!irq_res) + break; + + for (irq = irq_res->start; irq <= irq_res->end; + irq++, irqs++) + irqbuf[irqs] = irq; + } + + /* + * 3 IRQ resource schemes are supported: + * 1. 1 shared IRQ for error and all channels + * 2. 2 IRQs: one for error and one shared for all channels + * 3. 
1 IRQ for error and an own IRQ for each channel + */ + if (irqs != 1 && irqs != 2 && irqs != num_channels + 1) + return -ENXIO; + + if (irqs == 1) { + eirq = irqbuf[0]; + + for (i = 0; i <= num_channels; i++) + nbpf->chan[i].irq = irqbuf[0]; + } else { + eirq = platform_get_irq_byname(pdev, "error"); + if (eirq < 0) + return eirq; + + if (irqs == num_channels + 1) { + struct nbpf_channel *chan; + + for (i = 0, chan = nbpf->chan; i <= num_channels; + i++, chan++) { + /* Skip the error IRQ */ + if (irqbuf[i] == eirq) + i++; + chan->irq = irqbuf[i]; + } + + if (chan != nbpf->chan + num_channels) + return -EINVAL; + } else { + /* 2 IRQs and more than one channel */ + if (irqbuf[0] == eirq) + irq = irqbuf[1]; + else + irq = irqbuf[0]; + + for (i = 0; i <= num_channels; i++) + nbpf->chan[i].irq = irq; + } + } + + ret = devm_request_irq(dev, eirq, nbpf_err_irq, + IRQF_SHARED, "dma error", nbpf); + if (ret < 0) + return ret; + + INIT_LIST_HEAD(&dma_dev->channels); + + /* Create DMA Channel */ + for (i = 0; i < num_channels; i++) { + ret = nbpf_chan_probe(nbpf, i); + if (ret < 0) + return ret; + } + + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); + dma_cap_set(DMA_SG, dma_dev->cap_mask); + + /* Common and MEMCPY operations */ + dma_dev->device_alloc_chan_resources + = nbpf_alloc_chan_resources; + dma_dev->device_free_chan_resources = nbpf_free_chan_resources; + dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg; + dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; + dma_dev->device_tx_status = nbpf_tx_status; + dma_dev->device_issue_pending = nbpf_issue_pending; + dma_dev->device_slave_caps = nbpf_slave_caps; + + /* + * If we drop support for unaligned MEMCPY buffer addresses and / or + * lengths by setting + * dma_dev->copy_align = 4; + * then we can set transfer length to 4 bytes in nbpf_prep_one() for + * DMA_MEM_TO_MEM + */ + + /* Compulsory for DMA_SLAVE fields */ + dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; + dma_dev->device_control = nbpf_control; + + platform_set_drvdata(pdev, nbpf); + + ret = clk_prepare_enable(nbpf->clk); + if (ret < 0) + return ret; + + nbpf_configure(nbpf); + + ret = dma_async_device_register(dma_dev); + if (ret < 0) + goto e_clk_off; + + ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf); + if (ret < 0) + goto e_dma_dev_unreg; + + return 0; + +e_dma_dev_unreg: + dma_async_device_unregister(dma_dev); +e_clk_off: + clk_disable_unprepare(nbpf->clk); + + return ret; +} + +static int nbpf_remove(struct platform_device *pdev) +{ + struct nbpf_device *nbpf = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&nbpf->dma_dev); + clk_disable_unprepare(nbpf->clk); + + return 0; +} + +static struct platform_device_id nbpf_ids[] = { + {"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]}, + {"nbpfaxi64dmac1b8", (kernel_ulong_t)&nbpf_cfg[NBPF1B8]}, + {"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]}, + {"nbpfaxi64dmac4b4", (kernel_ulong_t)&nbpf_cfg[NBPF4B4]}, + {"nbpfaxi64dmac4b8", (kernel_ulong_t)&nbpf_cfg[NBPF4B8]}, + {"nbpfaxi64dmac4b16", (kernel_ulong_t)&nbpf_cfg[NBPF4B16]}, + {"nbpfaxi64dmac8b4", (kernel_ulong_t)&nbpf_cfg[NBPF8B4]}, + {"nbpfaxi64dmac8b8", (kernel_ulong_t)&nbpf_cfg[NBPF8B8]}, + {"nbpfaxi64dmac8b16", (kernel_ulong_t)&nbpf_cfg[NBPF8B16]}, + {}, +}; +MODULE_DEVICE_TABLE(platform, nbpf_ids); + +#ifdef CONFIG_PM_RUNTIME +static int nbpf_runtime_suspend(struct device *dev) +{ + struct nbpf_device 
*nbpf = platform_get_drvdata(to_platform_device(dev)); + clk_disable_unprepare(nbpf->clk); + return 0; +} + +static int nbpf_runtime_resume(struct device *dev) +{ + struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); + return clk_prepare_enable(nbpf->clk); +} +#endif + +static const struct dev_pm_ops nbpf_pm_ops = { + SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL) +}; + +static struct platform_driver nbpf_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "dma-nbpf", + .of_match_table = nbpf_match, + .pm = &nbpf_pm_ops, + }, + .id_table = nbpf_ids, + .probe = nbpf_probe, + .remove = nbpf_remove, +}; + +module_platform_driver(nbpf_driver); + +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); +MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index e8fe9dc455f4..d5fbeaa1e7ba 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -218,3 +218,38 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, &dma_spec->args[0]); } EXPORT_SYMBOL_GPL(of_dma_simple_xlate); + +/** + * of_dma_xlate_by_chan_id - Translate dt property to DMA channel by channel id + * @dma_spec: pointer to DMA specifier as found in the device tree + * @of_dma: pointer to DMA controller data + * + * This function can be used as the of xlate callback for DMA driver which wants + * to match the channel based on the channel id. When using this xlate function + * the #dma-cells propety of the DMA controller dt node needs to be set to 1. + * The data parameter of of_dma_controller_register must be a pointer to the + * dma_device struct the function should match upon. + * + * Returns pointer to appropriate dma channel on success or NULL on error. 
+ */ +struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct dma_device *dev = ofdma->of_dma_data; + struct dma_chan *chan, *candidate = NULL; + + if (!dev || dma_spec->args_count != 1) + return NULL; + + list_for_each_entry(chan, &dev->channels, device_node) + if (chan->chan_id == dma_spec->args[0]) { + candidate = chan; + break; + } + + if (!candidate) + return NULL; + + return dma_get_slave_channel(candidate); +} +EXPORT_SYMBOL_GPL(of_dma_xlate_by_chan_id); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index b19f04f4390b..bbea8243f9e8 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -853,8 +853,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, - size_t period_len, enum dma_transfer_direction dir, unsigned long flags, - void *context) + size_t period_len, enum dma_transfer_direction dir, unsigned long flags) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); @@ -1018,6 +1017,11 @@ static int omap_dma_resume(struct omap_chan *c) return -EINVAL; if (c->paused) { + mb(); + + /* Restore channel link register */ + omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl); + omap_dma_start(c, c->desc); c->paused = false; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 73fa9b7a10ab..d5149aacd2fe 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -33,26 +33,15 @@ #define PL330_MAX_IRQS 32 #define PL330_MAX_PERI 32 -enum pl330_srccachectrl { - SCCTRL0, /* Noncacheable and nonbufferable */ - SCCTRL1, /* Bufferable only */ - SCCTRL2, /* Cacheable, but do not allocate */ - SCCTRL3, /* Cacheable and bufferable, but do not allocate */ - SINVALID1, - SINVALID2, - SCCTRL6, /* Cacheable write-through, allocate on reads only */ - SCCTRL7, /* Cacheable write-back, allocate on reads only */ -}; - -enum pl330_dstcachectrl { - DCCTRL0, /* Noncacheable and nonbufferable */ - DCCTRL1, /* Bufferable only */ - DCCTRL2, /* Cacheable, but do not allocate */ - DCCTRL3, /* Cacheable and bufferable, but do not allocate */ - DINVALID1, /* AWCACHE = 0x1000 */ - DINVALID2, - DCCTRL6, /* Cacheable write-through, allocate on writes only */ - DCCTRL7, /* Cacheable write-back, allocate on writes only */ +enum pl330_cachectrl { + CCTRL0, /* Noncacheable and nonbufferable */ + CCTRL1, /* Bufferable only */ + CCTRL2, /* Cacheable, but do not allocate */ + CCTRL3, /* Cacheable and bufferable, but do not allocate */ + INVALID1, /* AWCACHE = 0x1000 */ + INVALID2, + CCTRL6, /* Cacheable write-through, allocate on writes only */ + CCTRL7, /* Cacheable write-back, allocate on writes only */ }; enum pl330_byteswap { @@ -63,13 +52,6 @@ enum pl330_byteswap { SWAP_16, }; -enum pl330_reqtype { - MEMTOMEM, - MEMTODEV, - DEVTOMEM, - DEVTODEV, -}; - /* Register and Bit field Definitions */ #define DS 0x0 #define DS_ST_STOP 0x0 @@ -263,9 +245,6 @@ enum pl330_reqtype { */ #define MCODE_BUFF_PER_REQ 256 -/* If the _pl330_req is available to the client */ -#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) - /* Use this _only_ to wait on transient states */ #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); @@ -300,27 +279,6 @@ struct pl330_config { u32 irq_ns; }; -/* Handle to the DMAC provided to the PL330 core */ -struct pl330_info { - /* Owning device */ - struct device *dev; - /* Size of MicroCode buffers for each 
channel. */ - unsigned mcbufsz; - /* ioremap'ed address of PL330 registers. */ - void __iomem *base; - /* Client can freely use it. */ - void *client_data; - /* PL330 core data, Client must not touch it. */ - void *pl330_data; - /* Populated by the PL330 core driver during pl330_add */ - struct pl330_config pcfg; - /* - * If the DMAC has some reset mechanism, then the - * client may want to provide pointer to the method. - */ - void (*dmac_reset)(struct pl330_info *pi); -}; - /** * Request Configuration. * The PL330 core does not modify this and uses the last @@ -344,8 +302,8 @@ struct pl330_reqcfg { unsigned brst_len:5; unsigned brst_size:3; /* in power of 2 */ - enum pl330_dstcachectrl dcctl; - enum pl330_srccachectrl scctl; + enum pl330_cachectrl dcctl; + enum pl330_cachectrl scctl; enum pl330_byteswap swap; struct pl330_config *pcfg; }; @@ -359,11 +317,6 @@ struct pl330_xfer { u32 dst_addr; /* Size to xfer */ u32 bytes; - /* - * Pointer to next xfer in the list. - * The last xfer in the req must point to NULL. - */ - struct pl330_xfer *next; }; /* The xfer callbacks are made with one of these arguments. */ @@ -376,67 +329,6 @@ enum pl330_op_err { PL330_ERR_FAIL, }; -/* A request defining Scatter-Gather List ending with NULL xfer. */ -struct pl330_req { - enum pl330_reqtype rqtype; - /* Index of peripheral for the xfer. */ - unsigned peri:5; - /* Unique token for this xfer, set by the client. */ - void *token; - /* Callback to be called after xfer. */ - void (*xfer_cb)(void *token, enum pl330_op_err err); - /* If NULL, req will be done at last set parameters. */ - struct pl330_reqcfg *cfg; - /* Pointer to first xfer in the request. */ - struct pl330_xfer *x; - /* Hook to attach to DMAC's list of reqs with due callback */ - struct list_head rqd; -}; - -/* - * To know the status of the channel and DMAC, the client - * provides a pointer to this structure. The PL330 core - * fills it with current information. - */ -struct pl330_chanstatus { - /* - * If the DMAC engine halted due to some error, - * the client should remove-add DMAC. - */ - bool dmac_halted; - /* - * If channel is halted due to some error, - * the client should ABORT/FLUSH and START the channel. - */ - bool faulting; - /* Location of last load */ - u32 src_addr; - /* Location of last store */ - u32 dst_addr; - /* - * Pointer to the currently active req, NULL if channel is - * inactive, even though the requests may be present. - */ - struct pl330_req *top_req; - /* Pointer to req waiting second in the queue if any. 
*/ - struct pl330_req *wait_req; -}; - -enum pl330_chan_op { - /* Start the channel */ - PL330_OP_START, - /* Abort the active xfer */ - PL330_OP_ABORT, - /* Stop xfer and flush queue */ - PL330_OP_FLUSH, -}; - -struct _xfer_spec { - u32 ccr; - struct pl330_req *r; - struct pl330_xfer *x; -}; - enum dmamov_dst { SAR = 0, CCR, @@ -454,12 +346,12 @@ enum pl330_cond { ALWAYS, }; +struct dma_pl330_desc; + struct _pl330_req { u32 mc_bus; void *mc_cpu; - /* Number of bytes taken to setup MC for the req */ - u32 mc_len; - struct pl330_req *r; + struct dma_pl330_desc *desc; }; /* ToBeDone for tasklet */ @@ -491,30 +383,6 @@ enum pl330_dmac_state { DYING, }; -/* A DMAC */ -struct pl330_dmac { - spinlock_t lock; - /* Holds list of reqs with due callbacks */ - struct list_head req_done; - /* Pointer to platform specific stuff */ - struct pl330_info *pinfo; - /* Maximum possible events/irqs */ - int events[32]; - /* BUS address of MicroCode buffer */ - dma_addr_t mcode_bus; - /* CPU address of MicroCode buffer */ - void *mcode_cpu; - /* List of all Channel threads */ - struct pl330_thread *channels; - /* Pointer to the MANAGER thread */ - struct pl330_thread *manager; - /* To handle bad news in interrupt */ - struct tasklet_struct tasks; - struct _pl330_tbd dmac_tbd; - /* State of DMAC operation */ - enum pl330_dmac_state state; -}; - enum desc_status { /* In the DMAC pool */ FREE, @@ -555,15 +423,16 @@ struct dma_pl330_chan { * As the parent, this DMAC also provides descriptors * to the channel. */ - struct dma_pl330_dmac *dmac; + struct pl330_dmac *dmac; /* To protect channel manipulation */ spinlock_t lock; - /* Token of a hardware channel thread of PL330 DMAC - * NULL if the channel is available to be acquired. + /* + * Hardware channel thread of PL330 DMAC. NULL if the channel is + * available. */ - void *pl330_chid; + struct pl330_thread *thread; /* For D-to-M and M-to-D channels */ int burst_sz; /* the peripheral fifo width */ @@ -574,9 +443,7 @@ struct dma_pl330_chan { bool cyclic; }; -struct dma_pl330_dmac { - struct pl330_info pif; - +struct pl330_dmac { /* DMA-Engine Device */ struct dma_device ddma; @@ -588,6 +455,32 @@ struct dma_pl330_dmac { /* To protect desc_pool manipulation */ spinlock_t pool_lock; + /* Size of MicroCode buffers for each channel. */ + unsigned mcbufsz; + /* ioremap'ed address of PL330 registers. */ + void __iomem *base; + /* Populated by the PL330 core driver during pl330_add */ + struct pl330_config pcfg; + + spinlock_t lock; + /* Maximum possible events/irqs */ + int events[32]; + /* BUS address of MicroCode buffer */ + dma_addr_t mcode_bus; + /* CPU address of MicroCode buffer */ + void *mcode_cpu; + /* List of all Channel threads */ + struct pl330_thread *channels; + /* Pointer to the MANAGER thread */ + struct pl330_thread *manager; + /* To handle bad news in interrupt */ + struct tasklet_struct tasks; + struct _pl330_tbd dmac_tbd; + /* State of DMAC operation */ + enum pl330_dmac_state state; + /* Holds list of reqs with due callbacks */ + struct list_head req_done; + /* Peripheral channels connected to this DMAC */ unsigned int num_peripherals; struct dma_pl330_chan *peripherals; /* keep at end */ @@ -604,49 +497,43 @@ struct dma_pl330_desc { struct pl330_xfer px; struct pl330_reqcfg rqcfg; - struct pl330_req req; enum desc_status status; /* The channel which currently holds this desc */ struct dma_pl330_chan *pchan; + + enum dma_transfer_direction rqtype; + /* Index of peripheral for the xfer. 
*/ + unsigned peri:5; + /* Hook to attach to DMAC's list of reqs with due callback */ + struct list_head rqd; }; -static inline void _callback(struct pl330_req *r, enum pl330_op_err err) -{ - if (r && r->xfer_cb) - r->xfer_cb(r->token, err); -} +struct _xfer_spec { + u32 ccr; + struct dma_pl330_desc *desc; +}; static inline bool _queue_empty(struct pl330_thread *thrd) { - return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) - ? true : false; + return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL; } static inline bool _queue_full(struct pl330_thread *thrd) { - return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) - ? false : true; + return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; } static inline bool is_manager(struct pl330_thread *thrd) { - struct pl330_dmac *pl330 = thrd->dmac; - - /* MANAGER is indexed at the end */ - if (thrd->id == pl330->pinfo->pcfg.num_chan) - return true; - else - return false; + return thrd->dmac->manager == thrd; } /* If manager of the thread is in Non-Secure mode */ static inline bool _manager_ns(struct pl330_thread *thrd) { - struct pl330_dmac *pl330 = thrd->dmac; - - return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; + return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false; } static inline u32 get_revision(u32 periph_id) @@ -1004,7 +891,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[], /* Returns Time-Out */ static bool _until_dmac_idle(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; unsigned long loops = msecs_to_loops(5); do { @@ -1024,7 +911,7 @@ static bool _until_dmac_idle(struct pl330_thread *thrd) static inline void _execute_DBGINSN(struct pl330_thread *thrd, u8 insn[], bool as_manager) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u32 val; val = (insn[0] << 16) | (insn[1] << 24); @@ -1039,7 +926,7 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, /* If timed out due to halted state-machine */ if (_until_dmac_idle(thrd)) { - dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); + dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n"); return; } @@ -1047,25 +934,9 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, writel(0, regs + DBGCMD); } -/* - * Mark a _pl330_req as free. - * We do it by writing DMAEND as the first instruction - * because no valid request is going to have DMAEND as - * its first instruction to execute. 
- */ -static void mark_free(struct pl330_thread *thrd, int idx) -{ - struct _pl330_req *req = &thrd->req[idx]; - - _emit_END(0, req->mc_cpu); - req->mc_len = 0; - - thrd->req_running = -1; -} - static inline u32 _state(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u32 val; if (is_manager(thrd)) @@ -1123,7 +994,7 @@ static inline u32 _state(struct pl330_thread *thrd) static void _stop(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u8 insn[6] = {0, 0, 0, 0, 0, 0}; if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) @@ -1146,9 +1017,9 @@ static void _stop(struct pl330_thread *thrd) /* Start doing req 'idx' of thread 'thrd' */ static bool _trigger(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; struct _pl330_req *req; - struct pl330_req *r; + struct dma_pl330_desc *desc; struct _arg_GO go; unsigned ns; u8 insn[6] = {0, 0, 0, 0, 0, 0}; @@ -1159,32 +1030,27 @@ static bool _trigger(struct pl330_thread *thrd) return true; idx = 1 - thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) + if (thrd->req[idx].desc != NULL) { req = &thrd->req[idx]; - else { + } else { idx = thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) + if (thrd->req[idx].desc != NULL) req = &thrd->req[idx]; else req = NULL; } /* Return if no request */ - if (!req || !req->r) + if (!req) return true; - r = req->r; + desc = req->desc; - if (r->cfg) - ns = r->cfg->nonsecure ? 1 : 0; - else if (readl(regs + CS(thrd->id)) & CS_CNS) - ns = 1; - else - ns = 0; + ns = desc->rqcfg.nonsecure ? 1 : 0; /* See 'Abort Sources' point-4 at Page 2-25 */ if (_manager_ns(thrd) && !ns) - dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", + dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n", __func__, __LINE__); go.chan = thrd->id; @@ -1240,7 +1106,7 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs, int cyc) { int off = 0; - struct pl330_config *pcfg = pxs->r->cfg->pcfg; + struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg; /* check lock-up free version */ if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { @@ -1266,10 +1132,10 @@ static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], int off = 0; while (cyc--) { - off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); + off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); + off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri); off += _emit_ST(dry_run, &buf[off], ALWAYS); - off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); + off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); } return off; @@ -1281,10 +1147,10 @@ static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], int off = 0; while (cyc--) { - off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); + off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); off += _emit_LD(dry_run, &buf[off], ALWAYS); - off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); + off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); + off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); } return off; @@ -1295,14 +1161,14 @@ static int _bursts(unsigned dry_run, u8 buf[], { int off = 0; - switch (pxs->r->rqtype) { - case MEMTODEV: + switch (pxs->desc->rqtype) { + case DMA_MEM_TO_DEV: off += _ldst_memtodev(dry_run, 
&buf[off], pxs, cyc); break; - case DEVTOMEM: + case DMA_DEV_TO_MEM: off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); break; - case MEMTOMEM: + case DMA_MEM_TO_MEM: off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); break; default: @@ -1395,7 +1261,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], static inline int _setup_loops(unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs) { - struct pl330_xfer *x = pxs->x; + struct pl330_xfer *x = &pxs->desc->px; u32 ccr = pxs->ccr; unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); int off = 0; @@ -1412,7 +1278,7 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[], static inline int _setup_xfer(unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs) { - struct pl330_xfer *x = pxs->x; + struct pl330_xfer *x = &pxs->desc->px; int off = 0; /* DMAMOV SAR, x->src_addr */ @@ -1443,17 +1309,12 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, /* DMAMOV CCR, ccr */ off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); - x = pxs->r->x; - do { - /* Error if xfer length is not aligned at burst size */ - if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) - return -EINVAL; - - pxs->x = x; - off += _setup_xfer(dry_run, &buf[off], pxs); + x = &pxs->desc->px; + /* Error if xfer length is not aligned at burst size */ + if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) + return -EINVAL; - x = x->next; - } while (x); + off += _setup_xfer(dry_run, &buf[off], pxs); /* DMASEV peripheral/event */ off += _emit_SEV(dry_run, &buf[off], thrd->ev); @@ -1495,31 +1356,15 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) return ccr; } -static inline bool _is_valid(u32 ccr) -{ - enum pl330_dstcachectrl dcctl; - enum pl330_srccachectrl scctl; - - dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; - scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; - - if (dcctl == DINVALID1 || dcctl == DINVALID2 - || scctl == SINVALID1 || scctl == SINVALID2) - return false; - else - return true; -} - /* * Submit a list of xfers after which the client wants notification. * Client is not notified after each xfer unit, just once after all * xfer units are done or some error occurs. 
*/ -static int pl330_submit_req(void *ch_id, struct pl330_req *r) +static int pl330_submit_req(struct pl330_thread *thrd, + struct dma_pl330_desc *desc) { - struct pl330_thread *thrd = ch_id; - struct pl330_dmac *pl330; - struct pl330_info *pi; + struct pl330_dmac *pl330 = thrd->dmac; struct _xfer_spec xs; unsigned long flags; void __iomem *regs; @@ -1528,25 +1373,24 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) int ret = 0; /* No Req or Unacquired Channel or DMAC */ - if (!r || !thrd || thrd->free) + if (!desc || !thrd || thrd->free) return -EINVAL; - pl330 = thrd->dmac; - pi = pl330->pinfo; - regs = pi->base; + regs = thrd->dmac->base; if (pl330->state == DYING || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { - dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", + dev_info(thrd->dmac->ddma.dev, "%s:%d\n", __func__, __LINE__); return -EAGAIN; } /* If request for non-existing peripheral */ - if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { - dev_info(thrd->dmac->pinfo->dev, + if (desc->rqtype != DMA_MEM_TO_MEM && + desc->peri >= pl330->pcfg.num_peri) { + dev_info(thrd->dmac->ddma.dev, "%s:%d Invalid peripheral(%u)!\n", - __func__, __LINE__, r->peri); + __func__, __LINE__, desc->peri); return -EINVAL; } @@ -1557,41 +1401,26 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) goto xfer_exit; } + /* Prefer Secure Channel */ + if (!_manager_ns(thrd)) + desc->rqcfg.nonsecure = 0; + else + desc->rqcfg.nonsecure = 1; - /* Use last settings, if not provided */ - if (r->cfg) { - /* Prefer Secure Channel */ - if (!_manager_ns(thrd)) - r->cfg->nonsecure = 0; - else - r->cfg->nonsecure = 1; - - ccr = _prepare_ccr(r->cfg); - } else { - ccr = readl(regs + CC(thrd->id)); - } - - /* If this req doesn't have valid xfer settings */ - if (!_is_valid(ccr)) { - ret = -EINVAL; - dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", - __func__, __LINE__, ccr); - goto xfer_exit; - } + ccr = _prepare_ccr(&desc->rqcfg); - idx = IS_FREE(&thrd->req[0]) ? 0 : 1; + idx = thrd->req[0].desc == NULL ? 
0 : 1; xs.ccr = ccr; - xs.r = r; + xs.desc = desc; /* First dry run to check if req is acceptable */ ret = _setup_req(1, thrd, idx, &xs); if (ret < 0) goto xfer_exit; - if (ret > pi->mcbufsz / 2) { - dev_info(thrd->dmac->pinfo->dev, - "%s:%d Trying increasing mcbufsz\n", + if (ret > pl330->mcbufsz / 2) { + dev_info(pl330->ddma.dev, "%s:%d Trying increasing mcbufsz\n", __func__, __LINE__); ret = -ENOMEM; goto xfer_exit; @@ -1599,8 +1428,8 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) /* Hook the request */ thrd->lstenq = idx; - thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); - thrd->req[idx].r = r; + thrd->req[idx].desc = desc; + _setup_req(0, thrd, idx, &xs); ret = 0; @@ -1610,10 +1439,32 @@ xfer_exit: return ret; } +static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) +{ + struct dma_pl330_chan *pch; + unsigned long flags; + + if (!desc) + return; + + pch = desc->pchan; + + /* If desc aborted */ + if (!pch) + return; + + spin_lock_irqsave(&pch->lock, flags); + + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + tasklet_schedule(&pch->task); +} + static void pl330_dotask(unsigned long data) { struct pl330_dmac *pl330 = (struct pl330_dmac *) data; - struct pl330_info *pi = pl330->pinfo; unsigned long flags; int i; @@ -1631,16 +1482,16 @@ static void pl330_dotask(unsigned long data) if (pl330->dmac_tbd.reset_mngr) { _stop(pl330->manager); /* Reset all channels */ - pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; + pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1; /* Clear the reset flag */ pl330->dmac_tbd.reset_mngr = false; } - for (i = 0; i < pi->pcfg.num_chan; i++) { + for (i = 0; i < pl330->pcfg.num_chan; i++) { if (pl330->dmac_tbd.reset_chan & (1 << i)) { struct pl330_thread *thrd = &pl330->channels[i]; - void __iomem *regs = pi->base; + void __iomem *regs = pl330->base; enum pl330_op_err err; _stop(thrd); @@ -1651,16 +1502,13 @@ static void pl330_dotask(unsigned long data) err = PL330_ERR_ABORT; spin_unlock_irqrestore(&pl330->lock, flags); - - _callback(thrd->req[1 - thrd->lstenq].r, err); - _callback(thrd->req[thrd->lstenq].r, err); - + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); + dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err); spin_lock_irqsave(&pl330->lock, flags); - thrd->req[0].r = NULL; - thrd->req[1].r = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); + thrd->req[0].desc = NULL; + thrd->req[1].desc = NULL; + thrd->req_running = -1; /* Clear the reset flag */ pl330->dmac_tbd.reset_chan &= ~(1 << i); @@ -1673,20 +1521,15 @@ static void pl330_dotask(unsigned long data) } /* Returns 1 if state was updated, 0 otherwise */ -static int pl330_update(const struct pl330_info *pi) +static int pl330_update(struct pl330_dmac *pl330) { - struct pl330_req *rqdone, *tmp; - struct pl330_dmac *pl330; + struct dma_pl330_desc *descdone, *tmp; unsigned long flags; void __iomem *regs; u32 val; int id, ev, ret = 0; - if (!pi || !pi->pl330_data) - return 0; - - regs = pi->base; - pl330 = pi->pl330_data; + regs = pl330->base; spin_lock_irqsave(&pl330->lock, flags); @@ -1696,13 +1539,13 @@ static int pl330_update(const struct pl330_info *pi) else pl330->dmac_tbd.reset_mngr = false; - val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); + val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1); pl330->dmac_tbd.reset_chan |= val; if (val) { int i = 0; - while (i < pi->pcfg.num_chan) { + while (i < pl330->pcfg.num_chan) { if (val & (1 << i)) { - dev_info(pi->dev, + 
dev_info(pl330->ddma.dev, "Reset Channel-%d\t CS-%x FTC-%x\n", i, readl(regs + CS(i)), readl(regs + FTC(i))); @@ -1714,15 +1557,16 @@ static int pl330_update(const struct pl330_info *pi) /* Check which event happened i.e, thread notified */ val = readl(regs + ES); - if (pi->pcfg.num_events < 32 - && val & ~((1 << pi->pcfg.num_events) - 1)) { + if (pl330->pcfg.num_events < 32 + && val & ~((1 << pl330->pcfg.num_events) - 1)) { pl330->dmac_tbd.reset_dmac = true; - dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); + dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__, + __LINE__); ret = 1; goto updt_exit; } - for (ev = 0; ev < pi->pcfg.num_events; ev++) { + for (ev = 0; ev < pl330->pcfg.num_events; ev++) { if (val & (1 << ev)) { /* Event occurred */ struct pl330_thread *thrd; u32 inten = readl(regs + INTEN); @@ -1743,25 +1587,22 @@ static int pl330_update(const struct pl330_info *pi) continue; /* Detach the req */ - rqdone = thrd->req[active].r; - thrd->req[active].r = NULL; - - mark_free(thrd, active); + descdone = thrd->req[active].desc; + thrd->req[active].desc = NULL; /* Get going again ASAP */ _start(thrd); /* For now, just make a list of callbacks to be done */ - list_add_tail(&rqdone->rqd, &pl330->req_done); + list_add_tail(&descdone->rqd, &pl330->req_done); } } /* Now that we are in no hurry, do the callbacks */ - list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) { - list_del(&rqdone->rqd); - + list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { + list_del(&descdone->rqd); spin_unlock_irqrestore(&pl330->lock, flags); - _callback(rqdone, PL330_ERR_NONE); + dma_pl330_rqcb(descdone, PL330_ERR_NONE); spin_lock_irqsave(&pl330->lock, flags); } @@ -1778,65 +1619,13 @@ updt_exit: return ret; } -static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) -{ - struct pl330_thread *thrd = ch_id; - struct pl330_dmac *pl330; - unsigned long flags; - int ret = 0, active; - - if (!thrd || thrd->free || thrd->dmac->state == DYING) - return -EINVAL; - - pl330 = thrd->dmac; - active = thrd->req_running; - - spin_lock_irqsave(&pl330->lock, flags); - - switch (op) { - case PL330_OP_FLUSH: - /* Make sure the channel is stopped */ - _stop(thrd); - - thrd->req[0].r = NULL; - thrd->req[1].r = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); - break; - - case PL330_OP_ABORT: - /* Make sure the channel is stopped */ - _stop(thrd); - - /* ABORT is only for the active req */ - if (active == -1) - break; - - thrd->req[active].r = NULL; - mark_free(thrd, active); - - /* Start the next */ - case PL330_OP_START: - if ((active == -1) && !_start(thrd)) - ret = -EIO; - break; - - default: - ret = -EINVAL; - } - - spin_unlock_irqrestore(&pl330->lock, flags); - return ret; -} - /* Reserve an event */ static inline int _alloc_event(struct pl330_thread *thrd) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; int ev; - for (ev = 0; ev < pi->pcfg.num_events; ev++) + for (ev = 0; ev < pl330->pcfg.num_events; ev++) if (pl330->events[ev] == -1) { pl330->events[ev] = thrd->id; return ev; @@ -1845,45 +1634,38 @@ static inline int _alloc_event(struct pl330_thread *thrd) return -1; } -static bool _chan_ns(const struct pl330_info *pi, int i) +static bool _chan_ns(const struct pl330_dmac *pl330, int i) { - return pi->pcfg.irq_ns & (1 << i); + return pl330->pcfg.irq_ns & (1 << i); } /* Upon success, returns IdentityToken for the * allocated channel, NULL otherwise. 
*/ -static void *pl330_request_channel(const struct pl330_info *pi) +static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) { struct pl330_thread *thrd = NULL; - struct pl330_dmac *pl330; unsigned long flags; int chans, i; - if (!pi || !pi->pl330_data) - return NULL; - - pl330 = pi->pl330_data; - if (pl330->state == DYING) return NULL; - chans = pi->pcfg.num_chan; + chans = pl330->pcfg.num_chan; spin_lock_irqsave(&pl330->lock, flags); for (i = 0; i < chans; i++) { thrd = &pl330->channels[i]; if ((thrd->free) && (!_manager_ns(thrd) || - _chan_ns(pi, i))) { + _chan_ns(pl330, i))) { thrd->ev = _alloc_event(thrd); if (thrd->ev >= 0) { thrd->free = false; thrd->lstenq = 1; - thrd->req[0].r = NULL; - mark_free(thrd, 0); - thrd->req[1].r = NULL; - mark_free(thrd, 1); + thrd->req[0].desc = NULL; + thrd->req[1].desc = NULL; + thrd->req_running = -1; break; } } @@ -1899,17 +1681,15 @@ static void *pl330_request_channel(const struct pl330_info *pi) static inline void _free_event(struct pl330_thread *thrd, int ev) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; /* If the event is valid and was held by the thread */ - if (ev >= 0 && ev < pi->pcfg.num_events + if (ev >= 0 && ev < pl330->pcfg.num_events && pl330->events[ev] == thrd->id) pl330->events[ev] = -1; } -static void pl330_release_channel(void *ch_id) +static void pl330_release_channel(struct pl330_thread *thrd) { - struct pl330_thread *thrd = ch_id; struct pl330_dmac *pl330; unsigned long flags; @@ -1918,8 +1698,8 @@ static void pl330_release_channel(void *ch_id) _stop(thrd); - _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); - _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); + dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); pl330 = thrd->dmac; @@ -1932,72 +1712,70 @@ static void pl330_release_channel(void *ch_id) /* Initialize the structure for PL330 configuration, that can be used * by the client driver the make best use of the DMAC */ -static void read_dmac_config(struct pl330_info *pi) +static void read_dmac_config(struct pl330_dmac *pl330) { - void __iomem *regs = pi->base; + void __iomem *regs = pl330->base; u32 val; val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; val &= CRD_DATA_WIDTH_MASK; - pi->pcfg.data_bus_width = 8 * (1 << val); + pl330->pcfg.data_bus_width = 8 * (1 << val); val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; val &= CRD_DATA_BUFF_MASK; - pi->pcfg.data_buf_dep = val + 1; + pl330->pcfg.data_buf_dep = val + 1; val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; val &= CR0_NUM_CHANS_MASK; val += 1; - pi->pcfg.num_chan = val; + pl330->pcfg.num_chan = val; val = readl(regs + CR0); if (val & CR0_PERIPH_REQ_SET) { val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; val += 1; - pi->pcfg.num_peri = val; - pi->pcfg.peri_ns = readl(regs + CR4); + pl330->pcfg.num_peri = val; + pl330->pcfg.peri_ns = readl(regs + CR4); } else { - pi->pcfg.num_peri = 0; + pl330->pcfg.num_peri = 0; } val = readl(regs + CR0); if (val & CR0_BOOT_MAN_NS) - pi->pcfg.mode |= DMAC_MODE_NS; + pl330->pcfg.mode |= DMAC_MODE_NS; else - pi->pcfg.mode &= ~DMAC_MODE_NS; + pl330->pcfg.mode &= ~DMAC_MODE_NS; val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; val &= CR0_NUM_EVENTS_MASK; val += 1; - pi->pcfg.num_events = val; + pl330->pcfg.num_events = val; - pi->pcfg.irq_ns = readl(regs + CR3); + pl330->pcfg.irq_ns = readl(regs + CR3); } static inline void _reset_thread(struct pl330_thread *thrd) { struct pl330_dmac 
*pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; thrd->req[0].mc_cpu = pl330->mcode_cpu - + (thrd->id * pi->mcbufsz); + + (thrd->id * pl330->mcbufsz); thrd->req[0].mc_bus = pl330->mcode_bus - + (thrd->id * pi->mcbufsz); - thrd->req[0].r = NULL; - mark_free(thrd, 0); + + (thrd->id * pl330->mcbufsz); + thrd->req[0].desc = NULL; thrd->req[1].mc_cpu = thrd->req[0].mc_cpu - + pi->mcbufsz / 2; + + pl330->mcbufsz / 2; thrd->req[1].mc_bus = thrd->req[0].mc_bus - + pi->mcbufsz / 2; - thrd->req[1].r = NULL; - mark_free(thrd, 1); + + pl330->mcbufsz / 2; + thrd->req[1].desc = NULL; + + thrd->req_running = -1; } static int dmac_alloc_threads(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; + int chans = pl330->pcfg.num_chan; struct pl330_thread *thrd; int i; @@ -2028,29 +1806,28 @@ static int dmac_alloc_threads(struct pl330_dmac *pl330) static int dmac_alloc_resources(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; + int chans = pl330->pcfg.num_chan; int ret; /* * Alloc MicroCode buffer for 'chans' Channel threads. * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) */ - pl330->mcode_cpu = dma_alloc_coherent(pi->dev, - chans * pi->mcbufsz, + pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev, + chans * pl330->mcbufsz, &pl330->mcode_bus, GFP_KERNEL); if (!pl330->mcode_cpu) { - dev_err(pi->dev, "%s:%d Can't allocate memory!\n", + dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", __func__, __LINE__); return -ENOMEM; } ret = dmac_alloc_threads(pl330); if (ret) { - dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n", + dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n", __func__, __LINE__); - dma_free_coherent(pi->dev, - chans * pi->mcbufsz, + dma_free_coherent(pl330->ddma.dev, + chans * pl330->mcbufsz, pl330->mcode_cpu, pl330->mcode_bus); return ret; } @@ -2058,71 +1835,45 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330) return 0; } -static int pl330_add(struct pl330_info *pi) +static int pl330_add(struct pl330_dmac *pl330) { - struct pl330_dmac *pl330; void __iomem *regs; int i, ret; - if (!pi || !pi->dev) - return -EINVAL; - - /* If already added */ - if (pi->pl330_data) - return -EINVAL; - - /* - * If the SoC can perform reset on the DMAC, then do it - * before reading its configuration. 
- */ - if (pi->dmac_reset) - pi->dmac_reset(pi); - - regs = pi->base; + regs = pl330->base; /* Check if we can handle this DMAC */ - if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { - dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id); + if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { + dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n", + pl330->pcfg.periph_id); return -EINVAL; } /* Read the configuration of the DMAC */ - read_dmac_config(pi); + read_dmac_config(pl330); - if (pi->pcfg.num_events == 0) { - dev_err(pi->dev, "%s:%d Can't work without events!\n", + if (pl330->pcfg.num_events == 0) { + dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n", __func__, __LINE__); return -EINVAL; } - pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); - if (!pl330) { - dev_err(pi->dev, "%s:%d Can't allocate memory!\n", - __func__, __LINE__); - return -ENOMEM; - } - - /* Assign the info structure and private data */ - pl330->pinfo = pi; - pi->pl330_data = pl330; - spin_lock_init(&pl330->lock); INIT_LIST_HEAD(&pl330->req_done); /* Use default MC buffer size if not provided */ - if (!pi->mcbufsz) - pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; + if (!pl330->mcbufsz) + pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2; /* Mark all events as free */ - for (i = 0; i < pi->pcfg.num_events; i++) + for (i = 0; i < pl330->pcfg.num_events; i++) pl330->events[i] = -1; /* Allocate resources needed by the DMAC */ ret = dmac_alloc_resources(pl330); if (ret) { - dev_err(pi->dev, "Unable to create channels for DMAC\n"); - kfree(pl330); + dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n"); return ret; } @@ -2135,15 +1886,13 @@ static int pl330_add(struct pl330_info *pi) static int dmac_free_threads(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; struct pl330_thread *thrd; int i; /* Release Channel threads */ - for (i = 0; i < chans; i++) { + for (i = 0; i < pl330->pcfg.num_chan; i++) { thrd = &pl330->channels[i]; - pl330_release_channel((void *)thrd); + pl330_release_channel(thrd); } /* Free memory */ @@ -2152,35 +1901,18 @@ static int dmac_free_threads(struct pl330_dmac *pl330) return 0; } -static void dmac_free_resources(struct pl330_dmac *pl330) +static void pl330_del(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; - - dmac_free_threads(pl330); - - dma_free_coherent(pi->dev, chans * pi->mcbufsz, - pl330->mcode_cpu, pl330->mcode_bus); -} - -static void pl330_del(struct pl330_info *pi) -{ - struct pl330_dmac *pl330; - - if (!pi || !pi->pl330_data) - return; - - pl330 = pi->pl330_data; - pl330->state = UNINIT; tasklet_kill(&pl330->tasks); /* Free DMAC resources */ - dmac_free_resources(pl330); + dmac_free_threads(pl330); - kfree(pl330); - pi->pl330_data = NULL; + dma_free_coherent(pl330->ddma.dev, + pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu, + pl330->mcode_bus); } /* forward declaration */ @@ -2212,8 +1944,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) if (desc->status == BUSY) continue; - ret = pl330_submit_req(pch->pl330_chid, - &desc->req); + ret = pl330_submit_req(pch->thread, desc); if (!ret) { desc->status = BUSY; } else if (ret == -EAGAIN) { @@ -2222,7 +1953,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) } else { /* Unacceptable request */ desc->status = DONE; - dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n", __func__, __LINE__, desc->txd.cookie); tasklet_schedule(&pch->task); } @@ 
-2249,7 +1980,9 @@ static void pl330_tasklet(unsigned long data) fill_queue(pch); /* Make sure the PL330 Channel thread is active */ - pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); + spin_lock(&pch->thread->dmac->lock); + _start(pch->thread); + spin_unlock(&pch->thread->dmac->lock); while (!list_empty(&pch->completed_list)) { dma_async_tx_callback callback; @@ -2280,25 +2013,6 @@ static void pl330_tasklet(unsigned long data) spin_unlock_irqrestore(&pch->lock, flags); } -static void dma_pl330_rqcb(void *token, enum pl330_op_err err) -{ - struct dma_pl330_desc *desc = token; - struct dma_pl330_chan *pch = desc->pchan; - unsigned long flags; - - /* If desc aborted */ - if (!pch) - return; - - spin_lock_irqsave(&pch->lock, flags); - - desc->status = DONE; - - spin_unlock_irqrestore(&pch->lock, flags); - - tasklet_schedule(&pch->task); -} - bool pl330_filter(struct dma_chan *chan, void *param) { u8 *peri_id; @@ -2315,23 +2029,26 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { int count = dma_spec->args_count; - struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; + struct pl330_dmac *pl330 = ofdma->of_dma_data; unsigned int chan_id; + if (!pl330) + return NULL; + if (count != 1) return NULL; chan_id = dma_spec->args[0]; - if (chan_id >= pdmac->num_peripherals) + if (chan_id >= pl330->num_peripherals) return NULL; - return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan); + return dma_get_slave_channel(&pl330->peripherals[chan_id].chan); } static int pl330_alloc_chan_resources(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; unsigned long flags; spin_lock_irqsave(&pch->lock, flags); @@ -2339,8 +2056,8 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) dma_cookie_init(chan); pch->cyclic = false; - pch->pl330_chid = pl330_request_channel(&pdmac->pif); - if (!pch->pl330_chid) { + pch->thread = pl330_request_channel(pl330); + if (!pch->thread) { spin_unlock_irqrestore(&pch->lock, flags); return -ENOMEM; } @@ -2357,7 +2074,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned struct dma_pl330_chan *pch = to_pchan(chan); struct dma_pl330_desc *desc; unsigned long flags; - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; struct dma_slave_config *slave_config; LIST_HEAD(list); @@ -2365,8 +2082,13 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned case DMA_TERMINATE_ALL: spin_lock_irqsave(&pch->lock, flags); - /* FLUSH the PL330 Channel thread */ - pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); + spin_lock(&pl330->lock); + _stop(pch->thread); + spin_unlock(&pl330->lock); + + pch->thread->req[0].desc = NULL; + pch->thread->req[1].desc = NULL; + pch->thread->req_running = -1; /* Mark all desc done */ list_for_each_entry(desc, &pch->submitted_list, node) { @@ -2384,9 +2106,9 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned dma_cookie_complete(&desc->txd); } - list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool); - list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); - list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); + list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); + list_splice_tail_init(&pch->work_list, &pl330->desc_pool); + list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); break; case 
DMA_SLAVE_CONFIG: @@ -2409,7 +2131,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned } break; default: - dev_err(pch->dmac->pif.dev, "Not supported command.\n"); + dev_err(pch->dmac->ddma.dev, "Not supported command.\n"); return -ENXIO; } @@ -2425,8 +2147,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&pch->lock, flags); - pl330_release_channel(pch->pl330_chid); - pch->pl330_chid = NULL; + pl330_release_channel(pch->thread); + pch->thread = NULL; if (pch->cyclic) list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); @@ -2489,57 +2211,46 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) static inline void _init_desc(struct dma_pl330_desc *desc) { - desc->req.x = &desc->px; - desc->req.token = desc; desc->rqcfg.swap = SWAP_NO; - desc->rqcfg.scctl = SCCTRL0; - desc->rqcfg.dcctl = DCCTRL0; - desc->req.cfg = &desc->rqcfg; - desc->req.xfer_cb = dma_pl330_rqcb; + desc->rqcfg.scctl = CCTRL0; + desc->rqcfg.dcctl = CCTRL0; desc->txd.tx_submit = pl330_tx_submit; INIT_LIST_HEAD(&desc->node); } /* Returns the number of descriptors added to the DMAC pool */ -static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) +static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count) { struct dma_pl330_desc *desc; unsigned long flags; int i; - if (!pdmac) - return 0; - desc = kcalloc(count, sizeof(*desc), flg); if (!desc) return 0; - spin_lock_irqsave(&pdmac->pool_lock, flags); + spin_lock_irqsave(&pl330->pool_lock, flags); for (i = 0; i < count; i++) { _init_desc(&desc[i]); - list_add_tail(&desc[i].node, &pdmac->desc_pool); + list_add_tail(&desc[i].node, &pl330->desc_pool); } - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); return count; } -static struct dma_pl330_desc * -pluck_desc(struct dma_pl330_dmac *pdmac) +static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330) { struct dma_pl330_desc *desc = NULL; unsigned long flags; - if (!pdmac) - return NULL; - - spin_lock_irqsave(&pdmac->pool_lock, flags); + spin_lock_irqsave(&pl330->pool_lock, flags); - if (!list_empty(&pdmac->desc_pool)) { - desc = list_entry(pdmac->desc_pool.next, + if (!list_empty(&pl330->desc_pool)) { + desc = list_entry(pl330->desc_pool.next, struct dma_pl330_desc, node); list_del_init(&desc->node); @@ -2548,29 +2259,29 @@ pluck_desc(struct dma_pl330_dmac *pdmac) desc->txd.callback = NULL; } - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); return desc; } static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) { - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; u8 *peri_id = pch->chan.private; struct dma_pl330_desc *desc; /* Pluck one desc from the pool of DMAC */ - desc = pluck_desc(pdmac); + desc = pluck_desc(pl330); /* If the DMAC pool is empty, alloc new */ if (!desc) { - if (!add_desc(pdmac, GFP_ATOMIC, 1)) + if (!add_desc(pl330, GFP_ATOMIC, 1)) return NULL; /* Try again */ - desc = pluck_desc(pdmac); + desc = pluck_desc(pl330); if (!desc) { - dev_err(pch->dmac->pif.dev, + dev_err(pch->dmac->ddma.dev, "%s:%d ALERT!\n", __func__, __LINE__); return NULL; } @@ -2581,8 +2292,8 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) desc->txd.cookie = 0; async_tx_ack(&desc->txd); - desc->req.peri = peri_id ? pch->chan.chan_id : 0; - desc->rqcfg.pcfg = &pch->dmac->pif.pcfg; + desc->peri = peri_id ? 
pch->chan.chan_id : 0; + desc->rqcfg.pcfg = &pch->dmac->pcfg; dma_async_tx_descriptor_init(&desc->txd, &pch->chan); @@ -2592,7 +2303,6 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) static inline void fill_px(struct pl330_xfer *px, dma_addr_t dst, dma_addr_t src, size_t len) { - px->next = NULL; px->bytes = len; px->dst_addr = dst; px->src_addr = src; @@ -2605,7 +2315,7 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, struct dma_pl330_desc *desc = pl330_get_desc(pch); if (!desc) { - dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __func__, __LINE__); return NULL; } @@ -2629,11 +2339,11 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) { struct dma_pl330_chan *pch = desc->pchan; - struct pl330_info *pi = &pch->dmac->pif; + struct pl330_dmac *pl330 = pch->dmac; int burst_len; - burst_len = pi->pcfg.data_bus_width / 8; - burst_len *= pi->pcfg.data_buf_dep; + burst_len = pl330->pcfg.data_bus_width / 8; + burst_len *= pl330->pcfg.data_buf_dep; burst_len >>= desc->rqcfg.brst_size; /* src/dst_burst_len can't be more than 16 */ @@ -2652,11 +2362,11 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct dma_pl330_desc *desc = NULL, *first = NULL; struct dma_pl330_chan *pch = to_pchan(chan); - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; unsigned int i; dma_addr_t dst; dma_addr_t src; @@ -2665,7 +2375,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( return NULL; if (!is_slave_direction(direction)) { - dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n", __func__, __LINE__); return NULL; } @@ -2673,23 +2383,23 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( for (i = 0; i < len / period_len; i++) { desc = pl330_get_desc(pch); if (!desc) { - dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __func__, __LINE__); if (!first) return NULL; - spin_lock_irqsave(&pdmac->pool_lock, flags); + spin_lock_irqsave(&pl330->pool_lock, flags); while (!list_empty(&first->node)) { desc = list_entry(first->node.next, struct dma_pl330_desc, node); - list_move_tail(&desc->node, &pdmac->desc_pool); + list_move_tail(&desc->node, &pl330->desc_pool); } - list_move_tail(&first->node, &pdmac->desc_pool); + list_move_tail(&first->node, &pl330->desc_pool); - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); return NULL; } @@ -2698,14 +2408,12 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( case DMA_MEM_TO_DEV: desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; - desc->req.rqtype = MEMTODEV; src = dma_addr; dst = pch->fifo_addr; break; case DMA_DEV_TO_MEM: desc->rqcfg.src_inc = 0; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = DEVTOMEM; src = pch->fifo_addr; dst = dma_addr; break; @@ -2713,6 +2421,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( break; } + desc->rqtype = direction; desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; 
fill_px(&desc->px, dst, src, period_len); @@ -2740,24 +2449,22 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, { struct dma_pl330_desc *desc; struct dma_pl330_chan *pch = to_pchan(chan); - struct pl330_info *pi; + struct pl330_dmac *pl330 = pch->dmac; int burst; if (unlikely(!pch || !len)) return NULL; - pi = &pch->dmac->pif; - desc = __pl330_prep_dma_memcpy(pch, dst, src, len); if (!desc) return NULL; desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = MEMTOMEM; + desc->rqtype = DMA_MEM_TO_MEM; /* Select max possible burst size */ - burst = pi->pcfg.data_bus_width / 8; + burst = pl330->pcfg.data_bus_width / 8; while (burst > 1) { if (!(len % burst)) @@ -2776,7 +2483,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, return &desc->txd; } -static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac, +static void __pl330_giveback_desc(struct pl330_dmac *pl330, struct dma_pl330_desc *first) { unsigned long flags; @@ -2785,17 +2492,17 @@ static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac, if (!first) return; - spin_lock_irqsave(&pdmac->pool_lock, flags); + spin_lock_irqsave(&pl330->pool_lock, flags); while (!list_empty(&first->node)) { desc = list_entry(first->node.next, struct dma_pl330_desc, node); - list_move_tail(&desc->node, &pdmac->desc_pool); + list_move_tail(&desc->node, &pl330->desc_pool); } - list_move_tail(&first->node, &pdmac->desc_pool); + list_move_tail(&first->node, &pl330->desc_pool); - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); } static struct dma_async_tx_descriptor * @@ -2820,12 +2527,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc = pl330_get_desc(pch); if (!desc) { - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; - dev_err(pch->dmac->pif.dev, + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __func__, __LINE__); - __pl330_giveback_desc(pdmac, first); + __pl330_giveback_desc(pl330, first); return NULL; } @@ -2838,19 +2545,18 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (direction == DMA_MEM_TO_DEV) { desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; - desc->req.rqtype = MEMTODEV; fill_px(&desc->px, addr, sg_dma_address(sg), sg_dma_len(sg)); } else { desc->rqcfg.src_inc = 0; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = DEVTOMEM; fill_px(&desc->px, sg_dma_address(sg), addr, sg_dma_len(sg)); } desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; + desc->rqtype = direction; } /* Return the last desc in the chain */ @@ -2890,9 +2596,9 @@ static int pl330_probe(struct amba_device *adev, const struct amba_id *id) { struct dma_pl330_platdata *pdat; - struct dma_pl330_dmac *pdmac; + struct pl330_config *pcfg; + struct pl330_dmac *pl330; struct dma_pl330_chan *pch, *_p; - struct pl330_info *pi; struct dma_device *pd; struct resource *res; int i, ret, irq; @@ -2905,30 +2611,27 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return ret; /* Allocate a new DMAC and its Channels */ - pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); - if (!pdmac) { + pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL); + if (!pl330) { dev_err(&adev->dev, "unable to allocate mem\n"); return -ENOMEM; } - pi = &pdmac->pif; - pi->dev = &adev->dev; - pi->pl330_data = NULL; - pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; + pl330->mcbufsz = pdat ? 
pdat->mcbuf_sz : 0; res = &adev->res; - pi->base = devm_ioremap_resource(&adev->dev, res); - if (IS_ERR(pi->base)) - return PTR_ERR(pi->base); + pl330->base = devm_ioremap_resource(&adev->dev, res); + if (IS_ERR(pl330->base)) + return PTR_ERR(pl330->base); - amba_set_drvdata(adev, pdmac); + amba_set_drvdata(adev, pl330); for (i = 0; i < AMBA_NR_IRQS; i++) { irq = adev->irq[i]; if (irq) { ret = devm_request_irq(&adev->dev, irq, pl330_irq_handler, 0, - dev_name(&adev->dev), pi); + dev_name(&adev->dev), pl330); if (ret) return ret; } else { @@ -2936,38 +2639,40 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) } } - pi->pcfg.periph_id = adev->periphid; - ret = pl330_add(pi); + pcfg = &pl330->pcfg; + + pcfg->periph_id = adev->periphid; + ret = pl330_add(pl330); if (ret) return ret; - INIT_LIST_HEAD(&pdmac->desc_pool); - spin_lock_init(&pdmac->pool_lock); + INIT_LIST_HEAD(&pl330->desc_pool); + spin_lock_init(&pl330->pool_lock); /* Create a descriptor pool of default size */ - if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC)) + if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) dev_warn(&adev->dev, "unable to allocate desc\n"); - pd = &pdmac->ddma; + pd = &pl330->ddma; INIT_LIST_HEAD(&pd->channels); /* Initialize channel parameters */ if (pdat) - num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan); + num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan); else - num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); + num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); - pdmac->num_peripherals = num_chan; + pl330->num_peripherals = num_chan; - pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); - if (!pdmac->peripherals) { + pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); + if (!pl330->peripherals) { ret = -ENOMEM; - dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); + dev_err(&adev->dev, "unable to allocate pl330->peripherals\n"); goto probe_err2; } for (i = 0; i < num_chan; i++) { - pch = &pdmac->peripherals[i]; + pch = &pl330->peripherals[i]; if (!adev->dev.of_node) pch->chan.private = pdat ? 
&pdat->peri_id[i] : NULL; else @@ -2977,9 +2682,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) INIT_LIST_HEAD(&pch->work_list); INIT_LIST_HEAD(&pch->completed_list); spin_lock_init(&pch->lock); - pch->pl330_chid = NULL; + pch->thread = NULL; pch->chan.device = pd; - pch->dmac = pdmac; + pch->dmac = pl330; /* Add the channel to the DMAC list */ list_add_tail(&pch->chan.device_node, &pd->channels); @@ -2990,7 +2695,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->cap_mask = pdat->cap_mask; } else { dma_cap_set(DMA_MEMCPY, pd->cap_mask); - if (pi->pcfg.num_peri) { + if (pcfg->num_peri) { dma_cap_set(DMA_SLAVE, pd->cap_mask); dma_cap_set(DMA_CYCLIC, pd->cap_mask); dma_cap_set(DMA_PRIVATE, pd->cap_mask); @@ -3015,14 +2720,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) if (adev->dev.of_node) { ret = of_dma_controller_register(adev->dev.of_node, - of_dma_pl330_xlate, pdmac); + of_dma_pl330_xlate, pl330); if (ret) { dev_err(&adev->dev, "unable to register DMA to the generic DT DMA helpers\n"); } } - adev->dev.dma_parms = &pdmac->dma_parms; + adev->dev.dma_parms = &pl330->dma_parms; /* * This is the limit for transfers with a buswidth of 1, larger @@ -3037,14 +2742,13 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) "Loaded driver for PL330 DMAC-%d\n", adev->periphid); dev_info(&adev->dev, "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", - pi->pcfg.data_buf_dep, - pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, - pi->pcfg.num_peri, pi->pcfg.num_events); + pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, + pcfg->num_peri, pcfg->num_events); return 0; probe_err3: /* Idle the DMAC */ - list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, chan.device_node) { /* Remove the channel */ @@ -3055,27 +2759,23 @@ probe_err3: pl330_free_chan_resources(&pch->chan); } probe_err2: - pl330_del(pi); + pl330_del(pl330); return ret; } static int pl330_remove(struct amba_device *adev) { - struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); + struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; - struct pl330_info *pi; - - if (!pdmac) - return 0; if (adev->dev.of_node) of_dma_controller_free(adev->dev.of_node); - dma_async_device_unregister(&pdmac->ddma); + dma_async_device_unregister(&pl330->ddma); /* Idle the DMAC */ - list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, chan.device_node) { /* Remove the channel */ @@ -3086,9 +2786,7 @@ static int pl330_remove(struct amba_device *adev) pl330_free_chan_resources(&pch->chan); } - pi = &pdmac->pif; - - pl330_del(pi); + pl330_del(pl330); return 0; } diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 82c923146e49..7a4bbb0f80a5 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c @@ -61,12 +61,17 @@ struct bam_desc_hw { #define DESC_FLAG_INT BIT(15) #define DESC_FLAG_EOT BIT(14) #define DESC_FLAG_EOB BIT(13) +#define DESC_FLAG_NWD BIT(12) struct bam_async_desc { struct virt_dma_desc vd; u32 num_desc; u32 xfer_len; + + /* transaction flags, EOT|EOB|NWD */ + u16 flags; + struct bam_desc_hw *curr_desc; enum dma_transfer_direction dir; @@ -490,6 +495,14 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, if (!async_desc) goto err_out; + if (flags & DMA_PREP_FENCE) + async_desc->flags |= DESC_FLAG_NWD; + + if (flags & 
DMA_PREP_INTERRUPT) + async_desc->flags |= DESC_FLAG_EOT; + else + async_desc->flags |= DESC_FLAG_INT; + async_desc->num_desc = num_alloc; async_desc->curr_desc = async_desc->desc; async_desc->dir = direction; @@ -793,8 +806,11 @@ static void bam_start_dma(struct bam_chan *bchan) else async_desc->xfer_len = async_desc->num_desc; - /* set INT on last descriptor */ - desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; + /* set any special flags on the last descriptor */ + if (async_desc->num_desc == async_desc->xfer_len) + desc[async_desc->xfer_len - 1].flags = async_desc->flags; + else + desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { u32 partial = MAX_DESCRIPTORS - bchan->tail; diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 012520c9fd79..7416572d1e40 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -889,8 +889,7 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, - enum dma_transfer_direction direction, unsigned long flags, - void *context) + enum dma_transfer_direction direction, unsigned long flags) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 5ebdfbc1051e..4b0ef043729a 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -612,7 +612,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, - enum dma_transfer_direction dir, unsigned long flags, void *context) + enum dma_transfer_direction dir, unsigned long flags) { struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_desc *txd; diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 0f719816c91b..0349125a2e20 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -2,21 +2,39 @@ # DMA engine configuration for sh # +# +# DMA Engine Helpers +# + config SH_DMAE_BASE bool "Renesas SuperH DMA Engine support" - depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST + depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST + depends on !SUPERH || SH_DMA depends on !SH_DMA_API default y select DMA_ENGINE help Enable support for the Renesas SuperH DMA controllers. +# +# DMA Controllers +# + config SH_DMAE tristate "Renesas SuperH DMAC support" depends on SH_DMAE_BASE help Enable support for the Renesas SuperH DMA controllers. +if SH_DMAE + +config SH_DMAE_R8A73A4 + def_bool y + depends on ARCH_R8A73A4 + depends on OF + +endif + config SUDMAC tristate "Renesas SUDMAC support" depends on SH_DMAE_BASE @@ -34,7 +52,3 @@ config RCAR_AUDMAC_PP depends on SH_DMAE_BASE help Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. 
- -config SHDMA_R8A73A4 - def_bool y - depends on ARCH_R8A73A4 && SH_DMAE != n diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 1ce88b28cfc6..0a5cfdb76e45 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -1,10 +1,18 @@ +# +# DMA Engine Helpers +# + obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o -obj-$(CONFIG_SH_DMAE) += shdma.o + +# +# DMA Controllers +# + shdma-y := shdmac.o -ifeq ($(CONFIG_OF),y) -shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o -endif +shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o shdma-objs := $(shdma-y) +obj-$(CONFIG_SH_DMAE) += shdma.o + obj-$(CONFIG_SUDMAC) += sudmac.o obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index 2de77289a2e9..dabbf0aba2e9 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/dmaengine.h> +#include <linux/of_dma.h> #include <linux/platform_data/dma-rcar-audmapp.h> #include <linux/platform_device.h> #include <linux/shdma-base.h> @@ -45,8 +46,9 @@ struct audmapp_chan { struct shdma_chan shdma_chan; - struct audmapp_slave_config *config; void __iomem *base; + dma_addr_t slave_addr; + u32 chcr; }; struct audmapp_device { @@ -56,7 +58,16 @@ struct audmapp_device { void __iomem *chan_reg; }; +struct audmapp_desc { + struct shdma_desc shdma_desc; + dma_addr_t src; + dma_addr_t dst; +}; + +#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) + #define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan) +#define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc) #define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \ struct audmapp_device, shdma_dev.dma_dev) @@ -90,70 +101,82 @@ static void audmapp_halt(struct shdma_chan *schan) } static void audmapp_start_xfer(struct shdma_chan *schan, - struct shdma_desc *sdecs) + struct shdma_desc *sdesc) { struct audmapp_chan *auchan = to_chan(schan); struct audmapp_device *audev = to_dev(auchan); - struct audmapp_slave_config *cfg = auchan->config; + struct audmapp_desc *desc = to_desc(sdesc); struct device *dev = audev->dev; - u32 chcr = cfg->chcr | PDMACHCR_DE; + u32 chcr = auchan->chcr | PDMACHCR_DE; - dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n", - &cfg->src, &cfg->dst, cfg->chcr); + dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n", + &desc->src, &desc->dst, chcr); - audmapp_write(auchan, cfg->src, PDMASAR); - audmapp_write(auchan, cfg->dst, PDMADAR); + audmapp_write(auchan, desc->src, PDMASAR); + audmapp_write(auchan, desc->dst, PDMADAR); audmapp_write(auchan, chcr, PDMACHCR); } -static struct audmapp_slave_config * -audmapp_find_slave(struct audmapp_chan *auchan, int slave_id) +static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id, + u32 *chcr, dma_addr_t *dst) { struct audmapp_device *audev = to_dev(auchan); struct audmapp_pdata *pdata = audev->pdata; struct audmapp_slave_config *cfg; int i; + *chcr = 0; + *dst = 0; + + if (!pdata) { /* DT */ + *chcr = ((u32)slave_id) << 16; + auchan->shdma_chan.slave_id = (slave_id) >> 8; + return; + } + + /* non-DT */ + if (slave_id >= AUDMAPP_SLAVE_NUMBER) - return NULL; + return; for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) - if (cfg->slave_id == slave_id) - return cfg; - - return NULL; + if (cfg->slave_id == slave_id) { + *chcr = cfg->chcr; + *dst = cfg->dst; + break; + } } static int 
audmapp_set_slave(struct shdma_chan *schan, int slave_id, dma_addr_t slave_addr, bool try) { struct audmapp_chan *auchan = to_chan(schan); - struct audmapp_slave_config *cfg = - audmapp_find_slave(auchan, slave_id); + u32 chcr; + dma_addr_t dst; + + audmapp_get_config(auchan, slave_id, &chcr, &dst); - if (!cfg) - return -ENODEV; if (try) return 0; - auchan->config = cfg; + auchan->chcr = chcr; + auchan->slave_addr = slave_addr ? : dst; return 0; } static int audmapp_desc_setup(struct shdma_chan *schan, - struct shdma_desc *sdecs, + struct shdma_desc *sdesc, dma_addr_t src, dma_addr_t dst, size_t *len) { - struct audmapp_chan *auchan = to_chan(schan); - struct audmapp_slave_config *cfg = auchan->config; - - if (!cfg) - return -ENODEV; + struct audmapp_desc *desc = to_desc(sdesc); if (*len > (size_t)AUDMAPP_LEN_MAX) *len = (size_t)AUDMAPP_LEN_MAX; + desc->src = src; + desc->dst = dst; + return 0; } @@ -164,7 +187,9 @@ static void audmapp_setup_xfer(struct shdma_chan *schan, static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan) { - return 0; /* always fixed address */ + struct audmapp_chan *auchan = to_chan(schan); + + return auchan->slave_addr; } static bool audmapp_channel_busy(struct shdma_chan *schan) @@ -183,7 +208,7 @@ static bool audmapp_desc_completed(struct shdma_chan *schan, static struct shdma_desc *audmapp_embedded_desc(void *buf, int i) { - return &((struct shdma_desc *)buf)[i]; + return &((struct audmapp_desc *)buf)[i].shdma_desc; } static const struct shdma_ops audmapp_shdma_ops = { @@ -234,16 +259,39 @@ static void audmapp_chan_remove(struct audmapp_device *audev) dma_dev->chancnt = 0; } +static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + dma_cap_mask_t mask; + struct dma_chan *chan; + u32 chcr = dma_spec->args[0]; + + if (dma_spec->args_count != 1) + return NULL; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + chan = dma_request_channel(mask, shdma_chan_filter, NULL); + if (chan) + to_shdma_chan(chan)->hw_req = chcr; + + return chan; +} + static int audmapp_probe(struct platform_device *pdev) { struct audmapp_pdata *pdata = pdev->dev.platform_data; + struct device_node *np = pdev->dev.of_node; struct audmapp_device *audev; struct shdma_dev *sdev; struct dma_device *dma_dev; struct resource *res; int err, i; - if (!pdata) + if (np) + of_dma_controller_register(np, audmapp_of_xlate, pdev); + else if (!pdata) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -260,7 +308,7 @@ static int audmapp_probe(struct platform_device *pdev) sdev = &audev->shdma_dev; sdev->ops = &audmapp_shdma_ops; - sdev->desc_size = sizeof(struct shdma_desc); + sdev->desc_size = sizeof(struct audmapp_desc); dma_dev = &sdev->dma_dev; dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; @@ -305,12 +353,18 @@ static int audmapp_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id audmapp_of_match[] = { + { .compatible = "renesas,rcar-audmapp", }, + {}, +}; + static struct platform_driver audmapp_driver = { .probe = audmapp_probe, .remove = audmapp_remove, .driver = { .owner = THIS_MODULE, .name = "rcar-audmapp-engine", + .of_match_table = audmapp_of_match, }, }; module_platform_driver(audmapp_driver); diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h index a2b8258426c9..a1b0ef45d6a2 100644 --- a/drivers/dma/sh/shdma-arm.h +++ b/drivers/dma/sh/shdma-arm.h @@ -45,7 +45,7 @@ enum { ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ (((i) & TS_HI_BIT) << TS_HI_SHIFT)) -#define 
CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz))) -#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz))) +#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz))) +#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz))) #endif diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index b35007e21e6b..42d497416196 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -206,45 +206,6 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, return 0; } -/* - * This is the standard shdma filter function to be used as a replacement to the - * "old" method, using the .private pointer. If for some reason you allocate a - * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter - * parameter. If this filter is used, the slave driver, after calling - * dma_request_channel(), will also have to call dmaengine_slave_config() with - * .slave_id, .direction, and either .src_addr or .dst_addr set. - * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE - * capability! If this becomes a requirement, hardware glue drivers, using this - * services would have to provide their own filters, which first would check - * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do - * this, and only then, in case of a match, call this common filter. - * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). - * In that case the MID-RID value is used for slave channel filtering and is - * passed to this function in the "arg" parameter. - */ -bool shdma_chan_filter(struct dma_chan *chan, void *arg) -{ - struct shdma_chan *schan = to_shdma_chan(chan); - struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); - const struct shdma_ops *ops = sdev->ops; - int match = (long)arg; - int ret; - - if (match < 0) - /* No slave requested - arbitrary channel */ - return true; - - if (!schan->dev->of_node && match >= slave_num) - return false; - - ret = ops->set_slave(schan, match, 0, true); - if (ret < 0) - return false; - - return true; -} -EXPORT_SYMBOL(shdma_chan_filter); - static int shdma_alloc_chan_resources(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); @@ -295,6 +256,51 @@ esetslave: return ret; } +/* + * This is the standard shdma filter function to be used as a replacement to the + * "old" method, using the .private pointer. If for some reason you allocate a + * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter + * parameter. If this filter is used, the slave driver, after calling + * dma_request_channel(), will also have to call dmaengine_slave_config() with + * .slave_id, .direction, and either .src_addr or .dst_addr set. + * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE + * capability! If this becomes a requirement, hardware glue drivers, using this + * services would have to provide their own filters, which first would check + * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do + * this, and only then, in case of a match, call this common filter. + * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). + * In that case the MID-RID value is used for slave channel filtering and is + * passed to this function in the "arg" parameter. 
+ */ +bool shdma_chan_filter(struct dma_chan *chan, void *arg) +{ + struct shdma_chan *schan; + struct shdma_dev *sdev; + int match = (long)arg; + int ret; + + /* Only support channels handled by this driver. */ + if (chan->device->device_alloc_chan_resources != + shdma_alloc_chan_resources) + return false; + + if (match < 0) + /* No slave requested - arbitrary channel */ + return true; + + schan = to_shdma_chan(chan); + if (!schan->dev->of_node && match >= slave_num) + return false; + + sdev = to_shdma_dev(schan->dma_chan.device); + ret = sdev->ops->set_slave(schan, match, 0, true); + if (ret < 0) + return false; + + return true; +} +EXPORT_SYMBOL(shdma_chan_filter); + static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) { struct shdma_desc *desc, *_desc; @@ -662,15 +668,16 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg( static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + unsigned long flags) { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); + struct dma_async_tx_descriptor *desc; const struct shdma_ops *ops = sdev->ops; unsigned int sg_len = buf_len / period_len; int slave_id = schan->slave_id; dma_addr_t slave_addr; - struct scatterlist sgl[SHDMA_MAX_SG_LEN]; + struct scatterlist *sgl; int i; if (!chan) @@ -694,7 +701,16 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( slave_addr = ops->slave_addr(schan); + /* + * Allocate the sg list dynamically as it would consumer too much stack + * space. + */ + sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL); + if (!sgl) + return NULL; + sg_init_table(sgl, sg_len); + for (i = 0; i < sg_len; i++) { dma_addr_t src = buf_addr + (period_len * i); @@ -704,8 +720,11 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( sg_dma_len(&sgl[i]) = period_len; } - return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, + desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr, direction, flags, true); + + kfree(sgl); + return desc; } static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h index 758a57b51875..2c0a969adc9f 100644 --- a/drivers/dma/sh/shdma.h +++ b/drivers/dma/sh/shdma.h @@ -62,7 +62,7 @@ struct sh_dmae_desc { #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ struct sh_dmae_device, shdma_dev.dma_dev) -#ifdef CONFIG_SHDMA_R8A73A4 +#ifdef CONFIG_SH_DMAE_R8A73A4 extern const struct sh_dmae_pdata r8a73a4_dma_pdata; #define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) #else diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 146d5df926db..58eb85770eba 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -38,12 +38,12 @@ #include "../dmaengine.h" #include "shdma.h" -/* DMA register */ -#define SAR 0x00 -#define DAR 0x04 -#define TCR 0x08 -#define CHCR 0x0C -#define DMAOR 0x40 +/* DMA registers */ +#define SAR 0x00 /* Source Address Register */ +#define DAR 0x04 /* Destination Address Register */ +#define TCR 0x08 /* Transfer Count Register */ +#define CHCR 0x0C /* Channel Control Register */ +#define DMAOR 0x40 /* DMA Operation Register */ #define TEND 0x18 /* USB-DMAC */ @@ -239,9 +239,8 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) { /* * Default configuration for dual address memory-memory transfer. 
- * 0x400 represents auto-request. */ - u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, + u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE); sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); chcr_write(sh_chan, chcr); diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 03f7820fa333..aac03ab10c54 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -580,7 +580,7 @@ err_dir: static struct dma_async_tx_descriptor * sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, size_t buf_len, size_t period_len, - enum dma_transfer_direction direction, unsigned long flags, void *context) + enum dma_transfer_direction direction, unsigned long flags) { struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); struct sirfsoc_dma_desc *sdesc = NULL; diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c7984459ede7..5fe59335e247 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2531,8 +2531,7 @@ d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, static struct dma_async_tx_descriptor * dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, - enum dma_transfer_direction direction, unsigned long flags, - void *context) + enum dma_transfer_direction direction, unsigned long flags) { unsigned int periods = buf_len / period_len; struct dma_async_tx_descriptor *txd; diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c new file mode 100644 index 000000000000..1f92a56fd2b6 --- /dev/null +++ b/drivers/dma/sun6i-dma.c @@ -0,0 +1,1053 @@ +/* + * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd + * Author: Sugar <shuge@allwinnertech.com> + * + * Copyright (C) 2014 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/dmapool.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of_dma.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "virt-dma.h" + +/* + * There's 16 physical channels that can work in parallel. + * + * However we have 30 different endpoints for our requests. + * + * Since the channels are able to handle only an unidirectional + * transfer, we need to allocate more virtual channels so that + * everyone can grab one channel. + * + * Some devices can't work in both direction (mostly because it + * wouldn't make sense), so we have a bit fewer virtual channels than + * 2 channels per endpoints. 
+ */ + +#define NR_MAX_CHANNELS 16 +#define NR_MAX_REQUESTS 30 +#define NR_MAX_VCHANS 53 + +/* + * Common registers + */ +#define DMA_IRQ_EN(x) ((x) * 0x04) +#define DMA_IRQ_HALF BIT(0) +#define DMA_IRQ_PKG BIT(1) +#define DMA_IRQ_QUEUE BIT(2) + +#define DMA_IRQ_CHAN_NR 8 +#define DMA_IRQ_CHAN_WIDTH 4 + + +#define DMA_IRQ_STAT(x) ((x) * 0x04 + 0x10) + +#define DMA_STAT 0x30 + +/* + * Channels specific registers + */ +#define DMA_CHAN_ENABLE 0x00 +#define DMA_CHAN_ENABLE_START BIT(0) +#define DMA_CHAN_ENABLE_STOP 0 + +#define DMA_CHAN_PAUSE 0x04 +#define DMA_CHAN_PAUSE_PAUSE BIT(1) +#define DMA_CHAN_PAUSE_RESUME 0 + +#define DMA_CHAN_LLI_ADDR 0x08 + +#define DMA_CHAN_CUR_CFG 0x0c +#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f) +#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) +#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) +#define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7) +#define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) + +#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) +#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) +#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) +#define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16) +#define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) + +#define DMA_CHAN_CUR_SRC 0x10 + +#define DMA_CHAN_CUR_DST 0x14 + +#define DMA_CHAN_CUR_CNT 0x18 + +#define DMA_CHAN_CUR_PARA 0x1c + + +/* + * Various hardware related defines + */ +#define LLI_LAST_ITEM 0xfffff800 +#define NORMAL_WAIT 8 +#define DRQ_SDRAM 1 + +/* + * Hardware representation of the LLI + * + * The hardware will be fed the physical address of this structure, + * and read its content in order to start the transfer. + */ +struct sun6i_dma_lli { + u32 cfg; + u32 src; + u32 dst; + u32 len; + u32 para; + u32 p_lli_next; + + /* + * This field is not used by the DMA controller, but will be + * used by the CPU to go through the list (mostly for dumping + * or freeing it). 
+ */ + struct sun6i_dma_lli *v_lli_next; +}; + + +struct sun6i_desc { + struct virt_dma_desc vd; + dma_addr_t p_lli; + struct sun6i_dma_lli *v_lli; +}; + +struct sun6i_pchan { + u32 idx; + void __iomem *base; + struct sun6i_vchan *vchan; + struct sun6i_desc *desc; + struct sun6i_desc *done; +}; + +struct sun6i_vchan { + struct virt_dma_chan vc; + struct list_head node; + struct dma_slave_config cfg; + struct sun6i_pchan *phy; + u8 port; +}; + +struct sun6i_dma_dev { + struct dma_device slave; + void __iomem *base; + struct clk *clk; + int irq; + spinlock_t lock; + struct reset_control *rstc; + struct tasklet_struct task; + atomic_t tasklet_shutdown; + struct list_head pending; + struct dma_pool *pool; + struct sun6i_pchan *pchans; + struct sun6i_vchan *vchans; +}; + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d) +{ + return container_of(d, struct sun6i_dma_dev, slave); +} + +static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan) +{ + return container_of(chan, struct sun6i_vchan, vc.chan); +} + +static inline struct sun6i_desc * +to_sun6i_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct sun6i_desc, vd.tx); +} + +static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) +{ + dev_dbg(sdev->slave.dev, "Common register:\n" + "\tmask0(%04x): 0x%08x\n" + "\tmask1(%04x): 0x%08x\n" + "\tpend0(%04x): 0x%08x\n" + "\tpend1(%04x): 0x%08x\n" + "\tstats(%04x): 0x%08x\n", + DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)), + DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)), + DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)), + DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)), + DMA_STAT, readl(sdev->base + DMA_STAT)); +} + +static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, + struct sun6i_pchan *pchan) +{ + phys_addr_t reg = virt_to_phys(pchan->base); + + dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n" + "\t___en(%04x): \t0x%08x\n" + "\tpause(%04x): \t0x%08x\n" + "\tstart(%04x): \t0x%08x\n" + "\t__cfg(%04x): \t0x%08x\n" + "\t__src(%04x): \t0x%08x\n" + "\t__dst(%04x): \t0x%08x\n" + "\tcount(%04x): \t0x%08x\n" + "\t_para(%04x): \t0x%08x\n\n", + pchan->idx, ®, + DMA_CHAN_ENABLE, + readl(pchan->base + DMA_CHAN_ENABLE), + DMA_CHAN_PAUSE, + readl(pchan->base + DMA_CHAN_PAUSE), + DMA_CHAN_LLI_ADDR, + readl(pchan->base + DMA_CHAN_LLI_ADDR), + DMA_CHAN_CUR_CFG, + readl(pchan->base + DMA_CHAN_CUR_CFG), + DMA_CHAN_CUR_SRC, + readl(pchan->base + DMA_CHAN_CUR_SRC), + DMA_CHAN_CUR_DST, + readl(pchan->base + DMA_CHAN_CUR_DST), + DMA_CHAN_CUR_CNT, + readl(pchan->base + DMA_CHAN_CUR_CNT), + DMA_CHAN_CUR_PARA, + readl(pchan->base + DMA_CHAN_CUR_PARA)); +} + +static inline int convert_burst(u32 maxburst, u8 *burst) +{ + switch (maxburst) { + case 1: + *burst = 0; + break; + case 8: + *burst = 2; + break; + default: + return -EINVAL; + } + + return 0; +} + +static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) +{ + if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || + (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) + return -EINVAL; + + *width = addr_width >> 1; + return 0; +} + +static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, + struct sun6i_dma_lli *next, + dma_addr_t next_phy, + struct sun6i_desc *txd) +{ + if ((!prev && !txd) || !next) + return NULL; + + if (!prev) { + txd->p_lli = next_phy; + txd->v_lli = next; + } else { + prev->p_lli_next = next_phy; + prev->v_lli_next = next; + } + + 
next->p_lli_next = LLI_LAST_ITEM; + next->v_lli_next = NULL; + + return next; +} + +static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, + dma_addr_t src, + dma_addr_t dst, u32 len, + struct dma_slave_config *config) +{ + u8 src_width, dst_width, src_burst, dst_burst; + int ret; + + if (!config) + return -EINVAL; + + ret = convert_burst(config->src_maxburst, &src_burst); + if (ret) + return ret; + + ret = convert_burst(config->dst_maxburst, &dst_burst); + if (ret) + return ret; + + ret = convert_buswidth(config->src_addr_width, &src_width); + if (ret) + return ret; + + ret = convert_buswidth(config->dst_addr_width, &dst_width); + if (ret) + return ret; + + lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | + DMA_CHAN_CFG_SRC_WIDTH(src_width) | + DMA_CHAN_CFG_DST_BURST(dst_burst) | + DMA_CHAN_CFG_DST_WIDTH(dst_width); + + lli->src = src; + lli->dst = dst; + lli->len = len; + lli->para = NORMAL_WAIT; + + return 0; +} + +static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, + struct sun6i_dma_lli *lli) +{ + phys_addr_t p_lli = virt_to_phys(lli); + + dev_dbg(chan2dev(&vchan->vc.chan), + "\n\tdesc: p - %pa v - 0x%p\n" + "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" + "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", + &p_lli, lli, + lli->cfg, lli->src, lli->dst, + lli->len, lli->para, lli->p_lli_next); +} + +static void sun6i_dma_free_desc(struct virt_dma_desc *vd) +{ + struct sun6i_desc *txd = to_sun6i_desc(&vd->tx); + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device); + struct sun6i_dma_lli *v_lli, *v_next; + dma_addr_t p_lli, p_next; + + if (unlikely(!txd)) + return; + + p_lli = txd->p_lli; + v_lli = txd->v_lli; + + while (v_lli) { + v_next = v_lli->v_lli_next; + p_next = v_lli->p_lli_next; + + dma_pool_free(sdev->pool, v_lli, p_lli); + + v_lli = v_next; + p_lli = p_next; + } + + kfree(txd); +} + +static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); + struct sun6i_pchan *pchan = vchan->phy; + unsigned long flags; + LIST_HEAD(head); + + spin_lock(&sdev->lock); + list_del_init(&vchan->node); + spin_unlock(&sdev->lock); + + spin_lock_irqsave(&vchan->vc.lock, flags); + + vchan_get_all_descriptors(&vchan->vc, &head); + + if (pchan) { + writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); + writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); + + vchan->phy = NULL; + pchan->vchan = NULL; + pchan->desc = NULL; + pchan->done = NULL; + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + vchan_dma_desc_free_list(&vchan->vc, &head); + + return 0; +} + +static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); + struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc); + struct sun6i_pchan *pchan = vchan->phy; + u32 irq_val, irq_reg, irq_offset; + + if (!pchan) + return -EAGAIN; + + if (!desc) { + pchan->desc = NULL; + pchan->done = NULL; + return -EAGAIN; + } + + list_del(&desc->node); + + pchan->desc = to_sun6i_desc(&desc->tx); + pchan->done = NULL; + + sun6i_dma_dump_lli(vchan, pchan->desc->v_lli); + + irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; + irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; + + irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset)); + irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH); + writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset)); + + writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR); + writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE); + + 
sun6i_dma_dump_com_regs(sdev); + sun6i_dma_dump_chan_regs(sdev, pchan); + + return 0; +} + +static void sun6i_dma_tasklet(unsigned long data) +{ + struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; + struct sun6i_vchan *vchan; + struct sun6i_pchan *pchan; + unsigned int pchan_alloc = 0; + unsigned int pchan_idx; + + list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) { + spin_lock_irq(&vchan->vc.lock); + + pchan = vchan->phy; + + if (pchan && pchan->done) { + if (sun6i_dma_start_desc(vchan)) { + /* + * No current txd associated with this channel + */ + dev_dbg(sdev->slave.dev, "pchan %u: free\n", + pchan->idx); + + /* Mark this channel free */ + vchan->phy = NULL; + pchan->vchan = NULL; + } + } + spin_unlock_irq(&vchan->vc.lock); + } + + spin_lock_irq(&sdev->lock); + for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { + pchan = &sdev->pchans[pchan_idx]; + + if (pchan->vchan || list_empty(&sdev->pending)) + continue; + + vchan = list_first_entry(&sdev->pending, + struct sun6i_vchan, node); + + /* Remove from pending channels */ + list_del_init(&vchan->node); + pchan_alloc |= BIT(pchan_idx); + + /* Mark this channel allocated */ + pchan->vchan = vchan; + vchan->phy = pchan; + dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n", + pchan->idx, &vchan->vc); + } + spin_unlock_irq(&sdev->lock); + + for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { + if (!(pchan_alloc & BIT(pchan_idx))) + continue; + + pchan = sdev->pchans + pchan_idx; + vchan = pchan->vchan; + if (vchan) { + spin_lock_irq(&vchan->vc.lock); + sun6i_dma_start_desc(vchan); + spin_unlock_irq(&vchan->vc.lock); + } + } +} + +static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) +{ + struct sun6i_dma_dev *sdev = dev_id; + struct sun6i_vchan *vchan; + struct sun6i_pchan *pchan; + int i, j, ret = IRQ_NONE; + u32 status; + + for (i = 0; i < 2; i++) { + status = readl(sdev->base + DMA_IRQ_STAT(i)); + if (!status) + continue; + + dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n", + i ? "high" : "low", status); + + writel(status, sdev->base + DMA_IRQ_STAT(i)); + + for (j = 0; (j < 8) && status; j++) { + if (status & DMA_IRQ_QUEUE) { + pchan = sdev->pchans + j; + vchan = pchan->vchan; + + if (vchan) { + spin_lock(&vchan->vc.lock); + vchan_cookie_complete(&pchan->desc->vd); + pchan->done = pchan->desc; + spin_unlock(&vchan->vc.lock); + } + } + + status = status >> 4; + } + + if (!atomic_read(&sdev->tasklet_shutdown)) + tasklet_schedule(&sdev->task); + ret = IRQ_HANDLED; + } + + return ret; +} + +static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct dma_slave_config *sconfig = &vchan->cfg; + struct sun6i_dma_lli *v_lli; + struct sun6i_desc *txd; + dma_addr_t p_lli; + int ret; + + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %zu. 
flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, &dest, &src, len, flags); + + if (!len) + return NULL; + + txd = kzalloc(sizeof(*txd), GFP_NOWAIT); + if (!txd) + return NULL; + + v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + if (!v_lli) { + dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); + goto err_txd_free; + } + + ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); + if (ret) + goto err_dma_free; + + v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_DST_LINEAR_MODE | + DMA_CHAN_CFG_SRC_LINEAR_MODE; + + sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); + + sun6i_dma_dump_lli(vchan, v_lli); + + return vchan_tx_prep(&vchan->vc, &txd->vd, flags); + +err_dma_free: + dma_pool_free(sdev->pool, v_lli, p_lli); +err_txd_free: + kfree(txd); + return NULL; +} + +static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct dma_slave_config *sconfig = &vchan->cfg; + struct sun6i_dma_lli *v_lli, *prev = NULL; + struct sun6i_desc *txd; + struct scatterlist *sg; + dma_addr_t p_lli; + int i, ret; + + if (!sgl) + return NULL; + + if (!is_slave_direction(dir)) { + dev_err(chan2dev(chan), "Invalid DMA direction\n"); + return NULL; + } + + txd = kzalloc(sizeof(*txd), GFP_NOWAIT); + if (!txd) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + if (!v_lli) + goto err_lli_free; + + if (dir == DMA_MEM_TO_DEV) { + ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg), + sconfig->dst_addr, sg_dma_len(sg), + sconfig); + if (ret) + goto err_cur_lli_free; + + v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE | + DMA_CHAN_CFG_SRC_LINEAR_MODE | + DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_DST_DRQ(vchan->port); + + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, + &sconfig->dst_addr, &sg_dma_address(sg), + sg_dma_len(sg), flags); + + } else { + ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr, + sg_dma_address(sg), sg_dma_len(sg), + sconfig); + if (ret) + goto err_cur_lli_free; + + v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE | + DMA_CHAN_CFG_SRC_IO_MODE | + DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_SRC_DRQ(vchan->port); + + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %u. 
flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, + &sg_dma_address(sg), &sconfig->src_addr, + sg_dma_len(sg), flags); + } + + prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); + } + + dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); + for (prev = txd->v_lli; prev; prev = prev->v_lli_next) + sun6i_dma_dump_lli(vchan, prev); + + return vchan_tx_prep(&vchan->vc, &txd->vd, flags); + +err_cur_lli_free: + dma_pool_free(sdev->pool, v_lli, p_lli); +err_lli_free: + for (prev = txd->v_lli; prev; prev = prev->v_lli_next) + dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); + kfree(txd); + return NULL; +} + +static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct sun6i_pchan *pchan = vchan->phy; + unsigned long flags; + int ret = 0; + + switch (cmd) { + case DMA_RESUME: + dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); + + spin_lock_irqsave(&vchan->vc.lock, flags); + + if (pchan) { + writel(DMA_CHAN_PAUSE_RESUME, + pchan->base + DMA_CHAN_PAUSE); + } else if (!list_empty(&vchan->vc.desc_issued)) { + spin_lock(&sdev->lock); + list_add_tail(&vchan->node, &sdev->pending); + spin_unlock(&sdev->lock); + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); + break; + + case DMA_PAUSE: + dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); + + if (pchan) { + writel(DMA_CHAN_PAUSE_PAUSE, + pchan->base + DMA_CHAN_PAUSE); + } else { + spin_lock(&sdev->lock); + list_del_init(&vchan->node); + spin_unlock(&sdev->lock); + } + break; + + case DMA_TERMINATE_ALL: + ret = sun6i_dma_terminate_all(vchan); + break; + case DMA_SLAVE_CONFIG: + memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config)); + break; + default: + ret = -ENXIO; + break; + } + return ret; +} + +static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct sun6i_pchan *pchan = vchan->phy; + struct sun6i_dma_lli *lli; + struct virt_dma_desc *vd; + struct sun6i_desc *txd; + enum dma_status ret; + unsigned long flags; + size_t bytes = 0; + + ret = dma_cookie_status(chan, cookie, state); + if (ret == DMA_COMPLETE) + return ret; + + spin_lock_irqsave(&vchan->vc.lock, flags); + + vd = vchan_find_desc(&vchan->vc, cookie); + txd = to_sun6i_desc(&vd->tx); + + if (vd) { + for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next) + bytes += lli->len; + } else if (!pchan || !pchan->desc) { + bytes = 0; + } else { + bytes = readl(pchan->base + DMA_CHAN_CUR_CNT); + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + dma_set_residue(state, bytes); + + return ret; +} + +static void sun6i_dma_issue_pending(struct dma_chan *chan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + unsigned long flags; + + spin_lock_irqsave(&vchan->vc.lock, flags); + + if (vchan_issue_pending(&vchan->vc)) { + spin_lock(&sdev->lock); + + if (!vchan->phy && list_empty(&vchan->node)) { + list_add_tail(&vchan->node, &sdev->pending); + tasklet_schedule(&sdev->task); + dev_dbg(chan2dev(chan), "vchan %p: issued\n", + &vchan->vc); + } + + spin_unlock(&sdev->lock); + } else { + dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n", + &vchan->vc); + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); +} + +static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan) +{ + return 0; +} + +static 
void sun6i_dma_free_chan_resources(struct dma_chan *chan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + unsigned long flags; + + spin_lock_irqsave(&sdev->lock, flags); + list_del_init(&vchan->node); + spin_unlock_irqrestore(&sdev->lock, flags); + + vchan_free_chan_resources(&vchan->vc); +} + +static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct sun6i_dma_dev *sdev = ofdma->of_dma_data; + struct sun6i_vchan *vchan; + struct dma_chan *chan; + u8 port = dma_spec->args[0]; + + if (port > NR_MAX_REQUESTS) + return NULL; + + chan = dma_get_any_slave_channel(&sdev->slave); + if (!chan) + return NULL; + + vchan = to_sun6i_vchan(chan); + vchan->port = port; + + return chan; +} + +static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev) +{ + /* Disable all interrupts from DMA */ + writel(0, sdev->base + DMA_IRQ_EN(0)); + writel(0, sdev->base + DMA_IRQ_EN(1)); + + /* Prevent spurious interrupts from scheduling the tasklet */ + atomic_inc(&sdev->tasklet_shutdown); + + /* Make sure we won't have any further interrupts */ + devm_free_irq(sdev->slave.dev, sdev->irq, sdev); + + /* Actually prevent the tasklet from being scheduled */ + tasklet_kill(&sdev->task); +} + +static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) +{ + int i; + + for (i = 0; i < NR_MAX_VCHANS; i++) { + struct sun6i_vchan *vchan = &sdev->vchans[i]; + + list_del(&vchan->vc.chan.device_node); + tasklet_kill(&vchan->vc.task); + } +} + +static int sun6i_dma_probe(struct platform_device *pdev) +{ + struct sun6i_dma_dev *sdc; + struct resource *res; + struct clk *mux, *pll6; + int ret, i; + + sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); + if (!sdc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sdc->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sdc->base)) + return PTR_ERR(sdc->base); + + sdc->irq = platform_get_irq(pdev, 0); + if (sdc->irq < 0) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + return sdc->irq; + } + + sdc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(sdc->clk)) { + dev_err(&pdev->dev, "No clock specified\n"); + return PTR_ERR(sdc->clk); + } + + mux = clk_get(NULL, "ahb1_mux"); + if (IS_ERR(mux)) { + dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n"); + return PTR_ERR(mux); + } + + pll6 = clk_get(NULL, "pll6"); + if (IS_ERR(pll6)) { + dev_err(&pdev->dev, "Couldn't get PLL6\n"); + clk_put(mux); + return PTR_ERR(pll6); + } + + ret = clk_set_parent(mux, pll6); + clk_put(pll6); + clk_put(mux); + + if (ret) { + dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n"); + return ret; + } + + sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(sdc->rstc)) { + dev_err(&pdev->dev, "No reset controller specified\n"); + return PTR_ERR(sdc->rstc); + } + + sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, + sizeof(struct sun6i_dma_lli), 4, 0); + if (!sdc->pool) { + dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, sdc); + INIT_LIST_HEAD(&sdc->pending); + spin_lock_init(&sdc->lock); + + dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); + dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); + dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); + + INIT_LIST_HEAD(&sdc->slave.channels); + sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources; + sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; + sdc->slave.device_tx_status 
= sun6i_dma_tx_status; + sdc->slave.device_issue_pending = sun6i_dma_issue_pending; + sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; + sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; + sdc->slave.device_control = sun6i_dma_control; + sdc->slave.chancnt = NR_MAX_VCHANS; + + sdc->slave.dev = &pdev->dev; + + sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS, + sizeof(struct sun6i_pchan), GFP_KERNEL); + if (!sdc->pchans) + return -ENOMEM; + + sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS, + sizeof(struct sun6i_vchan), GFP_KERNEL); + if (!sdc->vchans) + return -ENOMEM; + + tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); + + for (i = 0; i < NR_MAX_CHANNELS; i++) { + struct sun6i_pchan *pchan = &sdc->pchans[i]; + + pchan->idx = i; + pchan->base = sdc->base + 0x100 + i * 0x40; + } + + for (i = 0; i < NR_MAX_VCHANS; i++) { + struct sun6i_vchan *vchan = &sdc->vchans[i]; + + INIT_LIST_HEAD(&vchan->node); + vchan->vc.desc_free = sun6i_dma_free_desc; + vchan_init(&vchan->vc, &sdc->slave); + } + + ret = reset_control_deassert(sdc->rstc); + if (ret) { + dev_err(&pdev->dev, "Couldn't deassert the device from reset\n"); + goto err_chan_free; + } + + ret = clk_prepare_enable(sdc->clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the clock\n"); + goto err_reset_assert; + } + + ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, + dev_name(&pdev->dev), sdc); + if (ret) { + dev_err(&pdev->dev, "Cannot request IRQ\n"); + goto err_clk_disable; + } + + ret = dma_async_device_register(&sdc->slave); + if (ret) { + dev_warn(&pdev->dev, "Failed to register DMA engine device\n"); + goto err_irq_disable; + } + + ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate, + sdc); + if (ret) { + dev_err(&pdev->dev, "of_dma_controller_register failed\n"); + goto err_dma_unregister; + } + + return 0; + +err_dma_unregister: + dma_async_device_unregister(&sdc->slave); +err_irq_disable: + sun6i_kill_tasklet(sdc); +err_clk_disable: + clk_disable_unprepare(sdc->clk); +err_reset_assert: + reset_control_assert(sdc->rstc); +err_chan_free: + sun6i_dma_free(sdc); + return ret; +} + +static int sun6i_dma_remove(struct platform_device *pdev) +{ + struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&sdc->slave); + + sun6i_kill_tasklet(sdc); + + clk_disable_unprepare(sdc->clk); + reset_control_assert(sdc->rstc); + + sun6i_dma_free(sdc); + + return 0; +} + +static struct of_device_id sun6i_dma_match[] = { + { .compatible = "allwinner,sun6i-a31-dma" }, + { /* sentinel */ } +}; + +static struct platform_driver sun6i_dma_driver = { + .probe = sun6i_dma_probe, + .remove = sun6i_dma_remove, + .driver = { + .name = "sun6i-dma", + .of_match_table = sun6i_dma_match, + }, +}; +module_platform_driver(sun6i_dma_driver); + +MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver"); +MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 03ad64ecaaf0..16efa603ff65 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1055,7 +1055,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, - unsigned 
long flags, void *context) + unsigned long flags) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_desc *dma_desc = NULL; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index f8665f9c3e03..fd89ca982748 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -253,12 +253,12 @@ config EDAC_I7300 Clarksboro MCH (Intel 7300 chipset). config EDAC_SBRIDGE - tristate "Intel Sandy-Bridge Integrated MC" + tristate "Intel Sandy-Bridge/Ivy-Bridge/Haswell Integrated MC" depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL depends on PCI_MMCONFIG help Support for error detection and correction the Intel - Sandy Bridge Integrated Memory Controller. + Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers. config EDAC_MPC85XX tristate "Freescale MPC83xx / MPC85xx" diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index f8bf00010d45..bbd65149cdb2 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -87,61 +87,73 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, } /* + * Select DCT to which PCI cfg accesses are routed + */ +static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct) +{ + u32 reg = 0; + + amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); + reg &= (pvt->model == 0x30) ? ~3 : ~1; + reg |= dct; + amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); +} + +/* * * Depending on the family, F2 DCT reads need special handling: * - * K8: has a single DCT only + * K8: has a single DCT only and no address offsets >= 0x100 * * F10h: each DCT has its own set of regs * DCT0 -> F2x040.. * DCT1 -> F2x140.. * - * F15h: we select which DCT we access using F1x10C[DctCfgSel] - * * F16h: has only 1 DCT + * + * F15h: we select which DCT we access using F1x10C[DctCfgSel] */ -static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, - const char *func) +static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct, + int offset, u32 *val) { - if (addr >= 0x100) - return -EINVAL; - - return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); -} + switch (pvt->fam) { + case 0xf: + if (dct || offset >= 0x100) + return -EINVAL; + break; -static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, - const char *func) -{ - return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); -} + case 0x10: + if (dct) { + /* + * Note: If ganging is enabled, barring the regs + * F2x[1,0]98 and F2x[1,0]9C; reads reads to F2x1xx + * return 0. (cf. Section 2.8.1 F10h BKDG) + */ + if (dct_ganging_enabled(pvt)) + return 0; -/* - * Select DCT to which PCI cfg accesses are routed - */ -static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct) -{ - u32 reg = 0; + offset += 0x100; + } + break; - amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); - reg &= (pvt->model >= 0x30) ? ~3 : ~1; - reg |= dct; - amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); -} + case 0x15: + /* + * F15h: F2x1xx addresses do not map explicitly to DCT1. + * We should select which DCT we access using F1x10C[DctCfgSel] + */ + dct = (dct && pvt->model == 0x30) ? 3 : dct; + f15h_select_dct(pvt, dct); + break; -static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, - const char *func) -{ - u8 dct = 0; + case 0x16: + if (dct) + return -EINVAL; + break; - /* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */ - if (addr >= 0x140 && addr <= 0x1a0) { - dct = (pvt->model >= 0x30) ? 
3 : 1; - addr -= 0x100; + default: + break; } - - f15h_select_dct(pvt, dct); - - return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); + return amd64_read_pci_cfg(pvt->F2, offset, val); } /* @@ -768,16 +780,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) u32 *base0 = &pvt->csels[0].csbases[cs]; u32 *base1 = &pvt->csels[1].csbases[cs]; - if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0)) edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", cs, *base0, reg0); - if (pvt->fam == 0xf || dct_ganging_enabled(pvt)) + if (pvt->fam == 0xf) continue; - if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1)) edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", - cs, *base1, reg1); + cs, *base1, (pvt->fam == 0x10) ? reg1 + : reg0); } for_each_chip_select_mask(cs, 0, pvt) { @@ -786,16 +799,17 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) u32 *mask0 = &pvt->csels[0].csmasks[cs]; u32 *mask1 = &pvt->csels[1].csmasks[cs]; - if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) + if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0)) edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", cs, *mask0, reg0); - if (pvt->fam == 0xf || dct_ganging_enabled(pvt)) + if (pvt->fam == 0xf) continue; - if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) + if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1)) edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", - cs, *mask1, reg1); + cs, *mask1, (pvt->fam == 0x10) ? reg1 + : reg0); } } @@ -1198,7 +1212,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt) if (pvt->fam == 0xf) return; - if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { + if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) { edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); @@ -1219,7 +1233,7 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt) dct_sel_interleave_addr(pvt)); } - amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); + amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi); } /* @@ -1430,7 +1444,7 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) return sys_addr; } - amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg); + amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg); if (!(swap_reg & 0x1)) return sys_addr; @@ -1723,10 +1737,16 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) WARN_ON(ctrl != 0); } - dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; - dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases - : pvt->csels[0].csbases; - + if (pvt->fam == 0x10) { + dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 + : pvt->dbam0; + dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? 
+ pvt->csels[1].csbases : + pvt->csels[0].csbases; + } else if (ctrl) { + dbam = pvt->dbam0; + dcsb = pvt->csels[1].csbases; + } edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); @@ -1760,7 +1780,6 @@ static struct amd64_family_type family_types[] = { .early_channel_count = k8_early_channel_count, .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, .dbam_to_cs = k8_dbam_to_chip_select, - .read_dct_pci_cfg = k8_read_dct_pci_cfg, } }, [F10_CPUS] = { @@ -1771,7 +1790,6 @@ static struct amd64_family_type family_types[] = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f10_dbam_to_chip_select, - .read_dct_pci_cfg = f10_read_dct_pci_cfg, } }, [F15_CPUS] = { @@ -1782,7 +1800,6 @@ static struct amd64_family_type family_types[] = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f15_dbam_to_chip_select, - .read_dct_pci_cfg = f15_read_dct_pci_cfg, } }, [F15_M30H_CPUS] = { @@ -1793,7 +1810,6 @@ static struct amd64_family_type family_types[] = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f16_dbam_to_chip_select, - .read_dct_pci_cfg = f15_read_dct_pci_cfg, } }, [F16_CPUS] = { @@ -1804,7 +1820,6 @@ static struct amd64_family_type family_types[] = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f16_dbam_to_chip_select, - .read_dct_pci_cfg = f10_read_dct_pci_cfg, } }, [F16_M30H_CPUS] = { @@ -1815,7 +1830,6 @@ static struct amd64_family_type family_types[] = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f16_dbam_to_chip_select, - .read_dct_pci_cfg = f10_read_dct_pci_cfg, } }, }; @@ -2148,25 +2162,25 @@ static void read_mc_regs(struct amd64_pvt *pvt) read_dct_base_mask(pvt); amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); - amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0); + amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0); amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); - amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0); - amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0); + amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0); + amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0); if (!dct_ganging_enabled(pvt)) { - amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1); - amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1); + amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1); + amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1); } pvt->ecc_sym_sz = 4; if (pvt->fam >= 0x10) { amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); + /* F16h has only DCT0, so no need to read dbam1 */ if (pvt->fam != 0x16) - /* F16h has only DCT0 */ - amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); + amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1); /* F10h, revD and later can do x8 ECC too */ if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25)) diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index d903e0c21144..55fb5941c6d4 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -481,8 +481,6 @@ struct low_ops { void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr, struct err_info *); int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode); - int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset, - u32 *val, const char *func); }; struct amd64_family_type { @@ -502,9 +500,6 @@ int 
__amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, #define amd64_write_pci_cfg(pdev, offset, val) \ __amd64_write_pci_cfg_dword(pdev, offset, val, __func__) -#define amd64_read_dct_pci_cfg(pvt, offset, val) \ - pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__) - int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, u64 *hole_offset, u64 *hole_size); diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index 374b57fc596d..a12c8552f6a6 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c @@ -134,8 +134,7 @@ static void cell_edac_init_csrows(struct mem_ctl_info *mci) int j; u32 nr_pages; - for (np = NULL; - (np = of_find_node_by_name(np, "memory")) != NULL;) { + for_each_node_by_name(np, "memory") { struct resource r; /* We "know" that the Cell firmware only creates one entry diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 3c2625e7980d..6c9f381e8fe6 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -66,7 +66,7 @@ #define EDAC_PCI "PCI" #define EDAC_DEBUG "DEBUG" -extern const char *edac_mem_types[]; +extern const char * const edac_mem_types[]; #ifdef CONFIG_EDAC_DEBUG extern int edac_debug_level; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 9f134823fa75..c3893b0ddb18 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -128,7 +128,7 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci) /* * keep those in sync with the enum mem_type */ -const char *edac_mem_types[] = { +const char * const edac_mem_types[] = { "Empty csrow", "Reserved csrow type", "Unknown csrow type", diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 01fae8289cf0..a6cd36100663 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -108,7 +108,9 @@ static const char * const mem_types[] = { [MEM_RDDR2] = "Registered-DDR2", [MEM_XDR] = "XDR", [MEM_DDR3] = "Unbuffered-DDR3", - [MEM_RDDR3] = "Registered-DDR3" + [MEM_RDDR3] = "Registered-DDR3", + [MEM_DDR4] = "Unbuffered-DDR4", + [MEM_RDDR4] = "Registered-DDR4" }; static const char * const dev_types[] = { diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index f4aec2e6ef56..7d3742edbaa2 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -633,7 +633,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op) if (edac_op_state == EDAC_OPSTATE_INT) { pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); res = devm_request_irq(&op->dev, pdata->irq, - mpc85xx_l2_isr, 0, + mpc85xx_l2_isr, IRQF_SHARED, "[EDAC] L2 err", edac_dev); if (res < 0) { printk(KERN_ERR diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index ef6b7e08f485..0f04d5ead521 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -974,7 +974,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) * page size (PAGE_SIZE) or the memory width (2 or 4). 
*/ for (j = 0; j < csi->nr_channels; j++) { - struct dimm_info *dimm = csi->channels[j].dimm; + struct dimm_info *dimm = csi->channels[j]->dimm; dimm->nr_pages = nr_pages / csi->nr_channels; dimm->grain = 1; diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index deea0dc9999b..0034c4844428 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -99,6 +99,7 @@ static const u32 ibridge_dram_rule[] = { #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3) #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1) #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0) +#define A7MODE(reg) GET_BITFIELD(reg, 26, 26) static char *get_dram_attr(u32 reg) { @@ -164,6 +165,8 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, #define TOLM 0x80 #define TOHM 0x84 +#define HASWELL_TOHM_0 0xd4 +#define HASWELL_TOHM_1 0xd8 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff) #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff) @@ -176,8 +179,6 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg, #define SAD_CONTROL 0xf4 -#define NODE_ID(reg) GET_BITFIELD(reg, 0, 2) - /* Device 14 function 0 */ static const u32 tad_dram_rule[] = { @@ -235,7 +236,6 @@ static const u32 rir_way_limit[] = { #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31) #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29) -#define RIR_LIMIT(reg) ((GET_BITFIELD(reg, 1, 10) << 29)| 0x1fffffff) #define MAX_RIR_WAY 8 @@ -279,8 +279,6 @@ static const u32 correrrthrsld[] = { #define IB_RANK_CFG_A 0x0320 -#define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11) - /* * sbridge structs */ @@ -291,6 +289,7 @@ static const u32 correrrthrsld[] = { enum type { SANDY_BRIDGE, IVY_BRIDGE, + HASWELL, }; struct sbridge_pvt; @@ -300,11 +299,15 @@ struct sbridge_info { u32 rankcfgr; u64 (*get_tolm)(struct sbridge_pvt *pvt); u64 (*get_tohm)(struct sbridge_pvt *pvt); + u64 (*rir_limit)(u32 reg); const u32 *dram_rule; const u32 *interleave_list; const struct interleave_pkg *interleave_pkg; u8 max_sad; u8 max_interleave; + u8 (*get_node_id)(struct sbridge_pvt *pvt); + enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt); + struct pci_dev *pci_vtd; }; struct sbridge_channel { @@ -313,9 +316,7 @@ struct sbridge_channel { }; struct pci_id_descr { - int dev; - int func; - int dev_id; + int dev_id; int optional; }; @@ -338,6 +339,7 @@ struct sbridge_pvt { struct pci_dev *pci_sad0, *pci_sad1; struct pci_dev *pci_ha0, *pci_ha1; struct pci_dev *pci_br0, *pci_br1; + struct pci_dev *pci_ha1_ta; struct pci_dev *pci_tad[NUM_CHANNELS]; struct sbridge_dev *sbridge_dev; @@ -362,31 +364,29 @@ struct sbridge_pvt { u64 tolm, tohm; }; -#define PCI_DESCR(device, function, device_id, opt) \ - .dev = (device), \ - .func = (function), \ - .dev_id = (device_id), \ +#define PCI_DESCR(device_id, opt) \ + .dev_id = (device_id), \ .optional = opt static const struct pci_id_descr pci_dev_descr_sbridge[] = { /* Processor Home Agent */ - { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) }, /* Memory controller */ - { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) }, - { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) }, - { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) }, - { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) }, - { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) }, - { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) }, - { PCI_DESCR(17, 0, 
PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) }, /* System Address Decoder */ - { PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) }, - { PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) }, /* Broadcast Registers */ - { PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, }; #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } @@ -423,34 +423,34 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { static const struct pci_id_descr pci_dev_descr_ibridge[] = { /* Processor Home Agent */ - { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) }, /* Memory controller */ - { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, - { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, - { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, - { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, - { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, - { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) }, /* System Address Decoder */ - { PCI_DESCR(22, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) }, /* Broadcast Registers */ - { PCI_DESCR(22, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, - { PCI_DESCR(22, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) }, /* Optional, mode 2HA */ - { PCI_DESCR(28, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) }, #if 0 - { PCI_DESCR(29, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, - { PCI_DESCR(29, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) }, #endif - { PCI_DESCR(29, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, - { PCI_DESCR(29, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) }, - { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, - { PCI_DESCR(17, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) }, }; static const struct pci_id_table pci_dev_descr_ibridge_table[] = { @@ -458,12 +458,80 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = { {0,} /* 0 terminated list. 
*/ }; +/* Haswell support */ +/* EN processor: + * - 1 IMC + * - 3 DDR3 channels, 2 DPC per channel + * EP processor: + * - 1 or 2 IMC + * - 4 DDR4 channels, 3 DPC per channel + * EP 4S processor: + * - 2 IMC + * - 4 DDR4 channels, 3 DPC per channel + * EX processor: + * - 2 IMC + * - each IMC interfaces with a SMI 2 channel + * - each SMI channel interfaces with a scalable memory buffer + * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC + */ +#define HASWELL_DDRCRCLKCONTROLS 0xa10 +#define HASWELL_HASYSDEFEATURE2 0x84 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79 +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d +#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd +static const struct pci_id_descr pci_dev_descr_haswell[] = { + /* first item must be the HA */ + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) }, + + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) }, + + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) }, + + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) }, + + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) }, + + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) }, +}; + +static const struct pci_id_table pci_dev_descr_haswell_table[] = { + PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell), + {0,} /* 0 terminated list. */ +}; + /* * pci_device_id table for which devices we are looking for */ static const struct pci_device_id sbridge_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)}, {0,} /* 0 terminated list. 
*/ }; @@ -472,13 +540,17 @@ static const struct pci_device_id sbridge_pci_tbl[] = { Ancillary status routines ****************************************************************************/ -static inline int numrank(u32 mtr) +static inline int numrank(enum type type, u32 mtr) { int ranks = (1 << RANK_CNT_BITS(mtr)); + int max = 4; - if (ranks > 4) { - edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n", - ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr); + if (type == HASWELL) + max = 8; + + if (ranks > max) { + edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n", + ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr); return -EINVAL; } @@ -588,10 +660,107 @@ static u64 ibridge_get_tohm(struct sbridge_pvt *pvt) return GET_TOHM(reg); } +static u64 rir_limit(u32 reg) +{ + return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff; +} + +static enum mem_type get_memory_type(struct sbridge_pvt *pvt) +{ + u32 reg; + enum mem_type mtype; + + if (pvt->pci_ddrio) { + pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr, + ®); + if (GET_BITFIELD(reg, 11, 11)) + /* FIXME: Can also be LRDIMM */ + mtype = MEM_RDDR3; + else + mtype = MEM_DDR3; + } else + mtype = MEM_UNKNOWN; + + return mtype; +} + +static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt) +{ + u32 reg; + bool registered = false; + enum mem_type mtype = MEM_UNKNOWN; + + if (!pvt->pci_ddrio) + goto out; + + pci_read_config_dword(pvt->pci_ddrio, + HASWELL_DDRCRCLKCONTROLS, ®); + /* Is_Rdimm */ + if (GET_BITFIELD(reg, 16, 16)) + registered = true; + + pci_read_config_dword(pvt->pci_ta, MCMTR, ®); + if (GET_BITFIELD(reg, 14, 14)) { + if (registered) + mtype = MEM_RDDR4; + else + mtype = MEM_DDR4; + } else { + if (registered) + mtype = MEM_RDDR3; + else + mtype = MEM_DDR3; + } + +out: + return mtype; +} + +static u8 get_node_id(struct sbridge_pvt *pvt) +{ + u32 reg; + pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, ®); + return GET_BITFIELD(reg, 0, 2); +} + +static u8 haswell_get_node_id(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, ®); + return GET_BITFIELD(reg, 0, 3); +} + +static u64 haswell_get_tolm(struct sbridge_pvt *pvt) +{ + u32 reg; + + pci_read_config_dword(pvt->info.pci_vtd, TOLM, ®); + return (GET_BITFIELD(reg, 26, 31) << 26) | 0x1ffffff; +} + +static u64 haswell_get_tohm(struct sbridge_pvt *pvt) +{ + u64 rc; + u32 reg; + + pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, ®); + rc = GET_BITFIELD(reg, 26, 31); + pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, ®); + rc = ((reg << 6) | rc) << 26; + + return rc | 0x1ffffff; +} + +static u64 haswell_rir_limit(u32 reg) +{ + return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1; +} + static inline u8 sad_pkg_socket(u8 pkg) { /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */ - return (pkg >> 3) | (pkg & 0x3); + return ((pkg >> 3) << 2) | (pkg & 0x3); } static inline u8 sad_pkg_ha(u8 pkg) @@ -602,44 +771,43 @@ static inline u8 sad_pkg_ha(u8 pkg) /**************************************************************************** Memory check routines ****************************************************************************/ -static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot, - unsigned func) +static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id) { - struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus); - int i; - - if (!sbridge_dev) - return NULL; - - for (i = 0; i < sbridge_dev->n_devs; i++) { - if (!sbridge_dev->pdev[i]) - continue; 
+ struct pci_dev *pdev = NULL; - if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot && - PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) { - edac_dbg(1, "Associated %02x.%02x.%d with %p\n", - bus, slot, func, sbridge_dev->pdev[i]); - return sbridge_dev->pdev[i]; - } - } + do { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev); + if (pdev && pdev->bus->number == bus) + break; + } while (pdev); - return NULL; + return pdev; } /** * check_if_ecc_is_active() - Checks if ECC is active - * bus: Device bus + * @bus: Device bus + * @type: Memory controller type + * returns: 0 in case ECC is active, -ENODEV if it can't be determined or + * disabled */ -static int check_if_ecc_is_active(const u8 bus) +static int check_if_ecc_is_active(const u8 bus, enum type type) { struct pci_dev *pdev = NULL; - u32 mcmtr; + u32 mcmtr, id; + + if (type == IVY_BRIDGE) + id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA; + else if (type == HASWELL) + id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA; + else + id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA; - pdev = get_pdev_slot_func(bus, 15, 0); + pdev = get_pdev_same_bus(bus, id); if (!pdev) { sbridge_printk(KERN_ERR, "Couldn't find PCI device " - "%2x.%02d.%d!!!\n", - bus, 15, 0); + "%04x:%04x! on bus %02d\n", + PCI_VENDOR_ID_INTEL, id, bus); return -ENODEV; } @@ -661,11 +829,14 @@ static int get_dimm_config(struct mem_ctl_info *mci) enum edac_type mode; enum mem_type mtype; - pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); + if (pvt->info.type == HASWELL) + pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®); + else + pci_read_config_dword(pvt->pci_br0, SAD_TARGET, ®); + pvt->sbridge_dev->source_id = SOURCE_ID(reg); - pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, ®); - pvt->sbridge_dev->node_id = NODE_ID(reg); + pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt); edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", pvt->sbridge_dev->mc, pvt->sbridge_dev->node_id, @@ -698,24 +869,18 @@ static int get_dimm_config(struct mem_ctl_info *mci) pvt->is_close_pg = false; } - if (pvt->pci_ddrio) { - pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr, - ®); - if (IS_RDIMM_ENABLED(reg)) { - /* FIXME: Can also be LRDIMM */ - edac_dbg(0, "Memory is registered\n"); - mtype = MEM_RDDR3; - } else { - edac_dbg(0, "Memory is unregistered\n"); - mtype = MEM_DDR3; - } - } else { + mtype = pvt->info.get_memory_type(pvt); + if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4) + edac_dbg(0, "Memory is registered\n"); + else if (mtype == MEM_UNKNOWN) edac_dbg(0, "Cannot determine memory type\n"); - mtype = MEM_UNKNOWN; - } + else + edac_dbg(0, "Memory is unregistered\n"); - /* On all supported DDR3 DIMM types, there are 8 banks available */ - banks = 8; + if (mtype == MEM_DDR4 || MEM_RDDR4) + banks = 16; + else + banks = 8; for (i = 0; i < NUM_CHANNELS; i++) { u32 mtr; @@ -729,11 +894,10 @@ static int get_dimm_config(struct mem_ctl_info *mci) if (IS_DIMM_PRESENT(mtr)) { pvt->channel[i].dimms++; - ranks = numrank(mtr); + ranks = numrank(pvt->info.type, mtr); rows = numrow(mtr); cols = numcol(mtr); - /* DDR3 has 8 I/O banks */ size = ((u64)rows * cols * banks * ranks) >> (20 - 3); npages = MiB_TO_PAGES(size); @@ -744,7 +908,17 @@ static int get_dimm_config(struct mem_ctl_info *mci) dimm->nr_pages = npages; dimm->grain = 32; - dimm->dtype = (banks == 8) ? 
DEV_X8 : DEV_X4; + switch (banks) { + case 16: + dimm->dtype = DEV_X16; + break; + case 8: + dimm->dtype = DEV_X8; + break; + case 4: + dimm->dtype = DEV_X4; + break; + } dimm->mtype = mtype; dimm->edac_mode = mode; snprintf(dimm->label, sizeof(dimm->label), @@ -887,7 +1061,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci) if (!IS_RIR_VALID(reg)) continue; - tmp_mb = RIR_LIMIT(reg) >> 20; + tmp_mb = pvt->info.rir_limit(reg) >> 20; rir_way = 1 << RIR_WAY(reg); mb = div_u64_rem(tmp_mb, 1000, &kb); edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", @@ -936,11 +1110,11 @@ static int get_memory_error_data(struct mem_ctl_info *mci, struct mem_ctl_info *new_mci; struct sbridge_pvt *pvt = mci->pvt_info; struct pci_dev *pci_ha; - int n_rir, n_sads, n_tads, sad_way, sck_xch; + int n_rir, n_sads, n_tads, sad_way, sck_xch; int sad_interl, idx, base_ch; - int interleave_mode; + int interleave_mode, shiftup = 0; unsigned sad_interleave[pvt->info.max_interleave]; - u32 reg; + u32 reg, dram_rule; u8 ch_way, sck_way, pkg, sad_ha = 0; u32 tad_offset; u32 rir_way; @@ -987,8 +1161,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci, sprintf(msg, "Can't discover the memory socket"); return -EINVAL; } - *area_type = get_dram_attr(reg); - interleave_mode = INTERLEAVE_MODE(reg); + dram_rule = reg; + *area_type = get_dram_attr(dram_rule); + interleave_mode = INTERLEAVE_MODE(dram_rule); pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads], ®); @@ -1033,6 +1208,36 @@ static int get_memory_error_data(struct mem_ctl_info *mci, *socket = sad_interleave[idx]; edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", idx, sad_way, *socket); + } else if (pvt->info.type == HASWELL) { + int bits, a7mode = A7MODE(dram_rule); + + if (a7mode) { + /* A7 mode swaps P9 with P6 */ + bits = GET_BITFIELD(addr, 7, 8) << 1; + bits |= GET_BITFIELD(addr, 9, 9); + } else + bits = GET_BITFIELD(addr, 7, 9); + + if (interleave_mode) { + /* interleave mode will XOR {8,7,6} with {18,17,16} */ + idx = GET_BITFIELD(addr, 16, 18); + idx ^= bits; + } else + idx = bits; + + pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx); + *socket = sad_pkg_socket(pkg); + sad_ha = sad_pkg_ha(pkg); + + if (a7mode) { + /* MCChanShiftUpEnable */ + pci_read_config_dword(pvt->pci_ha0, + HASWELL_HASYSDEFEATURE2, ®); + shiftup = GET_BITFIELD(reg, 22, 22); + } + + edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n", + idx, *socket, sad_ha, shiftup); } else { /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */ idx = (addr >> 6) & 7; @@ -1090,7 +1295,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, if (ch_way == 3) idx = addr >> 6; else - idx = addr >> (6 + sck_way); + idx = (addr >> (6 + sck_way + shiftup)) & 0x3; idx = idx % ch_way; /* @@ -1181,7 +1386,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, if (!IS_RIR_VALID(reg)) continue; - limit = RIR_LIMIT(reg); + limit = pvt->info.rir_limit(reg); mb = div_u64_rem(limit >> 20, 1000, &kb); edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", n_rir, @@ -1197,6 +1402,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, return -EINVAL; } rir_way = RIR_WAY(reg); + if (pvt->is_close_pg) idx = (ch_addr >> 6); else @@ -1259,13 +1465,11 @@ static int sbridge_get_onedevice(struct pci_dev **prev, { struct sbridge_dev *sbridge_dev; const struct pci_id_descr *dev_descr = &table->descr[devno]; - struct pci_dev *pdev = NULL; u8 bus = 0; 
sbridge_printk(KERN_DEBUG, - "Seeking for: dev %02x.%d PCI ID %04x:%04x\n", - dev_descr->dev, dev_descr->func, + "Seeking for: PCI ID %04x:%04x\n", PCI_VENDOR_ID_INTEL, dev_descr->dev_id); pdev = pci_get_device(PCI_VENDOR_ID_INTEL, @@ -1280,12 +1484,12 @@ static int sbridge_get_onedevice(struct pci_dev **prev, if (dev_descr->optional) return 0; + /* if the HA wasn't found */ if (devno == 0) return -ENODEV; sbridge_printk(KERN_INFO, - "Device not found: dev %02x.%d PCI ID %04x:%04x\n", - dev_descr->dev, dev_descr->func, + "Device not found: %04x:%04x\n", PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* End of list, leave */ @@ -1305,9 +1509,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev, if (sbridge_dev->pdev[devno]) { sbridge_printk(KERN_ERR, - "Duplicated device for " - "dev %02x:%d.%d PCI ID %04x:%04x\n", - bus, dev_descr->dev, dev_descr->func, + "Duplicated device for %04x:%04x\n", PCI_VENDOR_ID_INTEL, dev_descr->dev_id); pci_dev_put(pdev); return -ENODEV; @@ -1315,30 +1517,15 @@ static int sbridge_get_onedevice(struct pci_dev **prev, sbridge_dev->pdev[devno] = pdev; - /* Sanity check */ - if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev || - PCI_FUNC(pdev->devfn) != dev_descr->func)) { - sbridge_printk(KERN_ERR, - "Device PCI ID %04x:%04x " - "has dev %02x:%d.%d instead of dev %02x:%02x.%d\n", - PCI_VENDOR_ID_INTEL, dev_descr->dev_id, - bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), - bus, dev_descr->dev, dev_descr->func); - return -ENODEV; - } - /* Be sure that the device is enabled */ if (unlikely(pci_enable_device(pdev) < 0)) { sbridge_printk(KERN_ERR, - "Couldn't enable " - "dev %02x:%d.%d PCI ID %04x:%04x\n", - bus, dev_descr->dev, dev_descr->func, + "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, dev_descr->dev_id); return -ENODEV; } - edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n", - bus, dev_descr->dev, dev_descr->func, + edac_dbg(0, "Detected %04x:%04x\n", PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* @@ -1355,10 +1542,9 @@ static int sbridge_get_onedevice(struct pci_dev **prev, /* * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's - * device/functions we want to reference for this driver. - * Need to 'get' device 16 func 1 and func 2. + * devices we want to reference for this driver. * @num_mc: pointer to the memory controllers count, to be incremented in case - * of success. + * of success. 
* @table: model specific table * * returns 0 in case of success or error code @@ -1396,79 +1582,51 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci, { struct sbridge_pvt *pvt = mci->pvt_info; struct pci_dev *pdev; - int i, func, slot; + int i; for (i = 0; i < sbridge_dev->n_devs; i++) { pdev = sbridge_dev->pdev[i]; if (!pdev) continue; - slot = PCI_SLOT(pdev->devfn); - func = PCI_FUNC(pdev->devfn); - switch (slot) { - case 12: - switch (func) { - case 6: - pvt->pci_sad0 = pdev; - break; - case 7: - pvt->pci_sad1 = pdev; - break; - default: - goto error; - } + + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0: + pvt->pci_sad0 = pdev; break; - case 13: - switch (func) { - case 6: - pvt->pci_br0 = pdev; - break; - default: - goto error; - } + case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1: + pvt->pci_sad1 = pdev; break; - case 14: - switch (func) { - case 0: - pvt->pci_ha0 = pdev; - break; - default: - goto error; - } + case PCI_DEVICE_ID_INTEL_SBRIDGE_BR: + pvt->pci_br0 = pdev; break; - case 15: - switch (func) { - case 0: - pvt->pci_ta = pdev; - break; - case 1: - pvt->pci_ras = pdev; - break; - case 2: - case 3: - case 4: - case 5: - pvt->pci_tad[func - 2] = pdev; - break; - default: - goto error; - } + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: + pvt->pci_ha0 = pdev; break; - case 17: - switch (func) { - case 0: - pvt->pci_ddrio = pdev; - break; - default: - goto error; - } + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: + pvt->pci_ta = pdev; + break; + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS: + pvt->pci_ras = pdev; + break; + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0: + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1: + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2: + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3: + { + int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0; + pvt->pci_tad[id] = pdev; + } + break; + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO: + pvt->pci_ddrio = pdev; break; default: goto error; } - edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", + edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n", + pdev->vendor, pdev->device, sbridge_dev->bus, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev); } @@ -1488,9 +1646,8 @@ enodev: return -ENODEV; error: - sbridge_printk(KERN_ERR, "Device %d, function %d " - "is out of the expected range\n", - slot, func); + sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n", + PCI_VENDOR_ID_INTEL, pdev->device); return -EINVAL; } @@ -1499,7 +1656,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, { struct sbridge_pvt *pvt = mci->pvt_info; struct pci_dev *pdev, *tmp; - int i, func, slot; + int i; bool mode_2ha = false; tmp = pci_get_device(PCI_VENDOR_ID_INTEL, @@ -1513,79 +1670,60 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, pdev = sbridge_dev->pdev[i]; if (!pdev) continue; - slot = PCI_SLOT(pdev->devfn); - func = PCI_FUNC(pdev->devfn); - switch (slot) { - case 14: - if (func == 0) { - pvt->pci_ha0 = pdev; - break; - } - goto error; - case 15: - switch (func) { - case 0: - pvt->pci_ta = pdev; - break; - case 1: - pvt->pci_ras = pdev; - break; - case 4: - case 5: - /* if we have 2 HAs active, channels 2 and 3 - * are in other device */ - if (mode_2ha) - break; - /* fall through */ - case 2: - case 3: - pvt->pci_tad[func - 2] = pdev; + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0: + pvt->pci_ha0 = pdev; + break; + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: + pvt->pci_ta = pdev; + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: + pvt->pci_ras = pdev; + 
break; + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2: + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3: + /* if we have 2 HAs active, channels 2 and 3 + * are in other device */ + if (mode_2ha) break; - default: - goto error; - } + /* fall through */ + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0: + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1: + { + int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0; + pvt->pci_tad[id] = pdev; + } break; - case 17: - if (func == 4) { + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0: + pvt->pci_ddrio = pdev; + break; + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0: + if (!mode_2ha) pvt->pci_ddrio = pdev; - break; - } else if (func == 0) { - if (!mode_2ha) - pvt->pci_ddrio = pdev; - break; - } - goto error; - case 22: - switch (func) { - case 0: - pvt->pci_sad0 = pdev; - break; - case 1: - pvt->pci_br0 = pdev; - break; - case 2: - pvt->pci_br1 = pdev; - break; - default: - goto error; - } break; - case 28: - if (func == 0) { - pvt->pci_ha1 = pdev; - break; - } - goto error; - case 29: + case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD: + pvt->pci_sad0 = pdev; + break; + case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0: + pvt->pci_br0 = pdev; + break; + case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1: + pvt->pci_br1 = pdev; + break; + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1: + pvt->pci_ha1 = pdev; + break; + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0: + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1: + { + int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 2; + /* we shouldn't have this device if we have just one * HA present */ WARN_ON(!mode_2ha); - if (func == 2 || func == 3) { - pvt->pci_tad[func] = pdev; - break; - } - goto error; + pvt->pci_tad[id] = pdev; + } + break; default: goto error; } @@ -1614,11 +1752,111 @@ enodev: error: sbridge_printk(KERN_ERR, - "Device %d, function %d is out of the expected range\n", - slot, func); + "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL, + pdev->device); return -EINVAL; } +static int haswell_mci_bind_devs(struct mem_ctl_info *mci, + struct sbridge_dev *sbridge_dev) +{ + struct sbridge_pvt *pvt = mci->pvt_info; + struct pci_dev *pdev, *tmp; + int i; + bool mode_2ha = false; + + tmp = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, NULL); + if (tmp) { + mode_2ha = true; + pci_dev_put(tmp); + } + + /* there's only one device per system; not tied to any bus */ + if (pvt->info.pci_vtd == NULL) + /* result will be checked later */ + pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC, + NULL); + + for (i = 0; i < sbridge_dev->n_devs; i++) { + pdev = sbridge_dev->pdev[i]; + if (!pdev) + continue; + + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0: + pvt->pci_sad0 = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1: + pvt->pci_sad1 = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: + pvt->pci_ha0 = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA: + pvt->pci_ta = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL: + pvt->pci_ras = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0: + pvt->pci_tad[0] = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1: + pvt->pci_tad[1] = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2: + if (!mode_2ha) + pvt->pci_tad[2] = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3: + if (!mode_2ha) + pvt->pci_tad[3] = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0: + 
pvt->pci_ddrio = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1: + pvt->pci_ha1 = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA: + pvt->pci_ha1_ta = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0: + if (mode_2ha) + pvt->pci_tad[2] = pdev; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1: + if (mode_2ha) + pvt->pci_tad[3] = pdev; + break; + default: + break; + } + + edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", + sbridge_dev->bus, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), + pdev); + } + + /* Check if everything were registered */ + if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 || + !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd) + goto enodev; + + for (i = 0; i < NUM_CHANNELS; i++) { + if (!pvt->pci_tad[i]) + goto enodev; + } + return 0; + +enodev: + sbridge_printk(KERN_ERR, "Some needed devices are missing\n"); + return -ENODEV; +} + /**************************************************************************** Error check routines ****************************************************************************/ @@ -1736,6 +1974,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, * EDAC core should be handling the channel mask, in order to point * to the group of dimm's where the error may be happening. */ + if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg) + channel = first_channel; + snprintf(msg, sizeof(msg), "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d", overflow ? " OVERFLOW" : "", @@ -1865,10 +2106,6 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, "%u APIC %x\n", mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid); - /* Only handle if it is the right mc controller */ - if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc) - return NOTIFY_DONE; - smp_rmb(); if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { smp_wmb(); @@ -1932,7 +2169,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) int rc; /* Check the number of active and not disabled channels */ - rc = check_if_ecc_is_active(sbridge_dev->bus); + rc = check_if_ecc_is_active(sbridge_dev->bus, type); if (unlikely(rc < 0)) return rc; @@ -1971,11 +2208,15 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) mci->edac_check = sbridge_check_error; pvt->info.type = type; - if (type == IVY_BRIDGE) { + switch (type) { + case IVY_BRIDGE: pvt->info.rankcfgr = IB_RANK_CFG_A; pvt->info.get_tolm = ibridge_get_tolm; pvt->info.get_tohm = ibridge_get_tohm; pvt->info.dram_rule = ibridge_dram_rule; + pvt->info.get_memory_type = get_memory_type; + pvt->info.get_node_id = get_node_id; + pvt->info.rir_limit = rir_limit; pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); pvt->info.interleave_list = ibridge_interleave_list; pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); @@ -1986,11 +2227,15 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) rc = ibridge_mci_bind_devs(mci, sbridge_dev); if (unlikely(rc < 0)) goto fail0; - } else { + break; + case SANDY_BRIDGE: pvt->info.rankcfgr = SB_RANK_CFG_A; pvt->info.get_tolm = sbridge_get_tolm; pvt->info.get_tohm = sbridge_get_tohm; pvt->info.dram_rule = sbridge_dram_rule; + pvt->info.get_memory_type = get_memory_type; + pvt->info.get_node_id = get_node_id; + pvt->info.rir_limit = rir_limit; pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule); pvt->info.interleave_list = sbridge_interleave_list; 
pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list); @@ -2001,8 +2246,27 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) rc = sbridge_mci_bind_devs(mci, sbridge_dev); if (unlikely(rc < 0)) goto fail0; - } + break; + case HASWELL: + /* rankcfgr isn't used */ + pvt->info.get_tolm = haswell_get_tolm; + pvt->info.get_tohm = haswell_get_tohm; + pvt->info.dram_rule = ibridge_dram_rule; + pvt->info.get_memory_type = haswell_get_memory_type; + pvt->info.get_node_id = haswell_get_node_id; + pvt->info.rir_limit = haswell_rir_limit; + pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule); + pvt->info.interleave_list = ibridge_interleave_list; + pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list); + pvt->info.interleave_pkg = ibridge_interleave_pkg; + mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx); + /* Store pci devices at mci for faster access */ + rc = haswell_mci_bind_devs(mci, sbridge_dev); + if (unlikely(rc < 0)) + goto fail0; + break; + } /* Get dimm basic config and the memory layout */ get_dimm_config(mci); @@ -2037,10 +2301,10 @@ fail0: static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - int rc; + int rc = -ENODEV; u8 mc, num_mc = 0; struct sbridge_dev *sbridge_dev; - enum type type; + enum type type = SANDY_BRIDGE; /* get the pci devices we want to reserve for our use */ mutex_lock(&sbridge_edac_lock); @@ -2054,12 +2318,19 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) } probed++; - if (pdev->device == PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA) { + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table); type = IVY_BRIDGE; - } else { + break; + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table); type = SANDY_BRIDGE; + break; + case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0: + rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table); + type = HASWELL; + break; } if (unlikely(rc < 0)) goto fail0; @@ -2068,6 +2339,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { edac_dbg(0, "Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc); + sbridge_dev->mc = mc++; rc = sbridge_register_mci(sbridge_dev, type); if (unlikely(rc < 0)) diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index d8be608a9f3b..aef6a95adef5 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o -obj-$(CONFIG_EFI_STUB) += libstub/ +obj-$(CONFIG_EFI_ARM_STUB) += libstub/ diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index a56bb3528755..c846a9608cbd 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -22,7 +22,7 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, unsigned long map_size, unsigned long desc_size, u32 desc_ver) { - int node, prev; + int node, prev, num_rsv; int status; u32 fdt_val32; u64 fdt_val64; @@ -73,6 +73,14 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, prev = node; } + /* + * Delete all memory reserve map entries. 
When booting via UEFI, + * kernel will use the UEFI memory map to find reserved regions. + */ + num_rsv = fdt_num_mem_rsv(fdt); + while (num_rsv-- > 0) + fdt_del_mem_rsv(fdt, num_rsv); + node = fdt_subnode_offset(fdt, 0, "chosen"); if (node < 0) { node = fdt_add_subnode(fdt, 0, "chosen"); diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 97cdd16a2169..018c29a26615 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -138,6 +138,27 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) return entry; } +int efi_get_runtime_map_size(void) +{ + return nr_efi_runtime_map * efi_memdesc_size; +} + +int efi_get_runtime_map_desc_size(void) +{ + return efi_memdesc_size; +} + +int efi_runtime_map_copy(void *buf, size_t bufsz) +{ + size_t sz = efi_get_runtime_map_size(); + + if (sz > bufsz) + sz = bufsz; + + memcpy(buf, efi_runtime_map, sz); + return 0; +} + void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) { efi_runtime_map = map; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index f0a43646a2f3..5abe943e3404 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove); */ static void efivar_entry_list_del_unlock(struct efivar_entry *entry) { - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); list_del(&entry->list); spin_unlock_irq(&__efivars->lock); @@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry) const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); status = ops->set_variable(entry->var.VariableName, &entry->var.VendorGuid, @@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, int strsize1, strsize2; bool found = false; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); list_for_each_entry_safe(entry, n, head, list) { strsize1 = ucs2_strsize(name, 1024); @@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); status = ops->get_variable(entry->var.VariableName, &entry->var.VendorGuid, diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 17cf96c45f2b..79f18e6d9c4f 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -286,7 +286,11 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; - entry = firmware_map_find_entry_bootmem(start, end, type); + entry = firmware_map_find_entry(start, end - 1, type); + if (entry) + return 0; + + entry = firmware_map_find_entry_bootmem(start, end - 1, type); if (!entry) { entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC); if (!entry) diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 4a1b5113e527..9de1515e5808 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -340,6 +340,13 @@ config GPIO_XILINX help Say yes here to support the Xilinx FPGA GPIO device +config GPIO_ZYNQ + tristate "Xilinx Zynq GPIO support" + depends on ARCH_ZYNQ + select GPIOLIB_IRQCHIP + help + Say yes here to support Xilinx Zynq GPIO controller. 
+ config GPIO_XTENSA bool "Xtensa GPIO32 support" depends on XTENSA @@ -423,7 +430,7 @@ config GPIO_GE_FPGA config GPIO_LYNXPOINT tristate "Intel Lynxpoint GPIO support" depends on ACPI && X86 - select IRQ_DOMAIN + select GPIOLIB_IRQCHIP help driver for GPIO functionality on Intel Lynxpoint PCH chipset Requires ACPI device enumeration code to set up a platform device. @@ -450,6 +457,19 @@ config GPIO_ARIZONA help Support for GPIOs on Wolfson Arizona class devices. +config GPIO_CRYSTAL_COVE + tristate "GPIO support for Crystal Cove PMIC" + depends on INTEL_SOC_PMIC + select GPIOLIB_IRQCHIP + help + Support for GPIO pins on Crystal Cove PMIC. + + Say Yes if you have a Intel SoC based tablet with Crystal Cove PMIC + inside. + + This driver can also be built as a module. If so, the module will be + called gpio-crystalcove. + config GPIO_LP3943 tristate "TI/National Semiconductor LP3943 GPIO expander" depends on MFD_LP3943 @@ -573,6 +593,7 @@ config GPIO_SX150X config GPIO_STMPE bool "STMPE GPIOs" depends on MFD_STMPE + select GPIOLIB_IRQCHIP help This enables support for the GPIOs found on the STMPE I/O Expanders. diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index d10f6a9d875a..5d024e396622 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -4,7 +4,9 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG obj-$(CONFIG_GPIO_DEVRES) += devres.o obj-$(CONFIG_GPIOLIB) += gpiolib.o +obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o obj-$(CONFIG_OF_GPIO) += gpiolib-of.o +obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o # Device drivers. Generally keep list sorted alphabetically @@ -20,6 +22,7 @@ obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o +obj-$(CONFIG_GPIO_CRYSTAL_COVE) += gpio-crystalcove.o obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o @@ -101,3 +104,4 @@ obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o +obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c index 65978cf85f79..954b9f6b0ef8 100644 --- a/drivers/gpio/devres.c +++ b/drivers/gpio/devres.c @@ -39,57 +39,63 @@ static int devm_gpiod_match(struct device *dev, void *res, void *data) * devm_gpiod_get - Resource-managed gpiod_get() * @dev: GPIO consumer * @con_id: function within the GPIO consumer + * @flags: optional GPIO initialization flags * * Managed gpiod_get(). GPIO descriptors returned from this function are * automatically disposed on driver detach. See gpiod_get() for detailed * information about behavior and return values. */ -struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, - const char *con_id) +struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags) { - return devm_gpiod_get_index(dev, con_id, 0); + return devm_gpiod_get_index(dev, con_id, 0, flags); } -EXPORT_SYMBOL(devm_gpiod_get); +EXPORT_SYMBOL(__devm_gpiod_get); /** * devm_gpiod_get_optional - Resource-managed gpiod_get_optional() * @dev: GPIO consumer * @con_id: function within the GPIO consumer + * @flags: optional GPIO initialization flags * * Managed gpiod_get_optional(). GPIO descriptors returned from this function * are automatically disposed on driver detach. 
See gpiod_get_optional() for * detailed information about behavior and return values. */ -struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, - const char *con_id) +struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags) { - return devm_gpiod_get_index_optional(dev, con_id, 0); + return devm_gpiod_get_index_optional(dev, con_id, 0, flags); } -EXPORT_SYMBOL(devm_gpiod_get_optional); +EXPORT_SYMBOL(__devm_gpiod_get_optional); /** * devm_gpiod_get_index - Resource-managed gpiod_get_index() * @dev: GPIO consumer * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer + * @flags: optional GPIO initialization flags * * Managed gpiod_get_index(). GPIO descriptors returned from this function are * automatically disposed on driver detach. See gpiod_get_index() for detailed * information about behavior and return values. */ -struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, +struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, const char *con_id, - unsigned int idx) + unsigned int idx, + enum gpiod_flags flags) { struct gpio_desc **dr; struct gpio_desc *desc; - dr = devres_alloc(devm_gpiod_release, sizeof(struct gpiod_desc *), + dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *), GFP_KERNEL); if (!dr) return ERR_PTR(-ENOMEM); - desc = gpiod_get_index(dev, con_id, idx); + desc = gpiod_get_index(dev, con_id, idx, flags); if (IS_ERR(desc)) { devres_free(dr); return desc; @@ -100,26 +106,28 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, return desc; } -EXPORT_SYMBOL(devm_gpiod_get_index); +EXPORT_SYMBOL(__devm_gpiod_get_index); /** * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional() * @dev: GPIO consumer * @con_id: function within the GPIO consumer * @index: index of the GPIO to obtain in the consumer + * @flags: optional GPIO initialization flags * * Managed gpiod_get_index_optional(). GPIO descriptors returned from this * function are automatically disposed on driver detach. See * gpiod_get_index_optional() for detailed information about behavior and * return values. 
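 *
 * Illustrative example, not part of this patch: with the @flags argument
 * added to the devm_gpiod_get() family here, a consumer can request the
 * initial line configuration in the same call instead of a separate
 * gpiod_direction_output() step. The "reset" function name is a made-up
 * placeholder:
 *
 *	struct gpio_desc *reset;
 *
 *	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
 *	if (IS_ERR(reset))
 *		return PTR_ERR(reset);
 *	gpiod_set_value_cansleep(reset, 1);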
*/ -struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, +struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *dev, const char *con_id, - unsigned int index) + unsigned int index, + enum gpiod_flags flags) { struct gpio_desc *desc; - desc = devm_gpiod_get_index(dev, con_id, index); + desc = devm_gpiod_get_index(dev, con_id, index, flags); if (IS_ERR(desc)) { if (PTR_ERR(desc) == -ENOENT) return NULL; @@ -127,7 +135,7 @@ struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, return desc; } -EXPORT_SYMBOL(devm_gpiod_get_index_optional); +EXPORT_SYMBOL(__devm_gpiod_get_index_optional); /** * devm_gpiod_put - Resource-managed gpiod_put() diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c index e4ae29824c32..e3d968f751f1 100644 --- a/drivers/gpio/gpio-74x164.c +++ b/drivers/gpio/gpio-74x164.c @@ -167,13 +167,11 @@ exit_destroy: static int gen_74x164_remove(struct spi_device *spi) { struct gen_74x164_chip *chip = spi_get_drvdata(spi); - int ret; - ret = gpiochip_remove(&chip->gpio_chip); - if (!ret) - mutex_destroy(&chip->lock); + gpiochip_remove(&chip->gpio_chip); + mutex_destroy(&chip->lock); - return ret; + return 0; } static const struct of_device_id gen_74x164_dt_ids[] = { diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c index b2239d678d01..416b2200d4f1 100644 --- a/drivers/gpio/gpio-adnp.c +++ b/drivers/gpio/gpio-adnp.c @@ -585,15 +585,8 @@ static int adnp_i2c_remove(struct i2c_client *client) { struct adnp *adnp = i2c_get_clientdata(client); struct device_node *np = client->dev.of_node; - int err; - - err = gpiochip_remove(&adnp->gpio); - if (err < 0) { - dev_err(&client->dev, "%s failed: %d\n", "gpiochip_remove()", - err); - return err; - } + gpiochip_remove(&adnp->gpio); if (of_find_property(np, "interrupt-controller", NULL)) adnp_irq_teardown(adnp); diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c index f1ade8fa3218..b08bd169e568 100644 --- a/drivers/gpio/gpio-adp5520.c +++ b/drivers/gpio/gpio-adp5520.c @@ -167,15 +167,9 @@ err: static int adp5520_gpio_remove(struct platform_device *pdev) { struct adp5520_gpio *dev; - int ret; dev = platform_get_drvdata(pdev); - ret = gpiochip_remove(&dev->gpio_chip); - if (ret) { - dev_err(&pdev->dev, "%s failed, %d\n", - "gpiochip_remove()", ret); - return ret; - } + gpiochip_remove(&dev->gpio_chip); return 0; } diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index ef19bc33f2bd..3beed6ea8c65 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c @@ -470,11 +470,7 @@ static int adp5588_gpio_remove(struct i2c_client *client) if (dev->irq_base) free_irq(dev->client->irq, dev); - ret = gpiochip_remove(&dev->gpio_chip); - if (ret) { - dev_err(&client->dev, "gpiochip_remove failed %d\n", ret); - return ret; - } + gpiochip_remove(&dev->gpio_chip); kfree(dev); return 0; diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c index 94e9992f8904..3c09f1a6872a 100644 --- a/drivers/gpio/gpio-amd8111.c +++ b/drivers/gpio/gpio-amd8111.c @@ -232,8 +232,7 @@ out: static void __exit amd_gpio_exit(void) { - int err = gpiochip_remove(&gp.chip); - WARN_ON(err); + gpiochip_remove(&gp.chip); ioport_unmap(gp.pm); release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE); } diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index 29bdff558981..fe369f5c7fa6 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -149,7 +149,8 @@ static int 
arizona_gpio_remove(struct platform_device *pdev) { struct arizona_gpio *arizona_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&arizona_gpio->gpio_chip); + gpiochip_remove(&arizona_gpio->gpio_chip); + return 0; } static struct platform_driver arizona_gpio_driver = { diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c index 6557147d9331..7e4c43c18960 100644 --- a/drivers/gpio/gpio-bt8xx.c +++ b/drivers/gpio/gpio-bt8xx.c @@ -241,9 +241,6 @@ static void bt8xxgpio_remove(struct pci_dev *pdev) bgwrite(~0x0, BT848_INT_STAT); bgwrite(0x0, BT848_GPIO_OUT_EN); - iounmap(bg->mmio); - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); pci_disable_device(pdev); } diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c new file mode 100644 index 000000000000..934462f5bd22 --- /dev/null +++ b/drivers/gpio/gpio-crystalcove.c @@ -0,0 +1,380 @@ +/* + * gpio-crystalcove.c - Intel Crystal Cove GPIO Driver + * + * Copyright (C) 2012, 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Yang, Bin <bin.yang@intel.com> + */ + +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/seq_file.h> +#include <linux/bitops.h> +#include <linux/regmap.h> +#include <linux/mfd/intel_soc_pmic.h> + +#define CRYSTALCOVE_GPIO_NUM 16 + +#define UPDATE_IRQ_TYPE BIT(0) +#define UPDATE_IRQ_MASK BIT(1) + +#define GPIO0IRQ 0x0b +#define GPIO1IRQ 0x0c +#define MGPIO0IRQS0 0x19 +#define MGPIO1IRQS0 0x1a +#define MGPIO0IRQSX 0x1b +#define MGPIO1IRQSX 0x1c +#define GPIO0P0CTLO 0x2b +#define GPIO0P0CTLI 0x33 +#define GPIO1P0CTLO 0x3b +#define GPIO1P0CTLI 0x43 + +#define CTLI_INTCNT_DIS (0) +#define CTLI_INTCNT_NE (1 << 1) +#define CTLI_INTCNT_PE (2 << 1) +#define CTLI_INTCNT_BE (3 << 1) + +#define CTLO_DIR_IN (0) +#define CTLO_DIR_OUT (1 << 5) + +#define CTLO_DRV_CMOS (0) +#define CTLO_DRV_OD (1 << 4) + +#define CTLO_DRV_REN (1 << 3) + +#define CTLO_RVAL_2KDW (0) +#define CTLO_RVAL_2KUP (1 << 1) +#define CTLO_RVAL_50KDW (2 << 1) +#define CTLO_RVAL_50KUP (3 << 1) + +#define CTLO_INPUT_SET (CTLO_DRV_CMOS | CTLO_DRV_REN | CTLO_RVAL_2KUP) +#define CTLO_OUTPUT_SET (CTLO_DIR_OUT | CTLO_INPUT_SET) + +enum ctrl_register { + CTRL_IN, + CTRL_OUT, +}; + +/** + * struct crystalcove_gpio - Crystal Cove GPIO controller + * @buslock: for bus lock/sync and unlock. + * @chip: the abstract gpio_chip structure. + * @regmap: the regmap from the parent device. + * @update: pending IRQ setting update, to be written to the chip upon unlock. + * @intcnt_value: the Interrupt Detect value to be written. + * @set_irq_mask: true if the IRQ mask needs to be set, false to clear. 
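 *
 * Illustrative note, not part of the patch: the PMIC registers are only
 * reachable through a regmap bus that may sleep (hence can_sleep in the
 * probe below), so the irq_chip callbacks merely record the requested
 * change and the real register writes are deferred to
 * crystalcove_bus_sync_unlock(). Masking an interrupt therefore flows
 * roughly as:
 *
 *	crystalcove_bus_lock()          mutex_lock(&cg->buslock)
 *	crystalcove_irq_mask()          cg->set_irq_mask = true;
 *	                                cg->update |= UPDATE_IRQ_MASK;
 *	crystalcove_bus_sync_unlock()   crystalcove_update_irq_mask()
 *	                                mutex_unlock(&cg->buslock)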
+ */ +struct crystalcove_gpio { + struct mutex buslock; /* irq_bus_lock */ + struct gpio_chip chip; + struct regmap *regmap; + int update; + int intcnt_value; + bool set_irq_mask; +}; + +static inline struct crystalcove_gpio *to_cg(struct gpio_chip *gc) +{ + return container_of(gc, struct crystalcove_gpio, chip); +} + +static inline int to_reg(int gpio, enum ctrl_register reg_type) +{ + int reg; + + if (reg_type == CTRL_IN) { + if (gpio < 8) + reg = GPIO0P0CTLI; + else + reg = GPIO1P0CTLI; + } else { + if (gpio < 8) + reg = GPIO0P0CTLO; + else + reg = GPIO1P0CTLO; + } + + return reg + gpio % 8; +} + +static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, + int gpio) +{ + u8 mirqs0 = gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0; + int mask = BIT(gpio % 8); + + if (cg->set_irq_mask) + regmap_update_bits(cg->regmap, mirqs0, mask, mask); + else + regmap_update_bits(cg->regmap, mirqs0, mask, 0); +} + +static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio) +{ + int reg = to_reg(gpio, CTRL_IN); + + regmap_update_bits(cg->regmap, reg, CTLI_INTCNT_BE, cg->intcnt_value); +} + +static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio) +{ + struct crystalcove_gpio *cg = to_cg(chip); + + return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), + CTLO_INPUT_SET); +} + +static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio, + int value) +{ + struct crystalcove_gpio *cg = to_cg(chip); + + return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), + CTLO_OUTPUT_SET | value); +} + +static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio) +{ + struct crystalcove_gpio *cg = to_cg(chip); + int ret; + unsigned int val; + + ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val); + if (ret) + return ret; + + return val & 0x1; +} + +static void crystalcove_gpio_set(struct gpio_chip *chip, + unsigned gpio, int value) +{ + struct crystalcove_gpio *cg = to_cg(chip); + + if (value) + regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1); + else + regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0); +} + +static int crystalcove_irq_type(struct irq_data *data, unsigned type) +{ + struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data)); + + switch (type) { + case IRQ_TYPE_NONE: + cg->intcnt_value = CTLI_INTCNT_DIS; + break; + case IRQ_TYPE_EDGE_BOTH: + cg->intcnt_value = CTLI_INTCNT_BE; + break; + case IRQ_TYPE_EDGE_RISING: + cg->intcnt_value = CTLI_INTCNT_PE; + break; + case IRQ_TYPE_EDGE_FALLING: + cg->intcnt_value = CTLI_INTCNT_NE; + break; + default: + return -EINVAL; + } + + cg->update |= UPDATE_IRQ_TYPE; + + return 0; +} + +static void crystalcove_bus_lock(struct irq_data *data) +{ + struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data)); + + mutex_lock(&cg->buslock); +} + +static void crystalcove_bus_sync_unlock(struct irq_data *data) +{ + struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data)); + int gpio = data->hwirq; + + if (cg->update & UPDATE_IRQ_TYPE) + crystalcove_update_irq_ctrl(cg, gpio); + if (cg->update & UPDATE_IRQ_MASK) + crystalcove_update_irq_mask(cg, gpio); + cg->update = 0; + + mutex_unlock(&cg->buslock); +} + +static void crystalcove_irq_unmask(struct irq_data *data) +{ + struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data)); + + cg->set_irq_mask = false; + cg->update |= UPDATE_IRQ_MASK; +} + +static void crystalcove_irq_mask(struct irq_data *data) +{ + struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data)); + + cg->set_irq_mask 
= true; + cg->update |= UPDATE_IRQ_MASK; +} + +static struct irq_chip crystalcove_irqchip = { + .name = "Crystal Cove", + .irq_mask = crystalcove_irq_mask, + .irq_unmask = crystalcove_irq_unmask, + .irq_set_type = crystalcove_irq_type, + .irq_bus_lock = crystalcove_bus_lock, + .irq_bus_sync_unlock = crystalcove_bus_sync_unlock, +}; + +static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) +{ + struct crystalcove_gpio *cg = data; + unsigned int p0, p1; + int pending; + int gpio; + unsigned int virq; + + if (regmap_read(cg->regmap, GPIO0IRQ, &p0) || + regmap_read(cg->regmap, GPIO1IRQ, &p1)) + return IRQ_NONE; + + regmap_write(cg->regmap, GPIO0IRQ, p0); + regmap_write(cg->regmap, GPIO1IRQ, p1); + + pending = p0 | p1 << 8; + + for (gpio = 0; gpio < cg->chip.ngpio; gpio++) { + if (pending & BIT(gpio)) { + virq = irq_find_mapping(cg->chip.irqdomain, gpio); + generic_handle_irq(virq); + } + } + + return IRQ_HANDLED; +} + +static void crystalcove_gpio_dbg_show(struct seq_file *s, + struct gpio_chip *chip) +{ + struct crystalcove_gpio *cg = to_cg(chip); + int gpio, offset; + unsigned int ctlo, ctli, mirqs0, mirqsx, irq; + + for (gpio = 0; gpio < cg->chip.ngpio; gpio++) { + regmap_read(cg->regmap, to_reg(gpio, CTRL_OUT), &ctlo); + regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &ctli); + regmap_read(cg->regmap, gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0, + &mirqs0); + regmap_read(cg->regmap, gpio < 8 ? MGPIO0IRQSX : MGPIO1IRQSX, + &mirqsx); + regmap_read(cg->regmap, gpio < 8 ? GPIO0IRQ : GPIO1IRQ, + &irq); + + offset = gpio % 8; + seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s %s\n", + gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ", + ctli & 0x1 ? "hi" : "lo", + ctli & CTLI_INTCNT_NE ? "fall" : " ", + ctli & CTLI_INTCNT_PE ? "rise" : " ", + ctlo, + mirqs0 & BIT(offset) ? "s0 mask " : "s0 unmask", + mirqsx & BIT(offset) ? "sx mask " : "sx unmask", + irq & BIT(offset) ? 
"pending" : " "); + } +} + +static int crystalcove_gpio_probe(struct platform_device *pdev) +{ + int irq = platform_get_irq(pdev, 0); + struct crystalcove_gpio *cg; + int retval; + struct device *dev = pdev->dev.parent; + struct intel_soc_pmic *pmic = dev_get_drvdata(dev); + + if (irq < 0) + return irq; + + cg = devm_kzalloc(&pdev->dev, sizeof(*cg), GFP_KERNEL); + if (!cg) + return -ENOMEM; + + platform_set_drvdata(pdev, cg); + + mutex_init(&cg->buslock); + cg->chip.label = KBUILD_MODNAME; + cg->chip.direction_input = crystalcove_gpio_dir_in; + cg->chip.direction_output = crystalcove_gpio_dir_out; + cg->chip.get = crystalcove_gpio_get; + cg->chip.set = crystalcove_gpio_set; + cg->chip.base = -1; + cg->chip.ngpio = CRYSTALCOVE_GPIO_NUM; + cg->chip.can_sleep = true; + cg->chip.dev = dev; + cg->chip.dbg_show = crystalcove_gpio_dbg_show; + cg->regmap = pmic->regmap; + + retval = gpiochip_add(&cg->chip); + if (retval) { + dev_warn(&pdev->dev, "add gpio chip error: %d\n", retval); + return retval; + } + + gpiochip_irqchip_add(&cg->chip, &crystalcove_irqchip, 0, + handle_simple_irq, IRQ_TYPE_NONE); + + retval = request_threaded_irq(irq, NULL, crystalcove_gpio_irq_handler, + IRQF_ONESHOT, KBUILD_MODNAME, cg); + + if (retval) { + dev_warn(&pdev->dev, "request irq failed: %d\n", retval); + goto out_remove_gpio; + } + + return 0; + +out_remove_gpio: + WARN_ON(gpiochip_remove(&cg->chip)); + return retval; +} + +static int crystalcove_gpio_remove(struct platform_device *pdev) +{ + struct crystalcove_gpio *cg = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + int err; + + err = gpiochip_remove(&cg->chip); + + if (irq >= 0) + free_irq(irq, cg); + + return err; +} + +static struct platform_driver crystalcove_gpio_driver = { + .probe = crystalcove_gpio_probe, + .remove = crystalcove_gpio_remove, + .driver = { + .name = "crystal_cove_gpio", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(crystalcove_gpio_driver); + +MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>"); +MODULE_DESCRIPTION("Intel Crystal Cove GPIO Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c index c0a3aeba6f21..92ec58fa9236 100644 --- a/drivers/gpio/gpio-cs5535.c +++ b/drivers/gpio/gpio-cs5535.c @@ -358,14 +358,8 @@ done: static int cs5535_gpio_remove(struct platform_device *pdev) { struct resource *r; - int err; - err = gpiochip_remove(&cs5535_gpio_chip.chip); - if (err) { - /* uhh? 
*/ - dev_err(&pdev->dev, "unable to remove gpio_chip?\n"); - return err; - } + gpiochip_remove(&cs5535_gpio_chip.chip); r = platform_get_resource(pdev, IORESOURCE_IO, 0); release_region(r->start, resource_size(r)); diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c index 416cdf786b05..c5bccd4dec96 100644 --- a/drivers/gpio/gpio-da9052.c +++ b/drivers/gpio/gpio-da9052.c @@ -237,7 +237,8 @@ static int da9052_gpio_remove(struct platform_device *pdev) { struct da9052_gpio *gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&gpio->gp); + gpiochip_remove(&gpio->gp); + return 0; } static struct platform_driver da9052_gpio_driver = { diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c index f992997bc301..9167c4331081 100644 --- a/drivers/gpio/gpio-da9055.c +++ b/drivers/gpio/gpio-da9055.c @@ -174,7 +174,8 @@ static int da9055_gpio_remove(struct platform_device *pdev) { struct da9055_gpio *gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&gpio->gp); + gpiochip_remove(&gpio->gp); + return 0; } static struct platform_driver da9055_gpio_driver = { diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index cd3b81435274..d6618a6e2399 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -359,7 +359,7 @@ static void dwapb_gpio_unregister(struct dwapb_gpio *gpio) for (m = 0; m < gpio->nr_ports; ++m) if (gpio->ports[m].is_registered) - WARN_ON(gpiochip_remove(&gpio->ports[m].bgc.gc)); + gpiochip_remove(&gpio->ports[m].bgc.gc); } static int dwapb_gpio_probe(struct platform_device *pdev) diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index cde36054c387..fe49ec3cdb7d 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -409,11 +409,8 @@ err0: static int em_gio_remove(struct platform_device *pdev) { struct em_gio_priv *p = platform_get_drvdata(pdev); - int ret; - ret = gpiochip_remove(&p->gpio_chip); - if (ret) - return ret; + gpiochip_remove(&p->gpio_chip); irq_domain_remove(p->irq_domain); return 0; diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c index 8f73ee093739..fd3202f968ff 100644 --- a/drivers/gpio/gpio-f7188x.c +++ b/drivers/gpio/gpio-f7188x.c @@ -317,13 +317,7 @@ static int f7188x_gpio_probe(struct platform_device *pdev) err_gpiochip: for (i = i - 1; i >= 0; i--) { struct f7188x_gpio_bank *bank = &data->bank[i]; - int tmp; - - tmp = gpiochip_remove(&bank->chip); - if (tmp < 0) - dev_err(&pdev->dev, - "Failed to remove gpiochip %d: %d\n", - i, tmp); + gpiochip_remove(&bank->chip); } return err; @@ -331,20 +325,12 @@ err_gpiochip: static int f7188x_gpio_remove(struct platform_device *pdev) { - int err; int i; struct f7188x_gpio_data *data = platform_get_drvdata(pdev); for (i = 0; i < data->nr_bank; i++) { struct f7188x_gpio_bank *bank = &data->bank[i]; - - err = gpiochip_remove(&bank->chip); - if (err) { - dev_err(&pdev->dev, - "Failed to remove GPIO gpiochip %d: %d\n", - i, err); - return err; - } + gpiochip_remove(&bank->chip); } return 0; diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index fea8c82bb8fc..16f6115e5bdb 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -398,7 +398,8 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin) int bgpio_remove(struct bgpio_chip *bgc) { - return gpiochip_remove(&bgc->gc); + gpiochip_remove(&bgc->gc); + return 0; } EXPORT_SYMBOL_GPL(bgpio_remove); diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 3c3f515b7916..66ad3df9d9cf 100644 --- 
a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -468,9 +468,7 @@ static int grgpio_remove(struct platform_device *ofdev) } } - ret = gpiochip_remove(&priv->bgc.gc); - if (ret) - goto out; + gpiochip_remove(&priv->bgc.gc); if (priv->domain) irq_domain_remove(priv->domain); diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index 70304220a479..3784e81e7762 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c @@ -514,14 +514,7 @@ add_err: static int ichx_gpio_remove(struct platform_device *pdev) { - int err; - - err = gpiochip_remove(&ichx_priv.chip); - if (err) { - dev_err(&pdev->dev, "%s failed, %d\n", - "gpiochip_remove()", err); - return err; - } + gpiochip_remove(&ichx_priv.chip); ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio); if (ichx_priv.pm_base) diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index 118a6bf455d9..aa28c65eb6b4 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c @@ -28,12 +28,10 @@ #include <linux/stddef.h> #include <linux/interrupt.h> #include <linux/init.h> -#include <linux/irq.h> #include <linux/io.h> -#include <linux/gpio.h> +#include <linux/gpio/driver.h> #include <linux/slab.h> #include <linux/pm_runtime.h> -#include <linux/irqdomain.h> #define INTEL_MID_IRQ_TYPE_EDGE (1 << 0) #define INTEL_MID_IRQ_TYPE_LEVEL (1 << 1) @@ -78,10 +76,12 @@ struct intel_mid_gpio { void __iomem *reg_base; spinlock_t lock; struct pci_dev *pdev; - struct irq_domain *domain; }; -#define to_intel_gpio_priv(chip) container_of(chip, struct intel_mid_gpio, chip) +static inline struct intel_mid_gpio *to_intel_gpio_priv(struct gpio_chip *gc) +{ + return container_of(gc, struct intel_mid_gpio, chip); +} static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset, enum GPIO_REG reg_type) @@ -182,15 +182,10 @@ static int intel_gpio_direction_output(struct gpio_chip *chip, return 0; } -static int intel_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct intel_mid_gpio *priv = to_intel_gpio_priv(chip); - return irq_create_mapping(priv->domain, offset); -} - static int intel_mid_irq_type(struct irq_data *d, unsigned type) { - struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct intel_mid_gpio *priv = to_intel_gpio_priv(gc); u32 gpio = irqd_to_hwirq(d); unsigned long flags; u32 value; @@ -231,33 +226,11 @@ static void intel_mid_irq_mask(struct irq_data *d) { } -static int intel_mid_irq_reqres(struct irq_data *d) -{ - struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d); - - if (gpio_lock_as_irq(&priv->chip, irqd_to_hwirq(d))) { - dev_err(priv->chip.dev, - "unable to lock HW IRQ %lu for IRQ\n", - irqd_to_hwirq(d)); - return -EINVAL; - } - return 0; -} - -static void intel_mid_irq_relres(struct irq_data *d) -{ - struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d); - - gpio_unlock_as_irq(&priv->chip, irqd_to_hwirq(d)); -} - static struct irq_chip intel_mid_irqchip = { .name = "INTEL_MID-GPIO", .irq_mask = intel_mid_irq_mask, .irq_unmask = intel_mid_irq_unmask, .irq_set_type = intel_mid_irq_type, - .irq_request_resources = intel_mid_irq_reqres, - .irq_release_resources = intel_mid_irq_relres, }; static const struct intel_mid_gpio_ddata gpio_lincroft = { @@ -330,8 +303,9 @@ MODULE_DEVICE_TABLE(pci, intel_gpio_ids); static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc) { + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct intel_mid_gpio *priv = 
to_intel_gpio_priv(gc); struct irq_data *data = irq_desc_get_irq_data(desc); - struct intel_mid_gpio *priv = irq_data_get_irq_handler_data(data); struct irq_chip *chip = irq_data_get_irq_chip(data); u32 base, gpio, mask; unsigned long pending; @@ -345,7 +319,7 @@ static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc) mask = BIT(gpio); /* Clear before handling so we can't lose an edge */ writel(mask, gedr); - generic_handle_irq(irq_find_mapping(priv->domain, + generic_handle_irq(irq_find_mapping(gc->irqdomain, base + gpio)); } } @@ -371,23 +345,6 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv) } } -static int intel_gpio_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - struct intel_mid_gpio *priv = d->host_data; - - irq_set_chip_and_handler(irq, &intel_mid_irqchip, handle_simple_irq); - irq_set_chip_data(irq, priv); - irq_set_irq_type(irq, IRQ_TYPE_NONE); - - return 0; -} - -static const struct irq_domain_ops intel_gpio_irq_ops = { - .map = intel_gpio_irq_map, - .xlate = irq_domain_xlate_twocell, -}; - static int intel_gpio_runtime_idle(struct device *dev) { int err = pm_schedule_suspend(dev, 500); @@ -441,7 +398,6 @@ static int intel_gpio_probe(struct pci_dev *pdev, priv->chip.direction_output = intel_gpio_direction_output; priv->chip.get = intel_gpio_get; priv->chip.set = intel_gpio_set; - priv->chip.to_irq = intel_gpio_to_irq; priv->chip.base = gpio_base; priv->chip.ngpio = ddata->ngpio; priv->chip.can_sleep = false; @@ -449,11 +405,6 @@ static int intel_gpio_probe(struct pci_dev *pdev, spin_lock_init(&priv->lock); - priv->domain = irq_domain_add_simple(pdev->dev.of_node, ddata->ngpio, - irq_base, &intel_gpio_irq_ops, priv); - if (!priv->domain) - return -ENOMEM; - pci_set_drvdata(pdev, priv); retval = gpiochip_add(&priv->chip); if (retval) { @@ -461,10 +412,23 @@ static int intel_gpio_probe(struct pci_dev *pdev, return retval; } + retval = gpiochip_irqchip_add(&priv->chip, + &intel_mid_irqchip, + irq_base, + handle_simple_irq, + IRQ_TYPE_NONE); + if (retval) { + dev_err(&pdev->dev, + "could not connect irqchip to gpiochip\n"); + return retval; + } + intel_mid_irq_init_hw(priv); - irq_set_handler_data(pdev->irq, priv); - irq_set_chained_handler(pdev->irq, intel_mid_irq_handler); + gpiochip_set_chained_irqchip(&priv->chip, + &intel_mid_irqchip, + pdev->irq, + intel_mid_irq_handler); pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); diff --git a/drivers/gpio/gpio-it8761e.c b/drivers/gpio/gpio-it8761e.c index 278b81317010..dadfc245cf09 100644 --- a/drivers/gpio/gpio-it8761e.c +++ b/drivers/gpio/gpio-it8761e.c @@ -217,11 +217,7 @@ gpiochip_add_err: static void __exit it8761e_gpio_exit(void) { if (gpio_ba) { - int ret = gpiochip_remove(&it8761e_gpio_chip); - - WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n", - __func__, ret); - + gpiochip_remove(&it8761e_gpio_chip); release_region(gpio_ba, GPIO_IOSIZE); gpio_ba = 0; } diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c index 42852eaaf020..29ffe22ad97a 100644 --- a/drivers/gpio/gpio-janz-ttl.c +++ b/drivers/gpio/gpio-janz-ttl.c @@ -194,14 +194,8 @@ static int ttl_probe(struct platform_device *pdev) static int ttl_remove(struct platform_device *pdev) { struct ttl_module *mod = platform_get_drvdata(pdev); - struct device *dev = &pdev->dev; - int ret; - ret = gpiochip_remove(&mod->gpio); - if (ret) { - dev_err(dev, "unable to remove GPIO chip\n"); - return ret; - } + gpiochip_remove(&mod->gpio); return 0; } diff --git 
a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c index 1e5e51987d31..fd150adeebf9 100644 --- a/drivers/gpio/gpio-kempld.c +++ b/drivers/gpio/gpio-kempld.c @@ -199,7 +199,8 @@ static int kempld_gpio_remove(struct platform_device *pdev) { struct kempld_gpio_data *gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&gpio->chip); + gpiochip_remove(&gpio->chip); + return 0; } static struct platform_driver kempld_gpio_driver = { diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c index a0341c92bcb4..6bbdad805b78 100644 --- a/drivers/gpio/gpio-lp3943.c +++ b/drivers/gpio/gpio-lp3943.c @@ -216,7 +216,8 @@ static int lp3943_gpio_remove(struct platform_device *pdev) { struct lp3943_gpio *lp3943_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&lp3943_gpio->chip); + gpiochip_remove(&lp3943_gpio->chip); + return 0; } static const struct of_device_id lp3943_gpio_of_match[] = { diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index 225344d66404..b9b9799b368b 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c @@ -560,7 +560,7 @@ static int lpc32xx_gpio_probe(struct platform_device *pdev) } #ifdef CONFIG_OF -static struct of_device_id lpc32xx_gpio_of_match[] = { +static const struct of_device_id lpc32xx_gpio_of_match[] = { { .compatible = "nxp,lpc3220-gpio", }, { }, }; diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 2bea89b72508..fa945ec9ccff 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -25,9 +25,7 @@ #include <linux/types.h> #include <linux/bitops.h> #include <linux/interrupt.h> -#include <linux/irq.h> #include <linux/gpio.h> -#include <linux/irqdomain.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/platform_device.h> @@ -62,7 +60,6 @@ struct lp_gpio { struct gpio_chip chip; - struct irq_domain *domain; struct platform_device *pdev; spinlock_t lock; unsigned long reg_base; @@ -151,7 +148,8 @@ static void lp_gpio_free(struct gpio_chip *chip, unsigned offset) static int lp_irq_type(struct irq_data *d, unsigned type) { - struct lp_gpio *lg = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip); u32 hwirq = irqd_to_hwirq(d); unsigned long flags; u32 value; @@ -236,16 +234,11 @@ static int lp_gpio_direction_output(struct gpio_chip *chip, return 0; } -static int lp_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip); - return irq_create_mapping(lg->domain, offset); -} - static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); - struct lp_gpio *lg = irq_data_get_irq_handler_data(data); + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip); struct irq_chip *chip = irq_data_get_irq_chip(data); u32 base, pin, mask; unsigned long reg, ena, pending; @@ -262,7 +255,7 @@ static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc) mask = BIT(pin); /* Clear before handling so we don't lose an edge */ outl(mask, reg); - irq = irq_find_mapping(lg->domain, base + pin); + irq = irq_find_mapping(lg->chip.irqdomain, base + pin); generic_handle_irq(irq); } } @@ -279,7 +272,8 @@ static void lp_irq_mask(struct irq_data *d) static void lp_irq_enable(struct irq_data *d) { - struct lp_gpio *lg = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = 
irq_data_get_irq_chip_data(d); + struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip); u32 hwirq = irqd_to_hwirq(d); unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE); unsigned long flags; @@ -291,7 +285,8 @@ static void lp_irq_enable(struct irq_data *d) static void lp_irq_disable(struct irq_data *d) { - struct lp_gpio *lg = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip); u32 hwirq = irqd_to_hwirq(d); unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE); unsigned long flags; @@ -301,26 +296,6 @@ static void lp_irq_disable(struct irq_data *d) spin_unlock_irqrestore(&lg->lock, flags); } -static int lp_irq_reqres(struct irq_data *d) -{ - struct lp_gpio *lg = irq_data_get_irq_chip_data(d); - - if (gpio_lock_as_irq(&lg->chip, irqd_to_hwirq(d))) { - dev_err(lg->chip.dev, - "unable to lock HW IRQ %lu for IRQ\n", - irqd_to_hwirq(d)); - return -EINVAL; - } - return 0; -} - -static void lp_irq_relres(struct irq_data *d) -{ - struct lp_gpio *lg = irq_data_get_irq_chip_data(d); - - gpio_unlock_as_irq(&lg->chip, irqd_to_hwirq(d)); -} - static struct irq_chip lp_irqchip = { .name = "LP-GPIO", .irq_mask = lp_irq_mask, @@ -328,8 +303,6 @@ static struct irq_chip lp_irqchip = { .irq_enable = lp_irq_enable, .irq_disable = lp_irq_disable, .irq_set_type = lp_irq_type, - .irq_request_resources = lp_irq_reqres, - .irq_release_resources = lp_irq_relres, .flags = IRQCHIP_SKIP_SET_WAKE, }; @@ -348,22 +321,6 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg) } } -static int lp_gpio_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - struct lp_gpio *lg = d->host_data; - - irq_set_chip_and_handler(irq, &lp_irqchip, handle_simple_irq); - irq_set_chip_data(irq, lg); - irq_set_irq_type(irq, IRQ_TYPE_NONE); - - return 0; -} - -static const struct irq_domain_ops lp_gpio_irq_ops = { - .map = lp_gpio_irq_map, -}; - static int lp_gpio_probe(struct platform_device *pdev) { struct lp_gpio *lg; @@ -371,7 +328,6 @@ static int lp_gpio_probe(struct platform_device *pdev) struct resource *io_rc, *irq_rc; struct device *dev = &pdev->dev; unsigned long reg_len; - unsigned hwirq; int ret = -ENODEV; lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL); @@ -414,27 +370,28 @@ static int lp_gpio_probe(struct platform_device *pdev) gc->can_sleep = false; gc->dev = dev; + ret = gpiochip_add(gc); + if (ret) { + dev_err(dev, "failed adding lp-gpio chip\n"); + return ret; + } + /* set up interrupts */ if (irq_rc && irq_rc->start) { - hwirq = irq_rc->start; - gc->to_irq = lp_gpio_to_irq; - - lg->domain = irq_domain_add_linear(NULL, LP_NUM_GPIO, - &lp_gpio_irq_ops, lg); - if (!lg->domain) - return -ENXIO; - lp_gpio_irq_init_hw(lg); + ret = gpiochip_irqchip_add(gc, &lp_irqchip, 0, + handle_simple_irq, IRQ_TYPE_NONE); + if (ret) { + dev_err(dev, "failed to add irqchip\n"); + gpiochip_remove(gc); + return ret; + } - irq_set_handler_data(hwirq, lg); - irq_set_chained_handler(hwirq, lp_gpio_irq_handler); + gpiochip_set_chained_irqchip(gc, &lp_irqchip, + (unsigned)irq_rc->start, + lp_gpio_irq_handler); } - ret = gpiochip_add(gc); - if (ret) { - dev_err(dev, "failed adding lp-gpio chip\n"); - return ret; - } pm_runtime_enable(dev); return 0; @@ -450,9 +407,27 @@ static int lp_gpio_runtime_resume(struct device *dev) return 0; } +static int lp_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct lp_gpio *lg = platform_get_drvdata(pdev); + 
unsigned long reg; + int i; + + /* on some hardware suspend clears input sensing, re-enable it here */ + for (i = 0; i < lg->chip.ngpio; i++) { + if (gpiochip_is_requested(&lg->chip, i) != NULL) { + reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2); + outl(inl(reg) & ~GPINDIS_BIT, reg); + } + } + return 0; +} + static const struct dev_pm_ops lp_gpio_pm_ops = { .runtime_suspend = lp_gpio_runtime_suspend, .runtime_resume = lp_gpio_runtime_resume, + .resume = lp_gpio_resume, }; static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = { @@ -465,11 +440,8 @@ MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match); static int lp_gpio_remove(struct platform_device *pdev) { struct lp_gpio *lg = platform_get_drvdata(pdev); - int err; pm_runtime_disable(&pdev->dev); - err = gpiochip_remove(&lg->chip); - if (err) - dev_warn(&pdev->dev, "failed to remove gpio_chip.\n"); + gpiochip_remove(&lg->chip); return 0; } diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c index 0814584fcdc1..18ab89e20806 100644 --- a/drivers/gpio/gpio-max730x.c +++ b/drivers/gpio/gpio-max730x.c @@ -228,21 +228,16 @@ EXPORT_SYMBOL_GPL(__max730x_probe); int __max730x_remove(struct device *dev) { struct max7301 *ts = dev_get_drvdata(dev); - int ret; if (ts == NULL) return -ENODEV; /* Power down the chip and disable IRQ output */ ts->write(dev, 0x04, 0x00); - - ret = gpiochip_remove(&ts->chip); - if (!ret) - mutex_destroy(&ts->lock); - else - dev_err(dev, "Failed to remove GPIO controller: %d\n", ret); - - return ret; + gpiochip_remove(&ts->chip); + mutex_destroy(&ts->lock); + kfree(ts); + return 0; } EXPORT_SYMBOL_GPL(__max730x_remove); diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c index 7c36f2b0983d..6c676225b886 100644 --- a/drivers/gpio/gpio-max732x.c +++ b/drivers/gpio/gpio-max732x.c @@ -676,12 +676,7 @@ static int max732x_remove(struct i2c_client *client) } } - ret = gpiochip_remove(&chip->gpio_chip); - if (ret) { - dev_err(&client->dev, "%s failed, %d\n", - "gpiochip_remove()", ret); - return ret; - } + gpiochip_remove(&chip->gpio_chip); max732x_irq_teardown(chip); diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c index 553a80a5eaf3..4e3e160e5db2 100644 --- a/drivers/gpio/gpio-mc33880.c +++ b/drivers/gpio/gpio-mc33880.c @@ -149,20 +149,15 @@ exit_destroy: static int mc33880_remove(struct spi_device *spi) { struct mc33880 *mc; - int ret; mc = spi_get_drvdata(spi); if (mc == NULL) return -ENODEV; - ret = gpiochip_remove(&mc->chip); - if (!ret) - mutex_destroy(&mc->lock); - else - dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n", - ret); + gpiochip_remove(&mc->chip); + mutex_destroy(&mc->lock); - return ret; + return 0; } static struct spi_driver mc33880_driver = { diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c index dce35ff00db7..d62b4f8182bf 100644 --- a/drivers/gpio/gpio-mc9s08dz60.c +++ b/drivers/gpio/gpio-mc9s08dz60.c @@ -118,7 +118,8 @@ static int mc9s08dz60_remove(struct i2c_client *client) mc9s = i2c_get_clientdata(client); - return gpiochip_remove(&mc9s->chip); + gpiochip_remove(&mc9s->chip); + return 0; } static const struct i2c_device_id mc9s08dz60_id[] = { diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 57adbc90fdad..6f183d9b487e 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -812,16 +812,14 @@ fail: static int mcp230xx_remove(struct i2c_client *client) { struct mcp23s08 *mcp = i2c_get_clientdata(client); - int status; if (client->irq && 
mcp->irq_controller) mcp23s08_irq_teardown(mcp); - status = gpiochip_remove(&mcp->chip); - if (status == 0) - kfree(mcp); + gpiochip_remove(&mcp->chip); + kfree(mcp); - return status; + return 0; } static const struct i2c_device_id mcp230xx_id[] = { @@ -960,13 +958,10 @@ static int mcp23s08_probe(struct spi_device *spi) fail: for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) { - int tmp; if (!data->mcp[addr]) continue; - tmp = gpiochip_remove(&data->mcp[addr]->chip); - if (tmp < 0) - dev_err(&spi->dev, "%s --> %d\n", "remove", tmp); + gpiochip_remove(&data->mcp[addr]->chip); } kfree(data); return status; @@ -976,23 +971,16 @@ static int mcp23s08_remove(struct spi_device *spi) { struct mcp23s08_driver_data *data = spi_get_drvdata(spi); unsigned addr; - int status = 0; for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) { - int tmp; if (!data->mcp[addr]) continue; - tmp = gpiochip_remove(&data->mcp[addr]->chip); - if (tmp < 0) { - dev_err(&spi->dev, "%s --> %d\n", "remove", tmp); - status = tmp; - } + gpiochip_remove(&data->mcp[addr]->chip); } - if (status == 0) - kfree(data); - return status; + kfree(data); + return 0; } static const struct spi_device_id mcp23s08_ids[] = { diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index d51329d23d38..5536108aa9db 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c @@ -497,8 +497,7 @@ err_irq_alloc_descs: err_gpiochip_add: while (--i >= 0) { chip--; - if (gpiochip_remove(&chip->gpio)) - dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i); + gpiochip_remove(&chip->gpio); } kfree(chip_save); @@ -519,7 +518,6 @@ err_pci_enable: static void ioh_gpio_remove(struct pci_dev *pdev) { - int err; int i; struct ioh_gpio *chip = pci_get_drvdata(pdev); void *chip_save; @@ -530,9 +528,7 @@ static void ioh_gpio_remove(struct pci_dev *pdev) for (i = 0; i < 8; i++, chip++) { irq_free_descs(chip->irq_base, num_ports[i]); - err = gpiochip_remove(&chip->gpio); - if (err) - dev_err(&pdev->dev, "Failed gpiochip_remove\n"); + gpiochip_remove(&chip->gpio); } chip = chip_save; diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c index a3351acd4963..94f57670df9a 100644 --- a/drivers/gpio/gpio-msm-v2.c +++ b/drivers/gpio/gpio-msm-v2.c @@ -438,10 +438,7 @@ MODULE_DEVICE_TABLE(of, msm_gpio_of_match); static int msm_gpio_remove(struct platform_device *dev) { - int ret = gpiochip_remove(&msm_gpio.gpio_chip); - - if (ret < 0) - return ret; + gpiochip_remove(&msm_gpio.gpio_chip); irq_set_handler(msm_gpio.summary_irq, NULL); diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index db83b3c0a449..f4e54a92e04a 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -485,7 +485,7 @@ static int mxc_gpio_probe(struct platform_device *pdev) out_irqdesc_free: irq_free_descs(irq_base, 32); out_gpiochip_remove: - WARN_ON(gpiochip_remove(&port->bgc.gc) < 0); + gpiochip_remove(&port->bgc.gc); out_bgpio_remove: bgpio_remove(&port->bgc); out_bgio: diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c index dbb08546b9ec..5c5770c99c80 100644 --- a/drivers/gpio/gpio-octeon.c +++ b/drivers/gpio/gpio-octeon.c @@ -129,7 +129,8 @@ out: static int octeon_gpio_remove(struct platform_device *pdev) { struct gpio_chip *chip = pdev->dev.platform_data; - return gpiochip_remove(chip); + gpiochip_remove(chip); + return 0; } static struct of_device_id octeon_gpio_match[] = { diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 00f29aa1fb9d..174932165fcb 100644 --- a/drivers/gpio/gpio-omap.c +++ 
b/drivers/gpio/gpio-omap.c @@ -24,7 +24,6 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/irqchip/chained_irq.h> #include <linux/gpio.h> #include <linux/bitops.h> #include <linux/platform_data/gpio-omap.h> @@ -89,18 +88,19 @@ struct gpio_bank { #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) #define LINE_USED(line, offset) (line & (BIT(offset))) -static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) +static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) { return bank->chip.base + gpio_irq; } -static inline struct gpio_bank *_irq_data_get_bank(struct irq_data *d) +static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); return container_of(chip, struct gpio_bank, chip); } -static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) +static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio, + int is_input) { void __iomem *reg = bank->base; u32 l; @@ -117,7 +117,8 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) /* set data out value using dedicate set/clear register */ -static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable) +static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, + int enable) { void __iomem *reg = bank->base; u32 l = GPIO_BIT(bank, gpio); @@ -134,7 +135,8 @@ static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable) } /* set data out value using mask register */ -static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable) +static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, + int enable) { void __iomem *reg = bank->base + bank->regs->dataout; u32 gpio_bit = GPIO_BIT(bank, gpio); @@ -149,21 +151,21 @@ static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable) bank->context.dataout = l; } -static int _get_gpio_datain(struct gpio_bank *bank, int offset) +static int omap_get_gpio_datain(struct gpio_bank *bank, int offset) { void __iomem *reg = bank->base + bank->regs->datain; return (readl_relaxed(reg) & (BIT(offset))) != 0; } -static int _get_gpio_dataout(struct gpio_bank *bank, int offset) +static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset) { void __iomem *reg = bank->base + bank->regs->dataout; return (readl_relaxed(reg) & (BIT(offset))) != 0; } -static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) +static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) { int l = readl_relaxed(base + reg); @@ -175,7 +177,7 @@ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) writel_relaxed(l, base + reg); } -static inline void _gpio_dbck_enable(struct gpio_bank *bank) +static inline void omap_gpio_dbck_enable(struct gpio_bank *bank) { if (bank->dbck_enable_mask && !bank->dbck_enabled) { clk_prepare_enable(bank->dbck); @@ -186,7 +188,7 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank) } } -static inline void _gpio_dbck_disable(struct gpio_bank *bank) +static inline void omap_gpio_dbck_disable(struct gpio_bank *bank) { if (bank->dbck_enable_mask && bank->dbck_enabled) { /* @@ -202,7 +204,7 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank) } /** - * _set_gpio_debounce - low level gpio debounce time + * omap2_set_gpio_debounce - low level gpio debounce time * @bank: the gpio bank we're acting upon * @gpio: the gpio 
number on this @gpio * @debounce: debounce time to use @@ -210,8 +212,8 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank) * OMAP's debounce time is in 31us steps so we need * to convert and round up to the closest unit. */ -static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, - unsigned debounce) +static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, + unsigned debounce) { void __iomem *reg; u32 val; @@ -252,7 +254,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, * used within _gpio_dbck_enable() is still not initialized at * that point. Therefore we have to enable dbck here. */ - _gpio_dbck_enable(bank); + omap_gpio_dbck_enable(bank); if (bank->dbck_enable_mask) { bank->context.debounce = debounce; bank->context.debounce_en = val; @@ -260,7 +262,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, } /** - * _clear_gpio_debounce - clear debounce settings for a gpio + * omap_clear_gpio_debounce - clear debounce settings for a gpio * @bank: the gpio bank we're acting upon * @gpio: the gpio number on this @gpio * @@ -269,7 +271,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, * time too. The debounce clock will also be disabled when calling this function * if this is the only gpio in the bank using debounce. */ -static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio) +static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio) { u32 gpio_bit = GPIO_BIT(bank, gpio); @@ -293,20 +295,20 @@ static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio) } } -static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio, +static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio, unsigned trigger) { void __iomem *base = bank->base; u32 gpio_bit = BIT(gpio); - _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit, - trigger & IRQ_TYPE_LEVEL_LOW); - _gpio_rmw(base, bank->regs->leveldetect1, gpio_bit, - trigger & IRQ_TYPE_LEVEL_HIGH); - _gpio_rmw(base, bank->regs->risingdetect, gpio_bit, - trigger & IRQ_TYPE_EDGE_RISING); - _gpio_rmw(base, bank->regs->fallingdetect, gpio_bit, - trigger & IRQ_TYPE_EDGE_FALLING); + omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit, + trigger & IRQ_TYPE_LEVEL_LOW); + omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit, + trigger & IRQ_TYPE_LEVEL_HIGH); + omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit, + trigger & IRQ_TYPE_EDGE_RISING); + omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit, + trigger & IRQ_TYPE_EDGE_FALLING); bank->context.leveldetect0 = readl_relaxed(bank->base + bank->regs->leveldetect0); @@ -318,7 +320,7 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio, readl_relaxed(bank->base + bank->regs->fallingdetect); if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { - _gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0); + omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0); bank->context.wake_en = readl_relaxed(bank->base + bank->regs->wkup_en); } @@ -354,7 +356,7 @@ exit: * This only applies to chips that can't do both rising and falling edge * detection at once. For all other chips, this function is a noop. 
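 *
 * Illustrative sketch, not part of the patch (the function body is not
 * shown in this hunk): the usual way to emulate both-edge detection on
 * hardware with a single edge-select bit is to flip the programmed edge
 * after every interrupt on the line, e.g. with made-up helpers:
 *
 *	edge = read_edge_select(bank, gpio);
 *	write_edge_select(bank, gpio, !edge);	/* rising <-> falling */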
*/ -static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) +static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) { void __iomem *reg = bank->base; u32 l = 0; @@ -373,18 +375,18 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) writel_relaxed(l, reg); } #else -static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {} +static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {} #endif -static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, - unsigned trigger) +static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio, + unsigned trigger) { void __iomem *reg = bank->base; void __iomem *base = bank->base; u32 l = 0; if (bank->regs->leveldetect0 && bank->regs->wkup_en) { - set_gpio_trigger(bank, gpio, trigger); + omap_set_gpio_trigger(bank, gpio, trigger); } else if (bank->regs->irqctrl) { reg += bank->regs->irqctrl; @@ -414,7 +416,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, l |= BIT(gpio << 1); /* Enable wake-up during idle for dynamic tick */ - _gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger); + omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger); bank->context.wake_en = readl_relaxed(bank->base + bank->regs->wkup_en); writel_relaxed(l, reg); @@ -422,7 +424,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, return 0; } -static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset) +static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset) { if (bank->regs->pinctrl) { void __iomem *reg = bank->base + bank->regs->pinctrl; @@ -443,7 +445,7 @@ static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset) } } -static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset) +static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset) { void __iomem *base = bank->base; @@ -451,7 +453,7 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset) !LINE_USED(bank->mod_usage, offset) && !LINE_USED(bank->irq_usage, offset)) { /* Disable wake-up during idle for dynamic tick */ - _gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0); + omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0); bank->context.wake_en = readl_relaxed(bank->base + bank->regs->wkup_en); } @@ -468,16 +470,16 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset) } } -static int gpio_is_input(struct gpio_bank *bank, int mask) +static int omap_gpio_is_input(struct gpio_bank *bank, int mask) { void __iomem *reg = bank->base + bank->regs->direction; return readl_relaxed(reg) & mask; } -static int gpio_irq_type(struct irq_data *d, unsigned type) +static int omap_gpio_irq_type(struct irq_data *d, unsigned type) { - struct gpio_bank *bank = _irq_data_get_bank(d); + struct gpio_bank *bank = omap_irq_data_get_bank(d); unsigned gpio = 0; int retval; unsigned long flags; @@ -492,7 +494,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) #endif if (!gpio) - gpio = irq_to_gpio(bank, d->hwirq); + gpio = omap_irq_to_gpio(bank, d->hwirq); if (type & ~IRQ_TYPE_SENSE_MASK) return -EINVAL; @@ -503,11 +505,11 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) spin_lock_irqsave(&bank->lock, flags); offset = GPIO_INDEX(bank, gpio); - retval = _set_gpio_triggering(bank, offset, type); + retval = omap_set_gpio_triggering(bank, offset, type); if (!LINE_USED(bank->mod_usage, offset)) { - _enable_gpio_module(bank, offset); - 
_set_gpio_direction(bank, offset, 1); - } else if (!gpio_is_input(bank, BIT(offset))) { + omap_enable_gpio_module(bank, offset); + omap_set_gpio_direction(bank, offset, 1); + } else if (!omap_gpio_is_input(bank, BIT(offset))) { spin_unlock_irqrestore(&bank->lock, flags); return -EINVAL; } @@ -523,7 +525,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) return retval; } -static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) +static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) { void __iomem *reg = bank->base; @@ -540,12 +542,12 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) readl_relaxed(reg); } -static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio) +static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank, int gpio) { - _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); + omap_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); } -static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank) +static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank) { void __iomem *reg = bank->base; u32 l; @@ -559,7 +561,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank) return l; } -static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) +static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) { void __iomem *reg = bank->base; u32 l; @@ -581,7 +583,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) writel_relaxed(l, reg); } -static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) +static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) { void __iomem *reg = bank->base; u32 l; @@ -603,12 +605,13 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) writel_relaxed(l, reg); } -static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable) +static inline void omap_set_gpio_irqenable(struct gpio_bank *bank, int gpio, + int enable) { if (enable) - _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); + omap_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); else - _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); + omap_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); } /* @@ -619,7 +622,7 @@ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int ena * enabled. When system is suspended, only selected GPIO interrupts need * to have wake-up enabled. 
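 *
 * Illustrative example, not part of this patch: a client driver selects
 * which GPIO interrupt should wake the system through the generic
 * irq_wake API (names below are placeholders):
 *
 *	irq = gpiod_to_irq(wake_gpio);
 *	ret = devm_request_irq(dev, irq, wake_handler,
 *			       IRQF_TRIGGER_FALLING, "wake", priv);
 *	...
 *	enable_irq_wake(irq);		/* typically from .suspend */
 *	disable_irq_wake(irq);		/* undone in .resume */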
*/ -static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable) +static int omap_set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable) { u32 gpio_bit = GPIO_BIT(bank, gpio); unsigned long flags; @@ -642,22 +645,22 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable) return 0; } -static void _reset_gpio(struct gpio_bank *bank, int gpio) +static void omap_reset_gpio(struct gpio_bank *bank, int gpio) { - _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1); - _set_gpio_irqenable(bank, gpio, 0); - _clear_gpio_irqstatus(bank, gpio); - _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); - _clear_gpio_debounce(bank, gpio); + omap_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1); + omap_set_gpio_irqenable(bank, gpio, 0); + omap_clear_gpio_irqstatus(bank, gpio); + omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); + omap_clear_gpio_debounce(bank, gpio); } /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */ -static int gpio_wake_enable(struct irq_data *d, unsigned int enable) +static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable) { - struct gpio_bank *bank = _irq_data_get_bank(d); - unsigned int gpio = irq_to_gpio(bank, d->hwirq); + struct gpio_bank *bank = omap_irq_data_get_bank(d); + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); - return _set_gpio_wakeup(bank, gpio, enable); + return omap_set_gpio_wakeup(bank, gpio, enable); } static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) @@ -678,8 +681,8 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) * not already been requested. */ if (!LINE_USED(bank->irq_usage, offset)) { - _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); - _enable_gpio_module(bank, offset); + omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); + omap_enable_gpio_module(bank, offset); } bank->mod_usage |= BIT(offset); spin_unlock_irqrestore(&bank->lock, flags); @@ -694,8 +697,8 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) spin_lock_irqsave(&bank->lock, flags); bank->mod_usage &= ~(BIT(offset)); - _disable_gpio_module(bank, offset); - _reset_gpio(bank, bank->chip.base + offset); + omap_disable_gpio_module(bank, offset); + omap_reset_gpio(bank, bank->chip.base + offset); spin_unlock_irqrestore(&bank->lock, flags); /* @@ -715,7 +718,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) * line's interrupt handler has been run, we may miss some nested * interrupts. 
*/ -static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { void __iomem *isr_reg = NULL; u32 isr; @@ -738,7 +741,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) u32 isr_saved, level_mask = 0; u32 enabled; - enabled = _get_gpio_irqbank_mask(bank); + enabled = omap_get_gpio_irqbank_mask(bank); isr_saved = isr = readl_relaxed(isr_reg) & enabled; if (bank->level_mask) @@ -747,9 +750,9 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) /* clear edge sensitive interrupts before handler(s) are called so that we don't miss any interrupt occurred while executing them */ - _disable_gpio_irqbank(bank, isr_saved & ~level_mask); - _clear_gpio_irqbank(bank, isr_saved & ~level_mask); - _enable_gpio_irqbank(bank, isr_saved & ~level_mask); + omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); + omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); + omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); /* if there is only edge sensitive GPIO pin interrupts configured, we could unmask GPIO bank interrupt immediately */ @@ -773,7 +776,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) * This will be indicated in the bank toggle_mask. */ if (bank->toggle_mask & (BIT(bit))) - _toggle_gpio_edge_triggering(bank, bit); + omap_toggle_gpio_edge_triggering(bank, bit); generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, bit)); @@ -789,18 +792,18 @@ exit: pm_runtime_put(bank->dev); } -static void gpio_irq_shutdown(struct irq_data *d) +static void omap_gpio_irq_shutdown(struct irq_data *d) { - struct gpio_bank *bank = _irq_data_get_bank(d); - unsigned int gpio = irq_to_gpio(bank, d->hwirq); + struct gpio_bank *bank = omap_irq_data_get_bank(d); + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); unsigned long flags; unsigned offset = GPIO_INDEX(bank, gpio); spin_lock_irqsave(&bank->lock, flags); gpio_unlock_as_irq(&bank->chip, offset); bank->irq_usage &= ~(BIT(offset)); - _disable_gpio_module(bank, offset); - _reset_gpio(bank, gpio); + omap_disable_gpio_module(bank, offset); + omap_reset_gpio(bank, gpio); spin_unlock_irqrestore(&bank->lock, flags); /* @@ -811,57 +814,57 @@ static void gpio_irq_shutdown(struct irq_data *d) pm_runtime_put(bank->dev); } -static void gpio_ack_irq(struct irq_data *d) +static void omap_gpio_ack_irq(struct irq_data *d) { - struct gpio_bank *bank = _irq_data_get_bank(d); - unsigned int gpio = irq_to_gpio(bank, d->hwirq); + struct gpio_bank *bank = omap_irq_data_get_bank(d); + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); - _clear_gpio_irqstatus(bank, gpio); + omap_clear_gpio_irqstatus(bank, gpio); } -static void gpio_mask_irq(struct irq_data *d) +static void omap_gpio_mask_irq(struct irq_data *d) { - struct gpio_bank *bank = _irq_data_get_bank(d); - unsigned int gpio = irq_to_gpio(bank, d->hwirq); + struct gpio_bank *bank = omap_irq_data_get_bank(d); + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); unsigned long flags; spin_lock_irqsave(&bank->lock, flags); - _set_gpio_irqenable(bank, gpio, 0); - _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); + omap_set_gpio_irqenable(bank, gpio, 0); + omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); spin_unlock_irqrestore(&bank->lock, flags); } -static void gpio_unmask_irq(struct irq_data *d) +static void omap_gpio_unmask_irq(struct irq_data *d) { - struct gpio_bank *bank = _irq_data_get_bank(d); - unsigned int gpio = 
irq_to_gpio(bank, d->hwirq); + struct gpio_bank *bank = omap_irq_data_get_bank(d); + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); unsigned int irq_mask = GPIO_BIT(bank, gpio); u32 trigger = irqd_get_trigger_type(d); unsigned long flags; spin_lock_irqsave(&bank->lock, flags); if (trigger) - _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger); + omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger); /* For level-triggered GPIOs, the clearing must be done after * the HW source is cleared, thus after the handler has run */ if (bank->level_mask & irq_mask) { - _set_gpio_irqenable(bank, gpio, 0); - _clear_gpio_irqstatus(bank, gpio); + omap_set_gpio_irqenable(bank, gpio, 0); + omap_clear_gpio_irqstatus(bank, gpio); } - _set_gpio_irqenable(bank, gpio, 1); + omap_set_gpio_irqenable(bank, gpio, 1); spin_unlock_irqrestore(&bank->lock, flags); } static struct irq_chip gpio_irq_chip = { .name = "GPIO", - .irq_shutdown = gpio_irq_shutdown, - .irq_ack = gpio_ack_irq, - .irq_mask = gpio_mask_irq, - .irq_unmask = gpio_unmask_irq, - .irq_set_type = gpio_irq_type, - .irq_set_wake = gpio_wake_enable, + .irq_shutdown = omap_gpio_irq_shutdown, + .irq_ack = omap_gpio_ack_irq, + .irq_mask = omap_gpio_mask_irq, + .irq_unmask = omap_gpio_unmask_irq, + .irq_set_type = omap_gpio_irq_type, + .irq_set_wake = omap_gpio_wake_enable, }; /*---------------------------------------------------------------------*/ @@ -918,7 +921,7 @@ static struct platform_device omap_mpuio_device = { /* could list the /proc/iomem resources */ }; -static inline void mpuio_init(struct gpio_bank *bank) +static inline void omap_mpuio_init(struct gpio_bank *bank) { platform_set_drvdata(&omap_mpuio_device, bank); @@ -928,7 +931,7 @@ static inline void mpuio_init(struct gpio_bank *bank) /*---------------------------------------------------------------------*/ -static int gpio_get_direction(struct gpio_chip *chip, unsigned offset) +static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset) { struct gpio_bank *bank; unsigned long flags; @@ -943,19 +946,19 @@ static int gpio_get_direction(struct gpio_chip *chip, unsigned offset) return dir; } -static int gpio_input(struct gpio_chip *chip, unsigned offset) +static int omap_gpio_input(struct gpio_chip *chip, unsigned offset) { struct gpio_bank *bank; unsigned long flags; bank = container_of(chip, struct gpio_bank, chip); spin_lock_irqsave(&bank->lock, flags); - _set_gpio_direction(bank, offset, 1); + omap_set_gpio_direction(bank, offset, 1); spin_unlock_irqrestore(&bank->lock, flags); return 0; } -static int gpio_get(struct gpio_chip *chip, unsigned offset) +static int omap_gpio_get(struct gpio_chip *chip, unsigned offset) { struct gpio_bank *bank; u32 mask; @@ -963,13 +966,13 @@ static int gpio_get(struct gpio_chip *chip, unsigned offset) bank = container_of(chip, struct gpio_bank, chip); mask = (BIT(offset)); - if (gpio_is_input(bank, mask)) - return _get_gpio_datain(bank, offset); + if (omap_gpio_is_input(bank, mask)) + return omap_get_gpio_datain(bank, offset); else - return _get_gpio_dataout(bank, offset); + return omap_get_gpio_dataout(bank, offset); } -static int gpio_output(struct gpio_chip *chip, unsigned offset, int value) +static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_bank *bank; unsigned long flags; @@ -977,13 +980,13 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value) bank = container_of(chip, struct gpio_bank, chip); spin_lock_irqsave(&bank->lock, flags); 
bank->set_dataout(bank, offset, value); - _set_gpio_direction(bank, offset, 0); + omap_set_gpio_direction(bank, offset, 0); spin_unlock_irqrestore(&bank->lock, flags); return 0; } -static int gpio_debounce(struct gpio_chip *chip, unsigned offset, - unsigned debounce) +static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset, + unsigned debounce) { struct gpio_bank *bank; unsigned long flags; @@ -991,13 +994,13 @@ static int gpio_debounce(struct gpio_chip *chip, unsigned offset, bank = container_of(chip, struct gpio_bank, chip); spin_lock_irqsave(&bank->lock, flags); - _set_gpio_debounce(bank, offset, debounce); + omap2_set_gpio_debounce(bank, offset, debounce); spin_unlock_irqrestore(&bank->lock, flags); return 0; } -static void gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_bank *bank; unsigned long flags; @@ -1025,11 +1028,6 @@ static void __init omap_gpio_show_rev(struct gpio_bank *bank) called = true; } -/* This lock class tells lockdep that GPIO irqs are in a different - * category than their parents, so it won't report false recursion. - */ -static struct lock_class_key gpio_lock_class; - static void omap_gpio_mod_init(struct gpio_bank *bank) { void __iomem *base = bank->base; @@ -1043,8 +1041,10 @@ static void omap_gpio_mod_init(struct gpio_bank *bank) return; } - _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv); - _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv); + omap_gpio_rmw(base, bank->regs->irqenable, l, + bank->regs->irqenable_inv); + omap_gpio_rmw(base, bank->regs->irqstatus, l, + !bank->regs->irqenable_inv); if (bank->regs->debounce_en) writel_relaxed(0, base + bank->regs->debounce_en); @@ -1078,10 +1078,10 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, /* NOTE: No ack required, reading IRQ status clears it. 
*/ ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; - ct->chip.irq_set_type = gpio_irq_type; + ct->chip.irq_set_type = omap_gpio_irq_type; if (bank->regs->wkup_en) - ct->chip.irq_set_wake = gpio_wake_enable; + ct->chip.irq_set_wake = omap_gpio_wake_enable; ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride; irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, @@ -1101,12 +1101,12 @@ static int omap_gpio_chip_init(struct gpio_bank *bank) */ bank->chip.request = omap_gpio_request; bank->chip.free = omap_gpio_free; - bank->chip.get_direction = gpio_get_direction; - bank->chip.direction_input = gpio_input; - bank->chip.get = gpio_get; - bank->chip.direction_output = gpio_output; - bank->chip.set_debounce = gpio_debounce; - bank->chip.set = gpio_set; + bank->chip.get_direction = omap_gpio_get_direction; + bank->chip.direction_input = omap_gpio_input; + bank->chip.get = omap_gpio_get; + bank->chip.direction_output = omap_gpio_output; + bank->chip.set_debounce = omap_gpio_debounce; + bank->chip.set = omap_gpio_set; if (bank->is_mpuio) { bank->chip.label = "mpuio"; if (bank->regs->wkup_en) @@ -1138,7 +1138,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank) #endif ret = gpiochip_irqchip_add(&bank->chip, &gpio_irq_chip, - irq_base, gpio_irq_handler, + irq_base, omap_gpio_irq_handler, IRQ_TYPE_NONE); if (ret) { @@ -1148,11 +1148,10 @@ static int omap_gpio_chip_init(struct gpio_bank *bank) } gpiochip_set_chained_irqchip(&bank->chip, &gpio_irq_chip, - bank->irq, gpio_irq_handler); + bank->irq, omap_gpio_irq_handler); for (j = 0; j < bank->width; j++) { int irq = irq_find_mapping(bank->chip.irqdomain, j); - irq_set_lockdep_class(irq, &gpio_lock_class); if (bank->is_mpuio) { omap_mpuio_alloc_gc(bank, irq, bank->width); irq_set_chip_and_handler(irq, NULL, NULL); @@ -1217,9 +1216,9 @@ static int omap_gpio_probe(struct platform_device *pdev) } if (bank->regs->set_dataout && bank->regs->clr_dataout) - bank->set_dataout = _set_gpio_dataout_reg; + bank->set_dataout = omap_set_gpio_dataout_reg; else - bank->set_dataout = _set_gpio_dataout_mask; + bank->set_dataout = omap_set_gpio_dataout_mask; spin_lock_init(&bank->lock); @@ -1238,7 +1237,7 @@ static int omap_gpio_probe(struct platform_device *pdev) pm_runtime_get_sync(bank->dev); if (bank->is_mpuio) - mpuio_init(bank); + omap_mpuio_init(bank); omap_gpio_mod_init(bank); @@ -1320,7 +1319,7 @@ update_gpio_context_count: bank->context_loss_count = bank->get_context_loss_count(bank->dev); - _gpio_dbck_disable(bank); + omap_gpio_dbck_disable(bank); spin_unlock_irqrestore(&bank->lock, flags); return 0; @@ -1351,7 +1350,7 @@ static int omap_gpio_runtime_resume(struct device *dev) bank->get_context_loss_count(bank->dev); } - _gpio_dbck_enable(bank); + omap_gpio_dbck_enable(bank); /* * In ->runtime_suspend(), level-triggered, wakeup-enabled diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index 86bdbe362068..171a6389f9ce 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c @@ -210,7 +210,8 @@ static int palmas_gpio_remove(struct platform_device *pdev) { struct palmas_gpio *palmas_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&palmas_gpio->gpio_chip); + gpiochip_remove(&palmas_gpio->gpio_chip); + return 0; } static struct platform_driver palmas_gpio_driver = { diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index e721a37c3473..f9961eea2120 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -765,12 +765,7 @@ static 
int pca953x_remove(struct i2c_client *client) } } - ret = gpiochip_remove(&chip->gpio_chip); - if (ret) { - dev_err(&client->dev, "%s failed, %d\n", - "gpiochip_remove()", ret); - return ret; - } + gpiochip_remove(&chip->gpio_chip); return 0; } diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 27b46751ea7e..236708ad0a5b 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -444,9 +444,7 @@ static int pcf857x_remove(struct i2c_client *client) if (client->irq) pcf857x_irq_domain_cleanup(gpio); - status = gpiochip_remove(&gpio->chip); - if (status) - dev_err(&client->dev, "%s --> %d\n", "remove", status); + gpiochip_remove(&gpio->chip); return status; } diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index d6eac9b17db9..e0ac549dccb5 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -426,9 +426,7 @@ end: err_request_irq: irq_free_descs(irq_base, gpio_pins[chip->ioh]); - - if (gpiochip_remove(&chip->gpio)) - dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); + gpiochip_remove(&chip->gpio); err_gpiochip_add: pci_iounmap(pdev, chip->base); @@ -447,7 +445,6 @@ err_pci_enable: static void pch_gpio_remove(struct pci_dev *pdev) { - int err; struct pch_gpio *chip = pci_get_drvdata(pdev); if (chip->irq_base != -1) { @@ -456,10 +453,7 @@ static void pch_gpio_remove(struct pci_dev *pdev) irq_free_descs(chip->irq_base, gpio_pins[chip->ioh]); } - err = gpiochip_remove(&chip->gpio); - if (err) - dev_err(&pdev->dev, "Failed gpiochip_remove\n"); - + gpiochip_remove(&chip->gpio); pci_iounmap(pdev, chip->base); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 42e6e64f2120..ad3feec0075e 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -498,7 +498,7 @@ static int pxa_gpio_nums(struct platform_device *pdev) } #ifdef CONFIG_OF -static struct of_device_id pxa_gpio_dt_ids[] = { +static const struct of_device_id pxa_gpio_dt_ids[] = { { .compatible = "intel,pxa25x-gpio", .data = &pxa25x_id, }, { .compatible = "intel,pxa26x-gpio", .data = &pxa26x_id, }, { .compatible = "intel,pxa27x-gpio", .data = &pxa27x_id, }, @@ -649,6 +649,11 @@ static int pxa_gpio_probe(struct platform_device *pdev) handle_edge_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } + } else { + if (irq0 > 0) + irq_set_chained_handler(irq0, pxa_gpio_demux_handler); + if (irq1 > 0) + irq_set_chained_handler(irq1, pxa_gpio_demux_handler); } irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler); diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c index 562b0c4d9cc8..769233d2da6d 100644 --- a/drivers/gpio/gpio-rc5t583.c +++ b/drivers/gpio/gpio-rc5t583.c @@ -148,7 +148,8 @@ static int rc5t583_gpio_remove(struct platform_device *pdev) { struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&rc5t583_gpio->gpio_chip); + gpiochip_remove(&rc5t583_gpio->gpio_chip); + return 0; } static struct platform_driver rc5t583_gpio_driver = { diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index b6ae89ea8811..bf6c09450fee 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -240,9 +240,9 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset) /* testing on r8a7790 shows that INDT does not show correct pin state * when configured as output, so use OUTDT in case of output pins */ if (gpio_rcar_read(gpio_to_priv(chip), INOUTSEL) & bit) - return (int)(gpio_rcar_read(gpio_to_priv(chip), 
OUTDT) & bit); + return !!(gpio_rcar_read(gpio_to_priv(chip), OUTDT) & bit); else - return (int)(gpio_rcar_read(gpio_to_priv(chip), INDT) & bit); + return !!(gpio_rcar_read(gpio_to_priv(chip), INDT) & bit); } static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value) @@ -472,11 +472,8 @@ err0: static int gpio_rcar_remove(struct platform_device *pdev) { struct gpio_rcar_priv *p = platform_get_drvdata(pdev); - int ret; - ret = gpiochip_remove(&p->gpio_chip); - if (ret) - return ret; + gpiochip_remove(&p->gpio_chip); irq_domain_remove(p->irq_domain); pm_runtime_put(&pdev->dev); diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c index 9fa7e53331c9..d729bc8a554d 100644 --- a/drivers/gpio/gpio-rdc321x.c +++ b/drivers/gpio/gpio-rdc321x.c @@ -199,14 +199,11 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) static int rdc321x_gpio_remove(struct platform_device *pdev) { - int ret; struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev); - ret = gpiochip_remove(&rdc321x_gpio_dev->chip); - if (ret) - dev_err(&pdev->dev, "failed to unregister chip\n"); + gpiochip_remove(&rdc321x_gpio_dev->chip); - return ret; + return 0; } static struct platform_driver rdc321x_gpio_driver = { diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 07105ee5c9ae..3810da47043f 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -32,10 +32,7 @@ #include <mach/map.h> #include <mach/regs-gpio.h> - -#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX) #include <mach/gpio-samsung.h> -#endif #include <plat/cpu.h> #include <plat/gpio-core.h> @@ -358,47 +355,6 @@ static unsigned s3c24xx_gpio_getcfg_abank(struct samsung_gpio_chip *chip, } #endif -#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) -static int s5p64x0_gpio_setcfg_rbank(struct samsung_gpio_chip *chip, - unsigned int off, unsigned int cfg) -{ - void __iomem *reg = chip->base; - unsigned int shift; - u32 con; - - switch (off) { - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - shift = (off & 7) * 4; - reg -= 4; - break; - case 6: - shift = ((off + 1) & 7) * 4; - reg -= 4; - break; - default: - shift = ((off + 1) & 7) * 4; - break; - } - - if (samsung_gpio_is_cfg_special(cfg)) { - cfg &= 0xf; - cfg <<= shift; - } - - con = __raw_readl(reg); - con &= ~(0xf << shift); - con |= cfg; - __raw_writel(con, reg); - - return 0; -} -#endif - static void __init samsung_gpiolib_set_cfg(struct samsung_gpio_cfg *chipcfg, int nr_chips) { @@ -426,16 +382,6 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = { }; #endif -#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) -static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = { - .cfg_eint = 0x3, - .set_config = s5p64x0_gpio_setcfg_rbank, - .get_config = samsung_gpio_getcfg_4bit, - .set_pull = samsung_gpio_setpull_updown, - .get_pull = samsung_gpio_getpull_updown, -}; -#endif - static struct samsung_gpio_cfg samsung_gpio_cfgs[] = { [0] = { .cfg_eint = 0x0, @@ -708,91 +654,6 @@ static int s3c24xx_gpiolib_banka_output(struct gpio_chip *chip, } #endif -/* The next set of routines are for the case of s5p64x0 bank r */ - -static int s5p64x0_gpiolib_rbank_input(struct gpio_chip *chip, - unsigned int offset) -{ - struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip); - void __iomem *base = ourchip->base; - void __iomem *regcon = base; - unsigned long con; - unsigned long flags; - - switch (offset) { - case 6: - offset += 1; - case 0: - case 1: - case 2: - case 3: - case 4: - 
case 5: - regcon -= 4; - break; - default: - offset -= 7; - break; - } - - samsung_gpio_lock(ourchip, flags); - - con = __raw_readl(regcon); - con &= ~(0xf << con_4bit_shift(offset)); - __raw_writel(con, regcon); - - samsung_gpio_unlock(ourchip, flags); - - return 0; -} - -static int s5p64x0_gpiolib_rbank_output(struct gpio_chip *chip, - unsigned int offset, int value) -{ - struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip); - void __iomem *base = ourchip->base; - void __iomem *regcon = base; - unsigned long con; - unsigned long dat; - unsigned long flags; - unsigned con_offset = offset; - - switch (con_offset) { - case 6: - con_offset += 1; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - regcon -= 4; - break; - default: - con_offset -= 7; - break; - } - - samsung_gpio_lock(ourchip, flags); - - con = __raw_readl(regcon); - con &= ~(0xf << con_4bit_shift(con_offset)); - con |= 0x1 << con_4bit_shift(con_offset); - - dat = __raw_readl(base + GPIODAT_OFF); - if (value) - dat |= 1 << offset; - else - dat &= ~(1 << offset); - - __raw_writel(con, regcon); - __raw_writel(dat, base + GPIODAT_OFF); - - samsung_gpio_unlock(ourchip, flags); - - return 0; -} - static void samsung_gpiolib_set(struct gpio_chip *chip, unsigned offset, int value) { @@ -999,20 +860,6 @@ static void __init samsung_gpiolib_add_4bit2_chips(struct samsung_gpio_chip *chi } } -static void __init s5p64x0_gpiolib_add_rbank(struct samsung_gpio_chip *chip, - int nr_chips) -{ - for (; nr_chips > 0; nr_chips--, chip++) { - chip->chip.direction_input = s5p64x0_gpiolib_rbank_input; - chip->chip.direction_output = s5p64x0_gpiolib_rbank_output; - - if (!chip->pm) - chip->pm = __gpio_pm(&samsung_gpio_pm_4bit); - - samsung_gpiolib_add(chip); - } -} - int samsung_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset) { struct samsung_gpio_chip *samsung_chip = container_of(chip, struct samsung_gpio_chip, chip); @@ -1319,773 +1166,9 @@ static struct samsung_gpio_chip s3c64xx_gpios_2bit[] = { #endif }; -/* - * S5P6440 GPIO bank summary: - * - * Bank GPIOs Style SlpCon ExtInt Group - * A 6 4Bit Yes 1 - * B 7 4Bit Yes 1 - * C 8 4Bit Yes 2 - * F 2 2Bit Yes 4 [1] - * G 7 4Bit Yes 5 - * H 10 4Bit[2] Yes 6 - * I 16 2Bit Yes None - * J 12 2Bit Yes None - * N 16 2Bit No IRQ_EINT - * P 8 2Bit Yes 8 - * R 15 4Bit[2] Yes 8 - */ - -static struct samsung_gpio_chip s5p6440_gpios_4bit[] = { -#ifdef CONFIG_CPU_S5P6440 - { - .chip = { - .base = S5P6440_GPA(0), - .ngpio = S5P6440_GPIO_A_NR, - .label = "GPA", - }, - }, { - .chip = { - .base = S5P6440_GPB(0), - .ngpio = S5P6440_GPIO_B_NR, - .label = "GPB", - }, - }, { - .chip = { - .base = S5P6440_GPC(0), - .ngpio = S5P6440_GPIO_C_NR, - .label = "GPC", - }, - }, { - .base = S5P64X0_GPG_BASE, - .chip = { - .base = S5P6440_GPG(0), - .ngpio = S5P6440_GPIO_G_NR, - .label = "GPG", - }, - }, -#endif -}; - -static struct samsung_gpio_chip s5p6440_gpios_4bit2[] = { -#ifdef CONFIG_CPU_S5P6440 - { - .base = S5P64X0_GPH_BASE + 0x4, - .chip = { - .base = S5P6440_GPH(0), - .ngpio = S5P6440_GPIO_H_NR, - .label = "GPH", - }, - }, -#endif -}; - -static struct samsung_gpio_chip s5p6440_gpios_rbank[] = { -#ifdef CONFIG_CPU_S5P6440 - { - .base = S5P64X0_GPR_BASE + 0x4, - .config = &s5p64x0_gpio_cfg_rbank, - .chip = { - .base = S5P6440_GPR(0), - .ngpio = S5P6440_GPIO_R_NR, - .label = "GPR", - }, - }, -#endif -}; - -static struct samsung_gpio_chip s5p6440_gpios_2bit[] = { -#ifdef CONFIG_CPU_S5P6440 - { - .base = S5P64X0_GPF_BASE, - .config = &samsung_gpio_cfgs[6], - .chip = { - .base = S5P6440_GPF(0), - .ngpio 
= S5P6440_GPIO_F_NR, - .label = "GPF", - }, - }, { - .base = S5P64X0_GPI_BASE, - .config = &samsung_gpio_cfgs[4], - .chip = { - .base = S5P6440_GPI(0), - .ngpio = S5P6440_GPIO_I_NR, - .label = "GPI", - }, - }, { - .base = S5P64X0_GPJ_BASE, - .config = &samsung_gpio_cfgs[4], - .chip = { - .base = S5P6440_GPJ(0), - .ngpio = S5P6440_GPIO_J_NR, - .label = "GPJ", - }, - }, { - .base = S5P64X0_GPN_BASE, - .config = &samsung_gpio_cfgs[5], - .chip = { - .base = S5P6440_GPN(0), - .ngpio = S5P6440_GPIO_N_NR, - .label = "GPN", - }, - }, { - .base = S5P64X0_GPP_BASE, - .config = &samsung_gpio_cfgs[6], - .chip = { - .base = S5P6440_GPP(0), - .ngpio = S5P6440_GPIO_P_NR, - .label = "GPP", - }, - }, -#endif -}; - -/* - * S5P6450 GPIO bank summary: - * - * Bank GPIOs Style SlpCon ExtInt Group - * A 6 4Bit Yes 1 - * B 7 4Bit Yes 1 - * C 8 4Bit Yes 2 - * D 8 4Bit Yes None - * F 2 2Bit Yes None - * G 14 4Bit[2] Yes 5 - * H 10 4Bit[2] Yes 6 - * I 16 2Bit Yes None - * J 12 2Bit Yes None - * K 5 4Bit Yes None - * N 16 2Bit No IRQ_EINT - * P 11 2Bit Yes 8 - * Q 14 2Bit Yes None - * R 15 4Bit[2] Yes None - * S 8 2Bit Yes None - * - * [1] BANKF pins 14,15 do not form part of the external interrupt sources - * [2] BANK has two control registers, GPxCON0 and GPxCON1 - */ - -static struct samsung_gpio_chip s5p6450_gpios_4bit[] = { -#ifdef CONFIG_CPU_S5P6450 - { - .chip = { - .base = S5P6450_GPA(0), - .ngpio = S5P6450_GPIO_A_NR, - .label = "GPA", - }, - }, { - .chip = { - .base = S5P6450_GPB(0), - .ngpio = S5P6450_GPIO_B_NR, - .label = "GPB", - }, - }, { - .chip = { - .base = S5P6450_GPC(0), - .ngpio = S5P6450_GPIO_C_NR, - .label = "GPC", - }, - }, { - .chip = { - .base = S5P6450_GPD(0), - .ngpio = S5P6450_GPIO_D_NR, - .label = "GPD", - }, - }, { - .base = S5P6450_GPK_BASE, - .chip = { - .base = S5P6450_GPK(0), - .ngpio = S5P6450_GPIO_K_NR, - .label = "GPK", - }, - }, -#endif -}; - -static struct samsung_gpio_chip s5p6450_gpios_4bit2[] = { -#ifdef CONFIG_CPU_S5P6450 - { - .base = S5P64X0_GPG_BASE + 0x4, - .chip = { - .base = S5P6450_GPG(0), - .ngpio = S5P6450_GPIO_G_NR, - .label = "GPG", - }, - }, { - .base = S5P64X0_GPH_BASE + 0x4, - .chip = { - .base = S5P6450_GPH(0), - .ngpio = S5P6450_GPIO_H_NR, - .label = "GPH", - }, - }, -#endif -}; - -static struct samsung_gpio_chip s5p6450_gpios_rbank[] = { -#ifdef CONFIG_CPU_S5P6450 - { - .base = S5P64X0_GPR_BASE + 0x4, - .config = &s5p64x0_gpio_cfg_rbank, - .chip = { - .base = S5P6450_GPR(0), - .ngpio = S5P6450_GPIO_R_NR, - .label = "GPR", - }, - }, -#endif -}; - -static struct samsung_gpio_chip s5p6450_gpios_2bit[] = { -#ifdef CONFIG_CPU_S5P6450 - { - .base = S5P64X0_GPF_BASE, - .config = &samsung_gpio_cfgs[6], - .chip = { - .base = S5P6450_GPF(0), - .ngpio = S5P6450_GPIO_F_NR, - .label = "GPF", - }, - }, { - .base = S5P64X0_GPI_BASE, - .config = &samsung_gpio_cfgs[4], - .chip = { - .base = S5P6450_GPI(0), - .ngpio = S5P6450_GPIO_I_NR, - .label = "GPI", - }, - }, { - .base = S5P64X0_GPJ_BASE, - .config = &samsung_gpio_cfgs[4], - .chip = { - .base = S5P6450_GPJ(0), - .ngpio = S5P6450_GPIO_J_NR, - .label = "GPJ", - }, - }, { - .base = S5P64X0_GPN_BASE, - .config = &samsung_gpio_cfgs[5], - .chip = { - .base = S5P6450_GPN(0), - .ngpio = S5P6450_GPIO_N_NR, - .label = "GPN", - }, - }, { - .base = S5P64X0_GPP_BASE, - .config = &samsung_gpio_cfgs[6], - .chip = { - .base = S5P6450_GPP(0), - .ngpio = S5P6450_GPIO_P_NR, - .label = "GPP", - }, - }, { - .base = S5P6450_GPQ_BASE, - .config = &samsung_gpio_cfgs[5], - .chip = { - .base = S5P6450_GPQ(0), - .ngpio = S5P6450_GPIO_Q_NR, - 
.label = "GPQ", - }, - }, { - .base = S5P6450_GPS_BASE, - .config = &samsung_gpio_cfgs[6], - .chip = { - .base = S5P6450_GPS(0), - .ngpio = S5P6450_GPIO_S_NR, - .label = "GPS", - }, - }, -#endif -}; - -/* - * S5PC100 GPIO bank summary: - * - * Bank GPIOs Style INT Type - * A0 8 4Bit GPIO_INT0 - * A1 5 4Bit GPIO_INT1 - * B 8 4Bit GPIO_INT2 - * C 5 4Bit GPIO_INT3 - * D 7 4Bit GPIO_INT4 - * E0 8 4Bit GPIO_INT5 - * E1 6 4Bit GPIO_INT6 - * F0 8 4Bit GPIO_INT7 - * F1 8 4Bit GPIO_INT8 - * F2 8 4Bit GPIO_INT9 - * F3 4 4Bit GPIO_INT10 - * G0 8 4Bit GPIO_INT11 - * G1 3 4Bit GPIO_INT12 - * G2 7 4Bit GPIO_INT13 - * G3 7 4Bit GPIO_INT14 - * H0 8 4Bit WKUP_INT - * H1 8 4Bit WKUP_INT - * H2 8 4Bit WKUP_INT - * H3 8 4Bit WKUP_INT - * I 8 4Bit GPIO_INT15 - * J0 8 4Bit GPIO_INT16 - * J1 5 4Bit GPIO_INT17 - * J2 8 4Bit GPIO_INT18 - * J3 8 4Bit GPIO_INT19 - * J4 4 4Bit GPIO_INT20 - * K0 8 4Bit None - * K1 6 4Bit None - * K2 8 4Bit None - * K3 8 4Bit None - * L0 8 4Bit None - * L1 8 4Bit None - * L2 8 4Bit None - * L3 8 4Bit None - */ - -static struct samsung_gpio_chip s5pc100_gpios_4bit[] = { -#ifdef CONFIG_CPU_S5PC100 - { - .chip = { - .base = S5PC100_GPA0(0), - .ngpio = S5PC100_GPIO_A0_NR, - .label = "GPA0", - }, - }, { - .chip = { - .base = S5PC100_GPA1(0), - .ngpio = S5PC100_GPIO_A1_NR, - .label = "GPA1", - }, - }, { - .chip = { - .base = S5PC100_GPB(0), - .ngpio = S5PC100_GPIO_B_NR, - .label = "GPB", - }, - }, { - .chip = { - .base = S5PC100_GPC(0), - .ngpio = S5PC100_GPIO_C_NR, - .label = "GPC", - }, - }, { - .chip = { - .base = S5PC100_GPD(0), - .ngpio = S5PC100_GPIO_D_NR, - .label = "GPD", - }, - }, { - .chip = { - .base = S5PC100_GPE0(0), - .ngpio = S5PC100_GPIO_E0_NR, - .label = "GPE0", - }, - }, { - .chip = { - .base = S5PC100_GPE1(0), - .ngpio = S5PC100_GPIO_E1_NR, - .label = "GPE1", - }, - }, { - .chip = { - .base = S5PC100_GPF0(0), - .ngpio = S5PC100_GPIO_F0_NR, - .label = "GPF0", - }, - }, { - .chip = { - .base = S5PC100_GPF1(0), - .ngpio = S5PC100_GPIO_F1_NR, - .label = "GPF1", - }, - }, { - .chip = { - .base = S5PC100_GPF2(0), - .ngpio = S5PC100_GPIO_F2_NR, - .label = "GPF2", - }, - }, { - .chip = { - .base = S5PC100_GPF3(0), - .ngpio = S5PC100_GPIO_F3_NR, - .label = "GPF3", - }, - }, { - .chip = { - .base = S5PC100_GPG0(0), - .ngpio = S5PC100_GPIO_G0_NR, - .label = "GPG0", - }, - }, { - .chip = { - .base = S5PC100_GPG1(0), - .ngpio = S5PC100_GPIO_G1_NR, - .label = "GPG1", - }, - }, { - .chip = { - .base = S5PC100_GPG2(0), - .ngpio = S5PC100_GPIO_G2_NR, - .label = "GPG2", - }, - }, { - .chip = { - .base = S5PC100_GPG3(0), - .ngpio = S5PC100_GPIO_G3_NR, - .label = "GPG3", - }, - }, { - .chip = { - .base = S5PC100_GPI(0), - .ngpio = S5PC100_GPIO_I_NR, - .label = "GPI", - }, - }, { - .chip = { - .base = S5PC100_GPJ0(0), - .ngpio = S5PC100_GPIO_J0_NR, - .label = "GPJ0", - }, - }, { - .chip = { - .base = S5PC100_GPJ1(0), - .ngpio = S5PC100_GPIO_J1_NR, - .label = "GPJ1", - }, - }, { - .chip = { - .base = S5PC100_GPJ2(0), - .ngpio = S5PC100_GPIO_J2_NR, - .label = "GPJ2", - }, - }, { - .chip = { - .base = S5PC100_GPJ3(0), - .ngpio = S5PC100_GPIO_J3_NR, - .label = "GPJ3", - }, - }, { - .chip = { - .base = S5PC100_GPJ4(0), - .ngpio = S5PC100_GPIO_J4_NR, - .label = "GPJ4", - }, - }, { - .chip = { - .base = S5PC100_GPK0(0), - .ngpio = S5PC100_GPIO_K0_NR, - .label = "GPK0", - }, - }, { - .chip = { - .base = S5PC100_GPK1(0), - .ngpio = S5PC100_GPIO_K1_NR, - .label = "GPK1", - }, - }, { - .chip = { - .base = S5PC100_GPK2(0), - .ngpio = S5PC100_GPIO_K2_NR, - .label = "GPK2", - }, - }, { - .chip = { - .base = 
S5PC100_GPK3(0), - .ngpio = S5PC100_GPIO_K3_NR, - .label = "GPK3", - }, - }, { - .chip = { - .base = S5PC100_GPL0(0), - .ngpio = S5PC100_GPIO_L0_NR, - .label = "GPL0", - }, - }, { - .chip = { - .base = S5PC100_GPL1(0), - .ngpio = S5PC100_GPIO_L1_NR, - .label = "GPL1", - }, - }, { - .chip = { - .base = S5PC100_GPL2(0), - .ngpio = S5PC100_GPIO_L2_NR, - .label = "GPL2", - }, - }, { - .chip = { - .base = S5PC100_GPL3(0), - .ngpio = S5PC100_GPIO_L3_NR, - .label = "GPL3", - }, - }, { - .chip = { - .base = S5PC100_GPL4(0), - .ngpio = S5PC100_GPIO_L4_NR, - .label = "GPL4", - }, - }, { - .base = (S5P_VA_GPIO + 0xC00), - .irq_base = IRQ_EINT(0), - .chip = { - .base = S5PC100_GPH0(0), - .ngpio = S5PC100_GPIO_H0_NR, - .label = "GPH0", - .to_irq = samsung_gpiolib_to_irq, - }, - }, { - .base = (S5P_VA_GPIO + 0xC20), - .irq_base = IRQ_EINT(8), - .chip = { - .base = S5PC100_GPH1(0), - .ngpio = S5PC100_GPIO_H1_NR, - .label = "GPH1", - .to_irq = samsung_gpiolib_to_irq, - }, - }, { - .base = (S5P_VA_GPIO + 0xC40), - .irq_base = IRQ_EINT(16), - .chip = { - .base = S5PC100_GPH2(0), - .ngpio = S5PC100_GPIO_H2_NR, - .label = "GPH2", - .to_irq = samsung_gpiolib_to_irq, - }, - }, { - .base = (S5P_VA_GPIO + 0xC60), - .irq_base = IRQ_EINT(24), - .chip = { - .base = S5PC100_GPH3(0), - .ngpio = S5PC100_GPIO_H3_NR, - .label = "GPH3", - .to_irq = samsung_gpiolib_to_irq, - }, - }, -#endif -}; - -/* - * Followings are the gpio banks in S5PV210/S5PC110 - * - * The 'config' member when left to NULL, is initialized to the default - * structure samsung_gpio_cfgs[3] in the init function below. - * - * The 'base' member is also initialized in the init function below. - * Note: The initialization of 'base' member of samsung_gpio_chip structure - * uses the above macro and depends on the banks being listed in order here. 
- */ - -static struct samsung_gpio_chip s5pv210_gpios_4bit[] = { -#ifdef CONFIG_CPU_S5PV210 - { - .chip = { - .base = S5PV210_GPA0(0), - .ngpio = S5PV210_GPIO_A0_NR, - .label = "GPA0", - }, - }, { - .chip = { - .base = S5PV210_GPA1(0), - .ngpio = S5PV210_GPIO_A1_NR, - .label = "GPA1", - }, - }, { - .chip = { - .base = S5PV210_GPB(0), - .ngpio = S5PV210_GPIO_B_NR, - .label = "GPB", - }, - }, { - .chip = { - .base = S5PV210_GPC0(0), - .ngpio = S5PV210_GPIO_C0_NR, - .label = "GPC0", - }, - }, { - .chip = { - .base = S5PV210_GPC1(0), - .ngpio = S5PV210_GPIO_C1_NR, - .label = "GPC1", - }, - }, { - .chip = { - .base = S5PV210_GPD0(0), - .ngpio = S5PV210_GPIO_D0_NR, - .label = "GPD0", - }, - }, { - .chip = { - .base = S5PV210_GPD1(0), - .ngpio = S5PV210_GPIO_D1_NR, - .label = "GPD1", - }, - }, { - .chip = { - .base = S5PV210_GPE0(0), - .ngpio = S5PV210_GPIO_E0_NR, - .label = "GPE0", - }, - }, { - .chip = { - .base = S5PV210_GPE1(0), - .ngpio = S5PV210_GPIO_E1_NR, - .label = "GPE1", - }, - }, { - .chip = { - .base = S5PV210_GPF0(0), - .ngpio = S5PV210_GPIO_F0_NR, - .label = "GPF0", - }, - }, { - .chip = { - .base = S5PV210_GPF1(0), - .ngpio = S5PV210_GPIO_F1_NR, - .label = "GPF1", - }, - }, { - .chip = { - .base = S5PV210_GPF2(0), - .ngpio = S5PV210_GPIO_F2_NR, - .label = "GPF2", - }, - }, { - .chip = { - .base = S5PV210_GPF3(0), - .ngpio = S5PV210_GPIO_F3_NR, - .label = "GPF3", - }, - }, { - .chip = { - .base = S5PV210_GPG0(0), - .ngpio = S5PV210_GPIO_G0_NR, - .label = "GPG0", - }, - }, { - .chip = { - .base = S5PV210_GPG1(0), - .ngpio = S5PV210_GPIO_G1_NR, - .label = "GPG1", - }, - }, { - .chip = { - .base = S5PV210_GPG2(0), - .ngpio = S5PV210_GPIO_G2_NR, - .label = "GPG2", - }, - }, { - .chip = { - .base = S5PV210_GPG3(0), - .ngpio = S5PV210_GPIO_G3_NR, - .label = "GPG3", - }, - }, { - .chip = { - .base = S5PV210_GPI(0), - .ngpio = S5PV210_GPIO_I_NR, - .label = "GPI", - }, - }, { - .chip = { - .base = S5PV210_GPJ0(0), - .ngpio = S5PV210_GPIO_J0_NR, - .label = "GPJ0", - }, - }, { - .chip = { - .base = S5PV210_GPJ1(0), - .ngpio = S5PV210_GPIO_J1_NR, - .label = "GPJ1", - }, - }, { - .chip = { - .base = S5PV210_GPJ2(0), - .ngpio = S5PV210_GPIO_J2_NR, - .label = "GPJ2", - }, - }, { - .chip = { - .base = S5PV210_GPJ3(0), - .ngpio = S5PV210_GPIO_J3_NR, - .label = "GPJ3", - }, - }, { - .chip = { - .base = S5PV210_GPJ4(0), - .ngpio = S5PV210_GPIO_J4_NR, - .label = "GPJ4", - }, - }, { - .chip = { - .base = S5PV210_MP01(0), - .ngpio = S5PV210_GPIO_MP01_NR, - .label = "MP01", - }, - }, { - .chip = { - .base = S5PV210_MP02(0), - .ngpio = S5PV210_GPIO_MP02_NR, - .label = "MP02", - }, - }, { - .chip = { - .base = S5PV210_MP03(0), - .ngpio = S5PV210_GPIO_MP03_NR, - .label = "MP03", - }, - }, { - .chip = { - .base = S5PV210_MP04(0), - .ngpio = S5PV210_GPIO_MP04_NR, - .label = "MP04", - }, - }, { - .chip = { - .base = S5PV210_MP05(0), - .ngpio = S5PV210_GPIO_MP05_NR, - .label = "MP05", - }, - }, { - .base = (S5P_VA_GPIO + 0xC00), - .irq_base = IRQ_EINT(0), - .chip = { - .base = S5PV210_GPH0(0), - .ngpio = S5PV210_GPIO_H0_NR, - .label = "GPH0", - .to_irq = samsung_gpiolib_to_irq, - }, - }, { - .base = (S5P_VA_GPIO + 0xC20), - .irq_base = IRQ_EINT(8), - .chip = { - .base = S5PV210_GPH1(0), - .ngpio = S5PV210_GPIO_H1_NR, - .label = "GPH1", - .to_irq = samsung_gpiolib_to_irq, - }, - }, { - .base = (S5P_VA_GPIO + 0xC40), - .irq_base = IRQ_EINT(16), - .chip = { - .base = S5PV210_GPH2(0), - .ngpio = S5PV210_GPIO_H2_NR, - .label = "GPH2", - .to_irq = samsung_gpiolib_to_irq, - }, - }, { - .base = (S5P_VA_GPIO + 0xC60), - 
.irq_base = IRQ_EINT(24), - .chip = { - .base = S5PV210_GPH3(0), - .ngpio = S5PV210_GPIO_H3_NR, - .label = "GPH3", - .to_irq = samsung_gpiolib_to_irq, - }, - }, -#endif -}; - /* TODO: cleanup soc_is_* */ static __init int samsung_gpiolib_init(void) { - struct samsung_gpio_chip *chip; - int i, nr_chips; - int group = 0; - /* * Currently there are two drivers that can provide GPIO support for * Samsung SoCs. For device tree enabled platforms, the new @@ -2109,54 +1192,6 @@ static __init int samsung_gpiolib_init(void) S3C64XX_VA_GPIO); samsung_gpiolib_add_4bit2_chips(s3c64xx_gpios_4bit2, ARRAY_SIZE(s3c64xx_gpios_4bit2)); - } else if (soc_is_s5p6440()) { - samsung_gpiolib_add_2bit_chips(s5p6440_gpios_2bit, - ARRAY_SIZE(s5p6440_gpios_2bit), NULL, 0x0); - samsung_gpiolib_add_4bit_chips(s5p6440_gpios_4bit, - ARRAY_SIZE(s5p6440_gpios_4bit), S5P_VA_GPIO); - samsung_gpiolib_add_4bit2_chips(s5p6440_gpios_4bit2, - ARRAY_SIZE(s5p6440_gpios_4bit2)); - s5p64x0_gpiolib_add_rbank(s5p6440_gpios_rbank, - ARRAY_SIZE(s5p6440_gpios_rbank)); - } else if (soc_is_s5p6450()) { - samsung_gpiolib_add_2bit_chips(s5p6450_gpios_2bit, - ARRAY_SIZE(s5p6450_gpios_2bit), NULL, 0x0); - samsung_gpiolib_add_4bit_chips(s5p6450_gpios_4bit, - ARRAY_SIZE(s5p6450_gpios_4bit), S5P_VA_GPIO); - samsung_gpiolib_add_4bit2_chips(s5p6450_gpios_4bit2, - ARRAY_SIZE(s5p6450_gpios_4bit2)); - s5p64x0_gpiolib_add_rbank(s5p6450_gpios_rbank, - ARRAY_SIZE(s5p6450_gpios_rbank)); - } else if (soc_is_s5pc100()) { - group = 0; - chip = s5pc100_gpios_4bit; - nr_chips = ARRAY_SIZE(s5pc100_gpios_4bit); - - for (i = 0; i < nr_chips; i++, chip++) { - if (!chip->config) { - chip->config = &samsung_gpio_cfgs[3]; - chip->group = group++; - } - } - samsung_gpiolib_add_4bit_chips(s5pc100_gpios_4bit, nr_chips, S5P_VA_GPIO); -#if defined(CONFIG_CPU_S5PC100) && defined(CONFIG_S5P_GPIO_INT) - s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR); -#endif - } else if (soc_is_s5pv210()) { - group = 0; - chip = s5pv210_gpios_4bit; - nr_chips = ARRAY_SIZE(s5pv210_gpios_4bit); - - for (i = 0; i < nr_chips; i++, chip++) { - if (!chip->config) { - chip->config = &samsung_gpio_cfgs[3]; - chip->group = group++; - } - } - samsung_gpiolib_add_4bit_chips(s5pv210_gpios_4bit, nr_chips, S5P_VA_GPIO); -#if defined(CONFIG_CPU_S5PV210) && defined(CONFIG_S5P_GPIO_INT) - s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR); -#endif } else { WARN(1, "Unknown SoC in gpio-samsung, no GPIOs added\n"); return -ENODEV; diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index a9b1cd16c848..41e91d70301e 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -290,8 +290,7 @@ static int sch_gpio_probe(struct platform_device *pdev) return 0; err_sch_gpio_resume: - if (gpiochip_remove(&sch_gpio_core)) - dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); + gpiochip_remove(&sch_gpio_core); err_sch_gpio_core: release_region(res->start, resource_size(res)); @@ -304,23 +303,14 @@ static int sch_gpio_remove(struct platform_device *pdev) { struct resource *res; if (gpio_ba) { - int err; - err = gpiochip_remove(&sch_gpio_core); - if (err) - dev_err(&pdev->dev, "%s failed, %d\n", - "gpiochip_remove()", err); - err = gpiochip_remove(&sch_gpio_resume); - if (err) - dev_err(&pdev->dev, "%s failed, %d\n", - "gpiochip_remove()", err); + gpiochip_remove(&sch_gpio_core); + gpiochip_remove(&sch_gpio_resume); res = platform_get_resource(pdev, IORESOURCE_IO, 0); release_region(res->start, resource_size(res)); gpio_ba = 0; - - return err; } return 0; 
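The gpiochip_remove() cleanups repeated across these hunks all follow the same shape: the call no longer returns an error code, so callers drop the check and the driver's .remove() callback simply returns 0. A minimal sketch of the resulting pattern, using a hypothetical "foo" driver rather than any file touched in this series:

/*
 * Illustrative sketch only: "foo_gpio" is a made-up driver, not part of
 * this series.  It mirrors the conversions above, where gpiochip_remove()
 * is void and cannot fail, so the remove path has nothing to report.
 */
static int foo_gpio_remove(struct platform_device *pdev)
{
	struct foo_gpio *foo = platform_get_drvdata(pdev);

	gpiochip_remove(&foo->chip);	/* void in this series; no error to check */
	return 0;
}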
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c index f942b80ee403..0cb11413e814 100644 --- a/drivers/gpio/gpio-sch311x.c +++ b/drivers/gpio/gpio-sch311x.c @@ -291,14 +291,12 @@ static int sch311x_gpio_remove(struct platform_device *pdev) { struct sch311x_pdev_data *pdata = pdev->dev.platform_data; struct sch311x_gpio_priv *priv = platform_get_drvdata(pdev); - int err, i; + int i; release_region(pdata->runtime_reg + GP1, 6); for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) { - err = gpiochip_remove(&priv->blocks[i].chip); - if (err) - return err; + gpiochip_remove(&priv->blocks[i].chip); dev_info(&pdev->dev, "SMSC SCH311x GPIO block %d unregistered.\n", i); } diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c index 7c6c518929bc..d8da36cd8123 100644 --- a/drivers/gpio/gpio-sodaville.c +++ b/drivers/gpio/gpio-sodaville.c @@ -265,9 +265,7 @@ static void sdv_gpio_remove(struct pci_dev *pdev) free_irq(pdev->irq, sd); irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS); - if (gpiochip_remove(&sd->bgpio.gc)) - dev_err(&pdev->dev, "gpiochip_remove() failed.\n"); - + gpiochip_remove(&sd->bgpio.gc); pci_release_region(pdev, GPIO_BAR); iounmap(sd->gpio_pub_base); pci_disable_device(pdev); diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 628b58494294..845025a57240 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -10,8 +10,6 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/gpio.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/mfd/stmpe.h> @@ -31,9 +29,7 @@ struct stmpe_gpio { struct stmpe *stmpe; struct device *dev; struct mutex irq_lock; - struct irq_domain *domain; unsigned norequest_mask; - /* Caches of interrupt control registers for bus_lock */ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS]; u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS]; @@ -101,13 +97,6 @@ static int stmpe_gpio_direction_input(struct gpio_chip *chip, return stmpe_set_bits(stmpe, reg, mask, 0); } -static int stmpe_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip); - - return irq_create_mapping(stmpe_gpio->domain, offset); -} - static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset) { struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip); @@ -126,14 +115,14 @@ static struct gpio_chip template_chip = { .get = stmpe_gpio_get, .direction_output = stmpe_gpio_direction_output, .set = stmpe_gpio_set, - .to_irq = stmpe_gpio_to_irq, .request = stmpe_gpio_request, .can_sleep = true, }; static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type) { - struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -160,14 +149,16 @@ static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type) static void stmpe_gpio_irq_lock(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc); mutex_lock(&stmpe_gpio->irq_lock); } static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct stmpe_gpio *stmpe_gpio = 
to_stmpe_gpio(gc); struct stmpe *stmpe = stmpe_gpio->stmpe; int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8); static const u8 regmap[] = { @@ -200,7 +191,8 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) static void stmpe_gpio_irq_mask(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -210,7 +202,8 @@ static void stmpe_gpio_irq_mask(struct irq_data *d) static void stmpe_gpio_irq_unmask(struct irq_data *d) { - struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -253,7 +246,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev) while (stat) { int bit = __ffs(stat); int line = bank * 8 + bit; - int child_irq = irq_find_mapping(stmpe_gpio->domain, + int child_irq = irq_find_mapping(stmpe_gpio->chip.irqdomain, line); handle_nested_irq(child_irq); @@ -271,56 +264,6 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev) return IRQ_HANDLED; } -static int stmpe_gpio_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - struct stmpe_gpio *stmpe_gpio = d->host_data; - - if (!stmpe_gpio) - return -EINVAL; - - irq_set_chip_data(irq, stmpe_gpio); - irq_set_chip_and_handler(irq, &stmpe_gpio_irq_chip, - handle_simple_irq); - irq_set_nested_thread(irq, 1); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif - - return 0; -} - -static void stmpe_gpio_irq_unmap(struct irq_domain *d, unsigned int irq) -{ -#ifdef CONFIG_ARM - set_irq_flags(irq, 0); -#endif - irq_set_chip_and_handler(irq, NULL, NULL); - irq_set_chip_data(irq, NULL); -} - -static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { - .unmap = stmpe_gpio_irq_unmap, - .map = stmpe_gpio_irq_map, - .xlate = irq_domain_xlate_twocell, -}; - -static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, - struct device_node *np) -{ - stmpe_gpio->domain = irq_domain_add_simple(np, - stmpe_gpio->chip.ngpio, 0, - &stmpe_gpio_irq_simple_ops, stmpe_gpio); - if (!stmpe_gpio->domain) { - dev_err(stmpe_gpio->dev, "failed to create irqdomain\n"); - return -ENOSYS; - } - - return 0; -} - static int stmpe_gpio_probe(struct platform_device *pdev) { struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent); @@ -358,30 +301,37 @@ static int stmpe_gpio_probe(struct platform_device *pdev) if (irq < 0) dev_info(&pdev->dev, - "device configured in no-irq mode; " + "device configured in no-irq mode: " "irqs are not available\n"); ret = stmpe_enable(stmpe, STMPE_BLOCK_GPIO); if (ret) goto out_free; - if (irq >= 0) { - ret = stmpe_gpio_irq_init(stmpe_gpio, np); - if (ret) - goto out_disable; - - ret = request_threaded_irq(irq, NULL, stmpe_gpio_irq, - IRQF_ONESHOT, "stmpe-gpio", stmpe_gpio); + if (irq > 0) { + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + stmpe_gpio_irq, IRQF_ONESHOT, + "stmpe-gpio", stmpe_gpio); if (ret) { dev_err(&pdev->dev, "unable to get irq: %d\n", ret); goto out_disable; } + ret = gpiochip_irqchip_add(&stmpe_gpio->chip, + &stmpe_gpio_irq_chip, + 0, + handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&pdev->dev, + "could not connect irqchip to gpiochip\n"); + return ret; + } } ret = gpiochip_add(&stmpe_gpio->chip); if 
(ret) { dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret); - goto out_freeirq; + goto out_disable; } if (pdata && pdata->setup) @@ -391,9 +341,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev) return 0; -out_freeirq: - if (irq >= 0) - free_irq(irq, stmpe_gpio); out_disable: stmpe_disable(stmpe, STMPE_BLOCK_GPIO); out_free: @@ -406,24 +353,14 @@ static int stmpe_gpio_remove(struct platform_device *pdev) struct stmpe_gpio *stmpe_gpio = platform_get_drvdata(pdev); struct stmpe *stmpe = stmpe_gpio->stmpe; struct stmpe_gpio_platform_data *pdata = stmpe->pdata->gpio; - int irq = platform_get_irq(pdev, 0); - int ret; if (pdata && pdata->remove) pdata->remove(stmpe, stmpe_gpio->chip.base); - ret = gpiochip_remove(&stmpe_gpio->chip); - if (ret < 0) { - dev_err(stmpe_gpio->dev, - "unable to remove gpiochip: %d\n", ret); - return ret; - } + gpiochip_remove(&stmpe_gpio->chip); stmpe_disable(stmpe, STMPE_BLOCK_GPIO); - if (irq >= 0) - free_irq(irq, stmpe_gpio); - kfree(stmpe_gpio); return 0; diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c index b51ca9f5c140..bce6c6108f20 100644 --- a/drivers/gpio/gpio-sx150x.c +++ b/drivers/gpio/gpio-sx150x.c @@ -615,19 +615,16 @@ static int sx150x_probe(struct i2c_client *client, return 0; probe_fail_post_gpiochip_add: - WARN_ON(gpiochip_remove(&chip->gpio_chip) < 0); + gpiochip_remove(&chip->gpio_chip); return rc; } static int sx150x_remove(struct i2c_client *client) { struct sx150x_chip *chip; - int rc; chip = i2c_get_clientdata(client); - rc = gpiochip_remove(&chip->gpio_chip); - if (rc < 0) - return rc; + gpiochip_remove(&chip->gpio_chip); if (chip->irq_summary >= 0) sx150x_remove_irq_chip(chip); diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index b50fe1297748..30884fbc750d 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c @@ -172,7 +172,8 @@ static int syscon_gpio_remove(struct platform_device *pdev) { struct syscon_gpio_priv *priv = platform_get_drvdata(pdev); - return gpiochip_remove(&priv->chip); + gpiochip_remove(&priv->chip); + return 0; } static struct platform_driver syscon_gpio_driver = { diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c index 07bce97647a6..9e615be8032c 100644 --- a/drivers/gpio/gpio-tb10x.c +++ b/drivers/gpio/gpio-tb10x.c @@ -291,7 +291,6 @@ fail_ioremap: static int __exit tb10x_gpio_remove(struct platform_device *pdev) { struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev); - int ret; if (tb10x_gpio->gc.to_irq) { irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0], @@ -300,9 +299,7 @@ static int __exit tb10x_gpio_remove(struct platform_device *pdev) irq_domain_remove(tb10x_gpio->domain); free_irq(tb10x_gpio->irq, tb10x_gpio); } - ret = gpiochip_remove(&tb10x_gpio->gc); - if (ret) - return ret; + gpiochip_remove(&tb10x_gpio->gc); return 0; } diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 51f7cbd9ff71..7324869c38e0 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -313,17 +313,11 @@ static int tc3589x_gpio_remove(struct platform_device *pdev) struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; struct tc3589x_gpio_platform_data *pdata = tc3589x->pdata->gpio; - int ret; if (pdata && pdata->remove) pdata->remove(tc3589x, tc3589x_gpio->chip.base); - ret = gpiochip_remove(&tc3589x_gpio->chip); - if (ret < 0) { - dev_err(tc3589x_gpio->dev, - "unable to remove gpiochip: %d\n", ret); - return ret; - } + 
gpiochip_remove(&tc3589x_gpio->chip); return 0; } diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index efc7c129016d..a685a3cbbc81 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -307,7 +307,6 @@ static int timbgpio_probe(struct platform_device *pdev) static int timbgpio_remove(struct platform_device *pdev) { - int err; struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev); struct timbgpio *tgpio = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); @@ -323,9 +322,7 @@ static int timbgpio_remove(struct platform_device *pdev) irq_set_handler_data(irq, NULL); } - err = gpiochip_remove(&tgpio->gpio); - if (err) - printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n"); + gpiochip_remove(&tgpio->gpio); return 0; } diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c index a69fbea41253..9c9238e838a9 100644 --- a/drivers/gpio/gpio-tps6586x.c +++ b/drivers/gpio/gpio-tps6586x.c @@ -137,7 +137,8 @@ static int tps6586x_gpio_remove(struct platform_device *pdev) { struct tps6586x_gpio *tps6586x_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&tps6586x_gpio->gpio_chip); + gpiochip_remove(&tps6586x_gpio->gpio_chip); + return 0; } static struct platform_driver tps6586x_gpio_driver = { diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c index e2f8cda235ea..88f1f5ff4e96 100644 --- a/drivers/gpio/gpio-tps65910.c +++ b/drivers/gpio/gpio-tps65910.c @@ -190,7 +190,8 @@ static int tps65910_gpio_remove(struct platform_device *pdev) { struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&tps65910_gpio->gpio_chip); + gpiochip_remove(&tps65910_gpio->gpio_chip); + return 0; } static struct platform_driver tps65910_gpio_driver = { diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c index 59ee486cb8b9..22052d84c63b 100644 --- a/drivers/gpio/gpio-tps65912.c +++ b/drivers/gpio/gpio-tps65912.c @@ -117,7 +117,8 @@ static int tps65912_gpio_remove(struct platform_device *pdev) { struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&tps65912_gpio->gpio_chip); + gpiochip_remove(&tps65912_gpio->gpio_chip); + return 0; } static struct platform_driver tps65912_gpio_driver = { diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c index 3df3ebdb3e52..de18591ff11e 100644 --- a/drivers/gpio/gpio-ts5500.c +++ b/drivers/gpio/gpio-ts5500.c @@ -427,8 +427,7 @@ static int ts5500_dio_probe(struct platform_device *pdev) return 0; cleanup: - if (gpiochip_remove(&priv->gpio_chip)) - dev_err(dev, "failed to remove gpio chip\n"); + gpiochip_remove(&priv->gpio_chip); return ret; } @@ -437,7 +436,8 @@ static int ts5500_dio_remove(struct platform_device *pdev) struct ts5500_priv *priv = platform_get_drvdata(pdev); ts5500_disable_irq(priv); - return gpiochip_remove(&priv->gpio_chip); + gpiochip_remove(&priv->gpio_chip); + return 0; } static struct platform_device_id ts5500_dio_ids[] = { diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c index 3ebb1a5ff22e..118828b3736f 100644 --- a/drivers/gpio/gpio-twl4030.c +++ b/drivers/gpio/gpio-twl4030.c @@ -554,7 +554,7 @@ no_irqs: platform_set_drvdata(pdev, priv); - if (pdata && pdata->setup) { + if (pdata->setup) { int status; status = pdata->setup(&pdev->dev, priv->gpio_chip.base, @@ -583,9 +583,7 @@ static int gpio_twl4030_remove(struct platform_device *pdev) } } - status = gpiochip_remove(&priv->gpio_chip); - if (status < 
0) - return status; + gpiochip_remove(&priv->gpio_chip); if (is_module()) return 0; diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c index 0caf5cd1b47d..f28e04b88aa9 100644 --- a/drivers/gpio/gpio-twl6040.c +++ b/drivers/gpio/gpio-twl6040.c @@ -111,7 +111,8 @@ static int gpo_twl6040_probe(struct platform_device *pdev) static int gpo_twl6040_remove(struct platform_device *pdev) { - return gpiochip_remove(&twl6040gpo_chip); + gpiochip_remove(&twl6040gpo_chip); + return 0; } /* Note: this hardware lives inside an I2C-based multi-function device. */ diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c index 2445fe771179..d502825159b9 100644 --- a/drivers/gpio/gpio-ucb1400.c +++ b/drivers/gpio/gpio-ucb1400.c @@ -70,7 +70,7 @@ static int ucb1400_gpio_probe(struct platform_device *dev) if (err) goto err; - if (ucb && ucb->gpio_setup) + if (ucb->gpio_setup) err = ucb->gpio_setup(&dev->dev, ucb->gc.ngpio); err: @@ -89,7 +89,7 @@ static int ucb1400_gpio_remove(struct platform_device *dev) return err; } - err = gpiochip_remove(&ucb->gc); + gpiochip_remove(&ucb->gc); return err; } diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c index 79e3b5836712..e2a11f27807f 100644 --- a/drivers/gpio/gpio-viperboard.c +++ b/drivers/gpio/gpio-viperboard.c @@ -446,8 +446,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev) return ret; err_gpiob: - if (gpiochip_remove(&vb_gpio->gpioa)) - dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); + gpiochip_remove(&vb_gpio->gpioa); err_gpioa: return ret; @@ -456,13 +455,10 @@ err_gpioa: static int vprbrd_gpio_remove(struct platform_device *pdev) { struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev); - int ret; - ret = gpiochip_remove(&vb_gpio->gpiob); - if (ret == 0) - ret = gpiochip_remove(&vb_gpio->gpioa); + gpiochip_remove(&vb_gpio->gpiob); - return ret; + return 0; } static struct platform_driver vprbrd_gpio_driver = { diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c index 66cbcc108e62..dbf28fa03f67 100644 --- a/drivers/gpio/gpio-vr41xx.c +++ b/drivers/gpio/gpio-vr41xx.c @@ -515,7 +515,7 @@ static int giu_probe(struct platform_device *pdev) struct resource *res; unsigned int trigger, i, pin; struct irq_chip *chip; - int irq, retval; + int irq, ret; switch (pdev->id) { case GPIO_50PINS_PULLUPDOWN: @@ -544,7 +544,11 @@ static int giu_probe(struct platform_device *pdev) vr41xx_gpio_chip.dev = &pdev->dev; - retval = gpiochip_add(&vr41xx_gpio_chip); + ret = gpiochip_add(&vr41xx_gpio_chip); + if (!ret) { + iounmap(giu_base); + return -ENODEV; + } giu_write(GIUINTENL, 0); giu_write(GIUINTENH, 0); diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c index 0fd23b6a753d..85971d4e23c1 100644 --- a/drivers/gpio/gpio-vx855.c +++ b/drivers/gpio/gpio-vx855.c @@ -288,8 +288,7 @@ static int vx855gpio_remove(struct platform_device *pdev) struct vx855_gpio *vg = platform_get_drvdata(pdev); struct resource *res; - if (gpiochip_remove(&vg->gpio)) - dev_err(&pdev->dev, "unable to remove gpio_chip?\n"); + gpiochip_remove(&vg->gpio); if (vg->gpi_reserved) { res = platform_get_resource(pdev, IORESOURCE_IO, 0); diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c index b18a1a26425e..58ce75c188b7 100644 --- a/drivers/gpio/gpio-wm831x.c +++ b/drivers/gpio/gpio-wm831x.c @@ -279,7 +279,8 @@ static int wm831x_gpio_remove(struct platform_device *pdev) { struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev); - return 
gpiochip_remove(&wm831x_gpio->gpio_chip); + gpiochip_remove(&wm831x_gpio->gpio_chip); + return 0; } static struct platform_driver wm831x_gpio_driver = { diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c index 2487f9d575d3..060b89303bb6 100644 --- a/drivers/gpio/gpio-wm8350.c +++ b/drivers/gpio/gpio-wm8350.c @@ -145,7 +145,8 @@ static int wm8350_gpio_remove(struct platform_device *pdev) { struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&wm8350_gpio->gpio_chip); + gpiochip_remove(&wm8350_gpio->gpio_chip); + return 0; } static struct platform_driver wm8350_gpio_driver = { diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c index d93b6b581677..6f5e42db4b9e 100644 --- a/drivers/gpio/gpio-wm8994.c +++ b/drivers/gpio/gpio-wm8994.c @@ -285,7 +285,8 @@ static int wm8994_gpio_remove(struct platform_device *pdev) { struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev); - return gpiochip_remove(&wm8994_gpio->gpio_chip); + gpiochip_remove(&wm8994_gpio->gpio_chip); + return 0; } static struct platform_driver wm8994_gpio_driver = { diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c new file mode 100644 index 000000000000..31ad5df5dbc9 --- /dev/null +++ b/drivers/gpio/gpio-zynq.c @@ -0,0 +1,712 @@ +/* + * Xilinx Zynq GPIO device driver + * + * Copyright (C) 2009 - 2014 Xilinx, Inc. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any later + * version. + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/gpio/driver.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#define DRIVER_NAME "zynq-gpio" + +/* Maximum banks */ +#define ZYNQ_GPIO_MAX_BANK 4 + +#define ZYNQ_GPIO_BANK0_NGPIO 32 +#define ZYNQ_GPIO_BANK1_NGPIO 22 +#define ZYNQ_GPIO_BANK2_NGPIO 32 +#define ZYNQ_GPIO_BANK3_NGPIO 32 + +#define ZYNQ_GPIO_NR_GPIOS (ZYNQ_GPIO_BANK0_NGPIO + \ + ZYNQ_GPIO_BANK1_NGPIO + \ + ZYNQ_GPIO_BANK2_NGPIO + \ + ZYNQ_GPIO_BANK3_NGPIO) + +#define ZYNQ_GPIO_BANK0_PIN_MIN 0 +#define ZYNQ_GPIO_BANK0_PIN_MAX (ZYNQ_GPIO_BANK0_PIN_MIN + \ + ZYNQ_GPIO_BANK0_NGPIO - 1) +#define ZYNQ_GPIO_BANK1_PIN_MIN (ZYNQ_GPIO_BANK0_PIN_MAX + 1) +#define ZYNQ_GPIO_BANK1_PIN_MAX (ZYNQ_GPIO_BANK1_PIN_MIN + \ + ZYNQ_GPIO_BANK1_NGPIO - 1) +#define ZYNQ_GPIO_BANK2_PIN_MIN (ZYNQ_GPIO_BANK1_PIN_MAX + 1) +#define ZYNQ_GPIO_BANK2_PIN_MAX (ZYNQ_GPIO_BANK2_PIN_MIN + \ + ZYNQ_GPIO_BANK2_NGPIO - 1) +#define ZYNQ_GPIO_BANK3_PIN_MIN (ZYNQ_GPIO_BANK2_PIN_MAX + 1) +#define ZYNQ_GPIO_BANK3_PIN_MAX (ZYNQ_GPIO_BANK3_PIN_MIN + \ + ZYNQ_GPIO_BANK3_NGPIO - 1) + + +/* Register offsets for the GPIO device */ +/* LSW Mask & Data -WO */ +#define ZYNQ_GPIO_DATA_LSW_OFFSET(BANK) (0x000 + (8 * BANK)) +/* MSW Mask & Data -WO */ +#define ZYNQ_GPIO_DATA_MSW_OFFSET(BANK) (0x004 + (8 * BANK)) +/* Data Register-RW */ +#define ZYNQ_GPIO_DATA_RO_OFFSET(BANK) (0x060 + (4 * BANK)) +/* Direction mode reg-RW */ +#define ZYNQ_GPIO_DIRM_OFFSET(BANK) (0x204 + (0x40 * BANK)) +/* Output enable reg-RW */ +#define ZYNQ_GPIO_OUTEN_OFFSET(BANK) (0x208 + (0x40 * BANK)) +/* Interrupt mask reg-RO */ +#define ZYNQ_GPIO_INTMASK_OFFSET(BANK) (0x20C + (0x40 * BANK)) +/* Interrupt enable reg-WO */ +#define ZYNQ_GPIO_INTEN_OFFSET(BANK) (0x210 + (0x40 * BANK)) +/* Interrupt 
disable reg-WO */ +#define ZYNQ_GPIO_INTDIS_OFFSET(BANK) (0x214 + (0x40 * BANK)) +/* Interrupt status reg-RO */ +#define ZYNQ_GPIO_INTSTS_OFFSET(BANK) (0x218 + (0x40 * BANK)) +/* Interrupt type reg-RW */ +#define ZYNQ_GPIO_INTTYPE_OFFSET(BANK) (0x21C + (0x40 * BANK)) +/* Interrupt polarity reg-RW */ +#define ZYNQ_GPIO_INTPOL_OFFSET(BANK) (0x220 + (0x40 * BANK)) +/* Interrupt on any, reg-RW */ +#define ZYNQ_GPIO_INTANY_OFFSET(BANK) (0x224 + (0x40 * BANK)) + +/* Disable all interrupts mask */ +#define ZYNQ_GPIO_IXR_DISABLE_ALL 0xFFFFFFFF + +/* Mid pin number of a bank */ +#define ZYNQ_GPIO_MID_PIN_NUM 16 + +/* GPIO upper 16 bit mask */ +#define ZYNQ_GPIO_UPPER_MASK 0xFFFF0000 + +/** + * struct zynq_gpio - gpio device private data structure + * @chip: instance of the gpio_chip + * @base_addr: base address of the GPIO device + * @clk: clock resource for this controller + */ +struct zynq_gpio { + struct gpio_chip chip; + void __iomem *base_addr; + struct clk *clk; +}; + +static struct irq_chip zynq_gpio_level_irqchip; +static struct irq_chip zynq_gpio_edge_irqchip; + +/** + * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank + * for a given pin in the GPIO device + * @pin_num: gpio pin number within the device + * @bank_num: an output parameter used to return the bank number of the gpio + * pin + * @bank_pin_num: an output parameter used to return pin number within a bank + * for the given gpio pin + * + * Returns the bank number and pin offset within the bank. + */ +static inline void zynq_gpio_get_bank_pin(unsigned int pin_num, + unsigned int *bank_num, + unsigned int *bank_pin_num) +{ + switch (pin_num) { + case ZYNQ_GPIO_BANK0_PIN_MIN ... ZYNQ_GPIO_BANK0_PIN_MAX: + *bank_num = 0; + *bank_pin_num = pin_num; + break; + case ZYNQ_GPIO_BANK1_PIN_MIN ... ZYNQ_GPIO_BANK1_PIN_MAX: + *bank_num = 1; + *bank_pin_num = pin_num - ZYNQ_GPIO_BANK1_PIN_MIN; + break; + case ZYNQ_GPIO_BANK2_PIN_MIN ... ZYNQ_GPIO_BANK2_PIN_MAX: + *bank_num = 2; + *bank_pin_num = pin_num - ZYNQ_GPIO_BANK2_PIN_MIN; + break; + case ZYNQ_GPIO_BANK3_PIN_MIN ... ZYNQ_GPIO_BANK3_PIN_MAX: + *bank_num = 3; + *bank_pin_num = pin_num - ZYNQ_GPIO_BANK3_PIN_MIN; + break; + default: + WARN(true, "invalid GPIO pin number: %u", pin_num); + *bank_num = 0; + *bank_pin_num = 0; + break; + } +} + +/** + * zynq_gpio_get_value - Get the state of the specified pin of GPIO device + * @chip: gpio_chip instance to be worked on + * @pin: gpio pin number within the device + * + * This function reads the state of the specified pin of the GPIO device. + * + * Return: 0 if the pin is low, 1 if pin is high. + */ +static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin) +{ + u32 data; + unsigned int bank_num, bank_pin_num; + struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); + + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + + data = readl_relaxed(gpio->base_addr + + ZYNQ_GPIO_DATA_RO_OFFSET(bank_num)); + + return (data >> bank_pin_num) & 1; +} + +/** + * zynq_gpio_set_value - Modify the state of the pin with specified value + * @chip: gpio_chip instance to be worked on + * @pin: gpio pin number within the device + * @state: value used to modify the state of the specified pin + * + * This function calculates the register offset (i.e to lower 16 bits or + * upper 16 bits) based on the given pin number and sets the state of a + * gpio pin to the specified value. The state is either 0 or non-zero. 
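+ *
+ * For example, following the computation in the function body: driving bank
+ * pin 3 high writes 0xFFF70008 to the bank's LSW mask/data register (the
+ * upper halfword is the write mask with only bit 3 cleared, the lower
+ * halfword carries the data bit); driving it low writes 0xFFF70000.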
+ */ +static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin, + int state) +{ + unsigned int reg_offset, bank_num, bank_pin_num; + struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); + + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + + if (bank_pin_num >= ZYNQ_GPIO_MID_PIN_NUM) { + /* only 16 data bits in bit maskable reg */ + bank_pin_num -= ZYNQ_GPIO_MID_PIN_NUM; + reg_offset = ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num); + } else { + reg_offset = ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num); + } + + /* + * get the 32 bit value to be written to the mask/data register where + * the upper 16 bits is the mask and lower 16 bits is the data + */ + state = !!state; + state = ~(1 << (bank_pin_num + ZYNQ_GPIO_MID_PIN_NUM)) & + ((state << bank_pin_num) | ZYNQ_GPIO_UPPER_MASK); + + writel_relaxed(state, gpio->base_addr + reg_offset); +} + +/** + * zynq_gpio_dir_in - Set the direction of the specified GPIO pin as input + * @chip: gpio_chip instance to be worked on + * @pin: gpio pin number within the device + * + * This function uses the read-modify-write sequence to set the direction of + * the gpio pin as input. + * + * Return: 0 always + */ +static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin) +{ + u32 reg; + unsigned int bank_num, bank_pin_num; + struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); + + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + + /* bank 0 pins 7 and 8 are special and cannot be used as inputs */ + if (bank_num == 0 && (bank_pin_num == 7 || bank_pin_num == 8)) + return -EINVAL; + + /* clear the bit in direction mode reg to set the pin as input */ + reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); + reg &= ~BIT(bank_pin_num); + writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); + + return 0; +} + +/** + * zynq_gpio_dir_out - Set the direction of the specified GPIO pin as output + * @chip: gpio_chip instance to be worked on + * @pin: gpio pin number within the device + * @state: value to be written to specified pin + * + * This function sets the direction of specified GPIO pin as output, configures + * the Output Enable register for the pin and uses zynq_gpio_set to set + * the state of the pin to the value specified. + * + * Return: 0 always + */ +static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, + int state) +{ + u32 reg; + unsigned int bank_num, bank_pin_num; + struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); + + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + + /* set the GPIO pin as output */ + reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); + reg |= BIT(bank_pin_num); + writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); + + /* configure the output enable reg for the pin */ + reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num)); + reg |= BIT(bank_pin_num); + writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num)); + + /* set the state of the pin */ + zynq_gpio_set_value(chip, pin, state); + return 0; +} + +/** + * zynq_gpio_irq_mask - Disable the interrupts for a gpio pin + * @irq_data: per irq and chip data passed down to chip functions + * + * This function calculates gpio pin number from irq number and sets the + * bit in the Interrupt Disable register of the corresponding bank to disable + * interrupts for that pin. 
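+ *
+ * Note that masking only stops propagation of the interrupt; the controller
+ * keeps detecting events while the interrupt is masked (see the comment in
+ * zynq_gpio_irq_enable()).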
+ */
+static void zynq_gpio_irq_mask(struct irq_data *irq_data)
+{
+	unsigned int device_pin_num, bank_num, bank_pin_num;
+	struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+
+	device_pin_num = irq_data->hwirq;
+	zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num);
+	writel_relaxed(BIT(bank_pin_num),
+		       gpio->base_addr + ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
+}
+
+/**
+ * zynq_gpio_irq_unmask - Enable the interrupts for a gpio pin
+ * @irq_data:	irq data containing irq number of gpio pin for the interrupt
+ *		to enable
+ *
+ * This function calculates the gpio pin number from irq number and sets the
+ * bit in the Interrupt Enable register of the corresponding bank to enable
+ * interrupts for that pin.
+ */
+static void zynq_gpio_irq_unmask(struct irq_data *irq_data)
+{
+	unsigned int device_pin_num, bank_num, bank_pin_num;
+	struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+
+	device_pin_num = irq_data->hwirq;
+	zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num);
+	writel_relaxed(BIT(bank_pin_num),
+		       gpio->base_addr + ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+}
+
+/**
+ * zynq_gpio_irq_ack - Acknowledge the interrupt of a gpio pin
+ * @irq_data:	irq data containing irq number of gpio pin for the interrupt
+ *		to ack
+ *
+ * This function calculates gpio pin number from irq number and sets the bit
+ * in the Interrupt Status Register of the corresponding bank, to ACK the irq.
+ */
+static void zynq_gpio_irq_ack(struct irq_data *irq_data)
+{
+	unsigned int device_pin_num, bank_num, bank_pin_num;
+	struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+
+	device_pin_num = irq_data->hwirq;
+	zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num);
+	writel_relaxed(BIT(bank_pin_num),
+		       gpio->base_addr + ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
+}
+
+/**
+ * zynq_gpio_irq_enable - Enable the interrupts for a gpio pin
+ * @irq_data:	irq data containing irq number of gpio pin for the interrupt
+ *		to enable
+ *
+ * Clears the INTSTS bit and unmasks the given interrupt.
+ */
+static void zynq_gpio_irq_enable(struct irq_data *irq_data)
+{
+	/*
+	 * The Zynq GPIO controller does not disable interrupt detection when
+	 * the interrupt is masked and only disables the propagation of the
+	 * interrupt. This means that when the controller detects an interrupt
+	 * condition while the interrupt is logically disabled, it will
+	 * propagate that interrupt event once the interrupt is enabled. This
+	 * will cause the interrupt consumer to see spurious interrupts.
+	 * To prevent this, first make sure that the interrupt is not asserted
+	 * and then enable it.
+	 */
+	zynq_gpio_irq_ack(irq_data);
+	zynq_gpio_irq_unmask(irq_data);
+}
+
+/**
+ * zynq_gpio_set_irq_type - Set the irq type for a gpio pin
+ * @irq_data:	irq data containing irq number of gpio pin
+ * @type:	interrupt type that is to be set for the gpio pin
+ *
+ * This function gets the gpio pin number and its bank from the gpio pin number
+ * and configures the INT_TYPE, INT_POLARITY and INT_ANY registers.
+ *
+ * Return: 0, negative error otherwise.
+ * TYPE-EDGE_RISING, INT_TYPE - 1, INT_POLARITY - 1, INT_ANY - 0; + * TYPE-EDGE_FALLING, INT_TYPE - 1, INT_POLARITY - 0, INT_ANY - 0; + * TYPE-EDGE_BOTH, INT_TYPE - 1, INT_POLARITY - NA, INT_ANY - 1; + * TYPE-LEVEL_HIGH, INT_TYPE - 0, INT_POLARITY - 1, INT_ANY - NA; + * TYPE-LEVEL_LOW, INT_TYPE - 0, INT_POLARITY - 0, INT_ANY - NA + */ +static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type) +{ + u32 int_type, int_pol, int_any; + unsigned int device_pin_num, bank_num, bank_pin_num; + struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); + + device_pin_num = irq_data->hwirq; + zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num); + + int_type = readl_relaxed(gpio->base_addr + + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); + int_pol = readl_relaxed(gpio->base_addr + + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); + int_any = readl_relaxed(gpio->base_addr + + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); + + /* + * based on the type requested, configure the INT_TYPE, INT_POLARITY + * and INT_ANY registers + */ + switch (type) { + case IRQ_TYPE_EDGE_RISING: + int_type |= BIT(bank_pin_num); + int_pol |= BIT(bank_pin_num); + int_any &= ~BIT(bank_pin_num); + break; + case IRQ_TYPE_EDGE_FALLING: + int_type |= BIT(bank_pin_num); + int_pol &= ~BIT(bank_pin_num); + int_any &= ~BIT(bank_pin_num); + break; + case IRQ_TYPE_EDGE_BOTH: + int_type |= BIT(bank_pin_num); + int_any |= BIT(bank_pin_num); + break; + case IRQ_TYPE_LEVEL_HIGH: + int_type &= ~BIT(bank_pin_num); + int_pol |= BIT(bank_pin_num); + break; + case IRQ_TYPE_LEVEL_LOW: + int_type &= ~BIT(bank_pin_num); + int_pol &= ~BIT(bank_pin_num); + break; + default: + return -EINVAL; + } + + writel_relaxed(int_type, + gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); + writel_relaxed(int_pol, + gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); + writel_relaxed(int_any, + gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); + + if (type & IRQ_TYPE_LEVEL_MASK) { + __irq_set_chip_handler_name_locked(irq_data->irq, + &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL); + } else { + __irq_set_chip_handler_name_locked(irq_data->irq, + &zynq_gpio_edge_irqchip, handle_level_irq, NULL); + } + + return 0; +} + +static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on) +{ + if (on) + zynq_gpio_irq_unmask(data); + else + zynq_gpio_irq_mask(data); + + return 0; +} + +/* irq chip descriptor */ +static struct irq_chip zynq_gpio_level_irqchip = { + .name = DRIVER_NAME, + .irq_enable = zynq_gpio_irq_enable, + .irq_eoi = zynq_gpio_irq_ack, + .irq_mask = zynq_gpio_irq_mask, + .irq_unmask = zynq_gpio_irq_unmask, + .irq_set_type = zynq_gpio_set_irq_type, + .irq_set_wake = zynq_gpio_set_wake, + .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED, +}; + +static struct irq_chip zynq_gpio_edge_irqchip = { + .name = DRIVER_NAME, + .irq_enable = zynq_gpio_irq_enable, + .irq_ack = zynq_gpio_irq_ack, + .irq_mask = zynq_gpio_irq_mask, + .irq_unmask = zynq_gpio_irq_unmask, + .irq_set_type = zynq_gpio_set_irq_type, + .irq_set_wake = zynq_gpio_set_wake, +}; + +/** + * zynq_gpio_irqhandler - IRQ handler for the gpio banks of a gpio device + * @irq: irq number of the gpio bank where interrupt has occurred + * @desc: irq descriptor instance of the 'irq' + * + * This function reads the Interrupt Status Register of each bank to get the + * gpio pin number which has triggered an interrupt. It then acks the triggered + * interrupt and calls the pin specific handler set by the higher layer + * application for that pin. 
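+ * Only pins whose interrupt is currently unmasked are serviced: INTMASK
+ * reads 1 for masked pins, so "int_sts &= ~int_enb" below keeps just the
+ * enabled, pending bits.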
+ * Note: A bug is reported if no handler is set for the gpio pin. + */ +static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) +{ + u32 int_sts, int_enb; + unsigned int bank_num; + struct zynq_gpio *gpio = irq_get_handler_data(irq); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + + chained_irq_enter(irqchip, desc); + + for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++) { + int_sts = readl_relaxed(gpio->base_addr + + ZYNQ_GPIO_INTSTS_OFFSET(bank_num)); + int_enb = readl_relaxed(gpio->base_addr + + ZYNQ_GPIO_INTMASK_OFFSET(bank_num)); + int_sts &= ~int_enb; + if (int_sts) { + int offset; + unsigned long pending = int_sts; + + for_each_set_bit(offset, &pending, 32) { + unsigned int gpio_irq = + irq_find_mapping(gpio->chip.irqdomain, + offset); + generic_handle_irq(gpio_irq); + } + } + } + + chained_irq_exit(irqchip, desc); +} + +static int __maybe_unused zynq_gpio_suspend(struct device *dev) +{ + if (!device_may_wakeup(dev)) + return pm_runtime_force_suspend(dev); + + return 0; +} + +static int __maybe_unused zynq_gpio_resume(struct device *dev) +{ + if (!device_may_wakeup(dev)) + return pm_runtime_force_resume(dev); + + return 0; +} + +static int __maybe_unused zynq_gpio_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct zynq_gpio *gpio = platform_get_drvdata(pdev); + + clk_disable_unprepare(gpio->clk); + + return 0; +} + +static int __maybe_unused zynq_gpio_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct zynq_gpio *gpio = platform_get_drvdata(pdev); + + return clk_prepare_enable(gpio->clk); +} + +static int zynq_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + int ret; + + ret = pm_runtime_get_sync(chip->dev); + + /* + * If the device is already active pm_runtime_get() will return 1 on + * success, but gpio_request still needs to return 0. + */ + return ret < 0 ? ret : 0; +} + +static void zynq_gpio_free(struct gpio_chip *chip, unsigned offset) +{ + pm_runtime_put(chip->dev); +} + +static const struct dev_pm_ops zynq_gpio_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume) + SET_PM_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend, + zynq_gpio_runtime_resume, NULL) +}; + +/** + * zynq_gpio_probe - Initialization method for a zynq_gpio device + * @pdev: platform device instance + * + * This function allocates memory resources for the gpio device and registers + * all the banks of the device. It will also set up interrupts for the gpio + * pins. + * Note: Interrupts are disabled for all the banks during initialization. + * + * Return: 0 on success, negative error otherwise. 
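+ *
+ * All bank interrupts are disabled before the irqchip and the chained
+ * handler are registered, so no stale interrupt is delivered while the
+ * device is still being set up.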
+ */ +static int zynq_gpio_probe(struct platform_device *pdev) +{ + int ret, bank_num, irq; + struct zynq_gpio *gpio; + struct gpio_chip *chip; + struct resource *res; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + platform_set_drvdata(pdev, gpio); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->base_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->base_addr)) + return PTR_ERR(gpio->base_addr); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "invalid IRQ\n"); + return irq; + } + + /* configure the gpio chip */ + chip = &gpio->chip; + chip->label = "zynq_gpio"; + chip->owner = THIS_MODULE; + chip->dev = &pdev->dev; + chip->get = zynq_gpio_get_value; + chip->set = zynq_gpio_set_value; + chip->request = zynq_gpio_request; + chip->free = zynq_gpio_free; + chip->direction_input = zynq_gpio_dir_in; + chip->direction_output = zynq_gpio_dir_out; + chip->base = -1; + chip->ngpio = ZYNQ_GPIO_NR_GPIOS; + + /* Enable GPIO clock */ + gpio->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(gpio->clk)) { + dev_err(&pdev->dev, "input clock not found.\n"); + return PTR_ERR(gpio->clk); + } + ret = clk_prepare_enable(gpio->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable clock.\n"); + return ret; + } + + /* report a bug if gpio chip registration fails */ + ret = gpiochip_add(chip); + if (ret) { + dev_err(&pdev->dev, "Failed to add gpio chip\n"); + goto err_disable_clk; + } + + /* disable interrupts for all banks */ + for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++) + writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); + + ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0, + handle_level_irq, IRQ_TYPE_NONE); + if (ret) { + dev_err(&pdev->dev, "Failed to add irq chip\n"); + goto err_rm_gpiochip; + } + + gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, irq, + zynq_gpio_irqhandler); + + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + device_set_wakeup_capable(&pdev->dev, 1); + + return 0; + +err_rm_gpiochip: + if (gpiochip_remove(chip)) + dev_err(&pdev->dev, "Failed to remove gpio chip\n"); +err_disable_clk: + clk_disable_unprepare(gpio->clk); + + return ret; +} + +/** + * zynq_gpio_remove - Driver removal function + * @pdev: platform device instance + * + * Return: 0 always + */ +static int zynq_gpio_remove(struct platform_device *pdev) +{ + int ret; + struct zynq_gpio *gpio = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + ret = gpiochip_remove(&gpio->chip); + if (ret) { + dev_err(&pdev->dev, "Failed to remove gpio chip\n"); + return ret; + } + clk_disable_unprepare(gpio->clk); + device_set_wakeup_capable(&pdev->dev, 0); + return 0; +} + +static struct of_device_id zynq_gpio_of_match[] = { + { .compatible = "xlnx,zynq-gpio-1.0", }, + { /* end of table */ } +}; +MODULE_DEVICE_TABLE(of, zynq_gpio_of_match); + +static struct platform_driver zynq_gpio_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &zynq_gpio_dev_pm_ops, + .of_match_table = zynq_gpio_of_match, + }, + .probe = zynq_gpio_probe, + .remove = zynq_gpio_remove, +}; + +/** + * zynq_gpio_init - Initial driver registration call + * + * Return: value from platform_driver_register + */ +static int __init zynq_gpio_init(void) +{ + return platform_driver_register(&zynq_gpio_driver); +} +postcore_initcall(zynq_gpio_init); + +MODULE_AUTHOR("Xilinx Inc."); +MODULE_DESCRIPTION("Zynq 
GPIO driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 4a987917c186..687476fb39e3 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -157,7 +157,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, gpiod_direction_input(desc); - ret = gpiod_lock_as_irq(desc); + ret = gpio_lock_as_irq(chip, pin); if (ret) { dev_err(chip->dev, "Failed to lock GPIO as interrupt\n"); goto fail_free_desc; @@ -212,7 +212,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, fail_free_event: kfree(event); fail_unlock_irq: - gpiod_unlock_as_irq(desc); + gpio_unlock_as_irq(chip, pin); fail_free_desc: gpiochip_free_own_desc(desc); @@ -221,7 +221,7 @@ fail_free_desc: /** * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events - * @acpi_gpio: ACPI GPIO chip + * @chip: GPIO chip * * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are * handled by ACPI event methods which need to be called from the GPIO @@ -229,11 +229,21 @@ fail_free_desc: * gpio pins have acpi event methods and assigns interrupt handlers that calls * the acpi event methods for those pins. */ -static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio) +void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { - struct gpio_chip *chip = acpi_gpio->chip; + struct acpi_gpio_chip *acpi_gpio; + acpi_handle handle; + acpi_status status; + + if (!chip->dev || !chip->to_irq) + return; - if (!chip->to_irq) + handle = ACPI_HANDLE(chip->dev); + if (!handle) + return; + + status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); + if (ACPI_FAILURE(status)) return; INIT_LIST_HEAD(&acpi_gpio->events); @@ -243,17 +253,27 @@ static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio) /** * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts. - * @acpi_gpio: ACPI GPIO chip + * @chip: GPIO chip * * Free interrupts associated with GPIO ACPI event method for the given * GPIO chip. 
*/ -static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio) +void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { + struct acpi_gpio_chip *acpi_gpio; struct acpi_gpio_event *event, *ep; - struct gpio_chip *chip = acpi_gpio->chip; + acpi_handle handle; + acpi_status status; + + if (!chip->dev || !chip->to_irq) + return; - if (!chip->to_irq) + handle = ACPI_HANDLE(chip->dev); + if (!handle) + return; + + status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio); + if (ACPI_FAILURE(status)) return; list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { @@ -263,7 +283,7 @@ static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio) desc = gpiochip_get_desc(chip, event->pin); if (WARN_ON(IS_ERR(desc))) continue; - gpiod_unlock_as_irq(desc); + gpio_unlock_as_irq(chip, event->pin); gpiochip_free_own_desc(desc); list_del(&event->node); kfree(event); @@ -357,8 +377,10 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, struct gpio_chip *chip = achip->chip; struct acpi_resource_gpio *agpio; struct acpi_resource *ares; + int pin_index = (int)address; acpi_status status; bool pull_up; + int length; int i; status = acpi_buffer_to_resource(achip->conn_info.connection, @@ -380,7 +402,8 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, return AE_BAD_PARAMETER; } - for (i = 0; i < agpio->pin_table_length; i++) { + length = min(agpio->pin_table_length, (u16)(pin_index + bits)); + for (i = pin_index; i < length; ++i) { unsigned pin = agpio->pin_table[i]; struct acpi_gpio_connection *conn; struct gpio_desc *desc; @@ -525,7 +548,6 @@ void acpi_gpiochip_add(struct gpio_chip *chip) return; } - acpi_gpiochip_request_interrupts(acpi_gpio); acpi_gpiochip_request_regions(acpi_gpio); } @@ -549,7 +571,6 @@ void acpi_gpiochip_remove(struct gpio_chip *chip) } acpi_gpiochip_free_regions(acpi_gpio); - acpi_gpiochip_free_interrupts(acpi_gpio); acpi_detach_data(handle, acpi_gpio_chip_dh); kfree(acpi_gpio); diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c new file mode 100644 index 000000000000..078ae6c2df79 --- /dev/null +++ b/drivers/gpio/gpiolib-legacy.c @@ -0,0 +1,102 @@ +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> + +#include <linux/gpio.h> + +#include "gpiolib.h" + +void gpio_free(unsigned gpio) +{ + gpiod_free(gpio_to_desc(gpio)); +} +EXPORT_SYMBOL_GPL(gpio_free); + +/** + * gpio_request_one - request a single GPIO with initial configuration + * @gpio: the GPIO number + * @flags: GPIO configuration as specified by GPIOF_* + * @label: a literal description string of this GPIO + */ +int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) +{ + struct gpio_desc *desc; + int err; + + desc = gpio_to_desc(gpio); + + err = gpiod_request(desc, label); + if (err) + return err; + + if (flags & GPIOF_OPEN_DRAIN) + set_bit(FLAG_OPEN_DRAIN, &desc->flags); + + if (flags & GPIOF_OPEN_SOURCE) + set_bit(FLAG_OPEN_SOURCE, &desc->flags); + + if (flags & GPIOF_ACTIVE_LOW) + set_bit(FLAG_ACTIVE_LOW, &desc->flags); + + if (flags & GPIOF_DIR_IN) + err = gpiod_direction_input(desc); + else + err = gpiod_direction_output_raw(desc, + (flags & GPIOF_INIT_HIGH) ? 
1 : 0); + + if (err) + goto free_gpio; + + if (flags & GPIOF_EXPORT) { + err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE); + if (err) + goto free_gpio; + } + + return 0; + + free_gpio: + gpiod_free(desc); + return err; +} +EXPORT_SYMBOL_GPL(gpio_request_one); + +int gpio_request(unsigned gpio, const char *label) +{ + return gpiod_request(gpio_to_desc(gpio), label); +} +EXPORT_SYMBOL_GPL(gpio_request); + +/** + * gpio_request_array - request multiple GPIOs in a single call + * @array: array of the 'struct gpio' + * @num: how many GPIOs in the array + */ +int gpio_request_array(const struct gpio *array, size_t num) +{ + int i, err; + + for (i = 0; i < num; i++, array++) { + err = gpio_request_one(array->gpio, array->flags, array->label); + if (err) + goto err_free; + } + return 0; + +err_free: + while (i--) + gpio_free((--array)->gpio); + return err; +} +EXPORT_SYMBOL_GPL(gpio_request_array); + +/** + * gpio_free_array - release multiple GPIOs in a single call + * @array: array of the 'struct gpio' + * @num: how many GPIOs in the array + */ +void gpio_free_array(const struct gpio *array, size_t num) +{ + while (num--) + gpio_free((array++)->gpio); +} +EXPORT_SYMBOL_GPL(gpio_free_array); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index af7e25c9a9ae..604dbe60bdee 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -23,7 +23,7 @@ #include <linux/pinctrl/pinctrl.h> #include <linux/slab.h> -struct gpio_desc; +#include "gpiolib.h" /* Private data structure for of_gpiochip_find_and_xlate */ struct gg_data { @@ -82,19 +82,19 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index, &gg_data.gpiospec); if (ret) { - pr_debug("%s: can't parse gpios property of node '%s[%d]'\n", - __func__, np->full_name, index); + pr_debug("%s: can't parse '%s' property of node '%s[%d]'\n", + __func__, propname, np->full_name, index); return ERR_PTR(ret); } gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); of_node_put(gg_data.gpiospec.np); - pr_debug("%s exited with status %d\n", __func__, + pr_debug("%s: parsed '%s' property of node '%s[%d]' - status (%d)\n", + __func__, propname, np->full_name, index, PTR_ERR_OR_ZERO(gg_data.out_gpio)); return gg_data.out_gpio; } -EXPORT_SYMBOL(of_get_named_gpiod_flags); int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) @@ -307,7 +307,5 @@ void of_gpiochip_add(struct gpio_chip *chip) void of_gpiochip_remove(struct gpio_chip *chip) { gpiochip_remove_pin_ranges(chip); - - if (chip->of_node) - of_node_put(chip->of_node); + of_node_put(chip->of_node); } diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c new file mode 100644 index 000000000000..5f2150b619a7 --- /dev/null +++ b/drivers/gpio/gpiolib-sysfs.c @@ -0,0 +1,827 @@ +#include <linux/idr.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/interrupt.h> +#include <linux/kdev_t.h> + +#include "gpiolib.h" + +static DEFINE_IDR(dirent_idr); + + +/* lock protects against unexport_gpio() being called while + * sysfs files are active. + */ +static DEFINE_MUTEX(sysfs_lock); + +/* + * /sys/class/gpio/gpioN... 
only for GPIOs that are exported + * /direction + * * MAY BE OMITTED if kernel won't allow direction changes + * * is read/write as "in" or "out" + * * may also be written as "high" or "low", initializing + * output value as specified ("out" implies "low") + * /value + * * always readable, subject to hardware behavior + * * may be writable, as zero/nonzero + * /edge + * * configures behavior of poll(2) on /value + * * available only if pin can generate IRQs on input + * * is read/write as "none", "falling", "rising", or "both" + * /active_low + * * configures polarity of /value + * * is read/write as zero/nonzero + * * also affects existing and subsequent "falling" and "rising" + * /edge configuration + */ + +static ssize_t gpio_direction_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) { + status = -EIO; + } else { + gpiod_get_direction(desc); + status = sprintf(buf, "%s\n", + test_bit(FLAG_IS_OUT, &desc->flags) + ? "out" : "in"); + } + + mutex_unlock(&sysfs_lock); + return status; +} + +static ssize_t gpio_direction_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) + status = -EIO; + else if (sysfs_streq(buf, "high")) + status = gpiod_direction_output_raw(desc, 1); + else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) + status = gpiod_direction_output_raw(desc, 0); + else if (sysfs_streq(buf, "in")) + status = gpiod_direction_input(desc); + else + status = -EINVAL; + + mutex_unlock(&sysfs_lock); + return status ? 
: size; +} + +static /* const */ DEVICE_ATTR(direction, 0644, + gpio_direction_show, gpio_direction_store); + +static ssize_t gpio_value_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) + status = -EIO; + else + status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc)); + + mutex_unlock(&sysfs_lock); + return status; +} + +static ssize_t gpio_value_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) + status = -EIO; + else if (!test_bit(FLAG_IS_OUT, &desc->flags)) + status = -EPERM; + else { + long value; + + status = kstrtol(buf, 0, &value); + if (status == 0) { + gpiod_set_value_cansleep(desc, value); + status = size; + } + } + + mutex_unlock(&sysfs_lock); + return status; +} + +static const DEVICE_ATTR(value, 0644, + gpio_value_show, gpio_value_store); + +static irqreturn_t gpio_sysfs_irq(int irq, void *priv) +{ + struct kernfs_node *value_sd = priv; + + sysfs_notify_dirent(value_sd); + return IRQ_HANDLED; +} + +static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, + unsigned long gpio_flags) +{ + struct kernfs_node *value_sd; + unsigned long irq_flags; + int ret, irq, id; + + if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags) + return 0; + + irq = gpiod_to_irq(desc); + if (irq < 0) + return -EIO; + + id = desc->flags >> ID_SHIFT; + value_sd = idr_find(&dirent_idr, id); + if (value_sd) + free_irq(irq, value_sd); + + desc->flags &= ~GPIO_TRIGGER_MASK; + + if (!gpio_flags) { + gpio_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); + ret = 0; + goto free_id; + } + + irq_flags = IRQF_SHARED; + if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) + irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? + IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; + if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) + irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; + + if (!value_sd) { + value_sd = sysfs_get_dirent(dev->kobj.sd, "value"); + if (!value_sd) { + ret = -ENODEV; + goto err_out; + } + + ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL); + if (ret < 0) + goto free_sd; + id = ret; + + desc->flags &= GPIO_FLAGS_MASK; + desc->flags |= (unsigned long)id << ID_SHIFT; + + if (desc->flags >> ID_SHIFT != id) { + ret = -ERANGE; + goto free_id; + } + } + + ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags, + "gpiolib", value_sd); + if (ret < 0) + goto free_id; + + ret = gpio_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); + if (ret < 0) { + gpiod_warn(desc, "failed to flag the GPIO for IRQ\n"); + goto free_id; + } + + desc->flags |= gpio_flags; + return 0; + +free_id: + idr_remove(&dirent_idr, id); + desc->flags &= GPIO_FLAGS_MASK; +free_sd: + if (value_sd) + sysfs_put(value_sd); +err_out: + return ret; +} + +static const struct { + const char *name; + unsigned long flags; +} trigger_types[] = { + { "none", 0 }, + { "falling", BIT(FLAG_TRIG_FALL) }, + { "rising", BIT(FLAG_TRIG_RISE) }, + { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) }, +}; + +static ssize_t gpio_edge_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) + status = -EIO; + else { + int i; + + status = 0; + for (i = 0; i < ARRAY_SIZE(trigger_types); i++) + if ((desc->flags & GPIO_TRIGGER_MASK) + == trigger_types[i].flags) { + status = sprintf(buf, "%s\n", + trigger_types[i].name); + break; + } + } + + mutex_unlock(&sysfs_lock); + return status; +} + +static ssize_t gpio_edge_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + int i; + + for (i = 0; i < ARRAY_SIZE(trigger_types); i++) + if (sysfs_streq(trigger_types[i].name, buf)) + goto found; + return -EINVAL; + +found: + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) + status = -EIO; + else { + status = gpio_setup_irq(desc, dev, trigger_types[i].flags); + if (!status) + status = size; + } + + mutex_unlock(&sysfs_lock); + + return status; +} + +static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); + +static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, + int value) +{ + int status = 0; + + if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) + return 0; + + if (value) + set_bit(FLAG_ACTIVE_LOW, &desc->flags); + else + clear_bit(FLAG_ACTIVE_LOW, &desc->flags); + + /* reconfigure poll(2) support if enabled on one edge only */ + if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^ + !!test_bit(FLAG_TRIG_FALL, &desc->flags))) { + unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK; + + gpio_setup_irq(desc, dev, 0); + status = gpio_setup_irq(desc, dev, trigger_flags); + } + + return status; +} + +static ssize_t gpio_active_low_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) + status = -EIO; + else + status = sprintf(buf, "%d\n", + !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); + + mutex_unlock(&sysfs_lock); + + return status; +} + +static ssize_t gpio_active_low_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ 
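+	/*
+	 * A numeric write sets (non-zero) or clears (zero) FLAG_ACTIVE_LOW;
+	 * sysfs_set_active_low() also re-arms a single-edge poll(2) trigger
+	 * so it follows the new polarity.
+	 */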
+ struct gpio_desc *desc = dev_get_drvdata(dev); + ssize_t status; + + mutex_lock(&sysfs_lock); + + if (!test_bit(FLAG_EXPORT, &desc->flags)) { + status = -EIO; + } else { + long value; + + status = kstrtol(buf, 0, &value); + if (status == 0) + status = sysfs_set_active_low(desc, dev, value != 0); + } + + mutex_unlock(&sysfs_lock); + + return status ? : size; +} + +static const DEVICE_ATTR(active_low, 0644, + gpio_active_low_show, gpio_active_low_store); + +static const struct attribute *gpio_attrs[] = { + &dev_attr_value.attr, + &dev_attr_active_low.attr, + NULL, +}; + +static const struct attribute_group gpio_attr_group = { + .attrs = (struct attribute **) gpio_attrs, +}; + +/* + * /sys/class/gpio/gpiochipN/ + * /base ... matching gpio_chip.base (N) + * /label ... matching gpio_chip.label + * /ngpio ... matching gpio_chip.ngpio + */ + +static ssize_t chip_base_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct gpio_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", chip->base); +} +static DEVICE_ATTR(base, 0444, chip_base_show, NULL); + +static ssize_t chip_label_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct gpio_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", chip->label ? : ""); +} +static DEVICE_ATTR(label, 0444, chip_label_show, NULL); + +static ssize_t chip_ngpio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct gpio_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", chip->ngpio); +} +static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); + +static const struct attribute *gpiochip_attrs[] = { + &dev_attr_base.attr, + &dev_attr_label.attr, + &dev_attr_ngpio.attr, + NULL, +}; + +static const struct attribute_group gpiochip_attr_group = { + .attrs = (struct attribute **) gpiochip_attrs, +}; + +/* + * /sys/class/gpio/export ... write-only + * integer N ... number of GPIO to export (full access) + * /sys/class/gpio/unexport ... write-only + * integer N ... number of GPIO to unexport + */ +static ssize_t export_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t len) +{ + long gpio; + struct gpio_desc *desc; + int status; + + status = kstrtol(buf, 0, &gpio); + if (status < 0) + goto done; + + desc = gpio_to_desc(gpio); + /* reject invalid GPIOs */ + if (!desc) { + pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); + return -EINVAL; + } + + /* No extra locking here; FLAG_SYSFS just signifies that the + * request and export were done by on behalf of userspace, so + * they may be undone on its behalf too. + */ + + status = gpiod_request(desc, "sysfs"); + if (status < 0) { + if (status == -EPROBE_DEFER) + status = -ENODEV; + goto done; + } + status = gpiod_export(desc, true); + if (status < 0) + gpiod_free(desc); + else + set_bit(FLAG_SYSFS, &desc->flags); + +done: + if (status) + pr_debug("%s: status %d\n", __func__, status); + return status ? 
: len; +} + +static ssize_t unexport_store(struct class *class, + struct class_attribute *attr, + const char *buf, size_t len) +{ + long gpio; + struct gpio_desc *desc; + int status; + + status = kstrtol(buf, 0, &gpio); + if (status < 0) + goto done; + + desc = gpio_to_desc(gpio); + /* reject bogus commands (gpio_unexport ignores them) */ + if (!desc) { + pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); + return -EINVAL; + } + + status = -EINVAL; + + /* No extra locking here; FLAG_SYSFS just signifies that the + * request and export were done by on behalf of userspace, so + * they may be undone on its behalf too. + */ + if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { + status = 0; + gpiod_free(desc); + } +done: + if (status) + pr_debug("%s: status %d\n", __func__, status); + return status ? : len; +} + +static struct class_attribute gpio_class_attrs[] = { + __ATTR(export, 0200, NULL, export_store), + __ATTR(unexport, 0200, NULL, unexport_store), + __ATTR_NULL, +}; + +static struct class gpio_class = { + .name = "gpio", + .owner = THIS_MODULE, + + .class_attrs = gpio_class_attrs, +}; + + +/** + * gpiod_export - export a GPIO through sysfs + * @gpio: gpio to make available, already requested + * @direction_may_change: true if userspace may change gpio direction + * Context: arch_initcall or later + * + * When drivers want to make a GPIO accessible to userspace after they + * have requested it -- perhaps while debugging, or as part of their + * public interface -- they may use this routine. If the GPIO can + * change direction (some can't) and the caller allows it, userspace + * will see "direction" sysfs attribute which may be used to change + * the gpio's direction. A "value" attribute will always be provided. + * + * Returns zero on success, else an error. + */ +int gpiod_export(struct gpio_desc *desc, bool direction_may_change) +{ + unsigned long flags; + int status; + const char *ioname = NULL; + struct device *dev; + int offset; + + /* can't export until sysfs is available ... */ + if (!gpio_class.p) { + pr_debug("%s: called too early!\n", __func__); + return -ENOENT; + } + + if (!desc) { + pr_debug("%s: invalid gpio descriptor\n", __func__); + return -EINVAL; + } + + mutex_lock(&sysfs_lock); + + spin_lock_irqsave(&gpio_lock, flags); + if (!test_bit(FLAG_REQUESTED, &desc->flags) || + test_bit(FLAG_EXPORT, &desc->flags)) { + spin_unlock_irqrestore(&gpio_lock, flags); + gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n", + __func__, + test_bit(FLAG_REQUESTED, &desc->flags), + test_bit(FLAG_EXPORT, &desc->flags)); + status = -EPERM; + goto fail_unlock; + } + + if (!desc->chip->direction_input || !desc->chip->direction_output) + direction_may_change = false; + spin_unlock_irqrestore(&gpio_lock, flags); + + offset = gpio_chip_hwgpio(desc); + if (desc->chip->names && desc->chip->names[offset]) + ioname = desc->chip->names[offset]; + + dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), + desc, ioname ? 
ioname : "gpio%u", + desc_to_gpio(desc)); + if (IS_ERR(dev)) { + status = PTR_ERR(dev); + goto fail_unlock; + } + + status = sysfs_create_group(&dev->kobj, &gpio_attr_group); + if (status) + goto fail_unregister_device; + + if (direction_may_change) { + status = device_create_file(dev, &dev_attr_direction); + if (status) + goto fail_unregister_device; + } + + if (gpiod_to_irq(desc) >= 0 && (direction_may_change || + !test_bit(FLAG_IS_OUT, &desc->flags))) { + status = device_create_file(dev, &dev_attr_edge); + if (status) + goto fail_unregister_device; + } + + set_bit(FLAG_EXPORT, &desc->flags); + mutex_unlock(&sysfs_lock); + return 0; + +fail_unregister_device: + device_unregister(dev); +fail_unlock: + mutex_unlock(&sysfs_lock); + gpiod_dbg(desc, "%s: status %d\n", __func__, status); + return status; +} +EXPORT_SYMBOL_GPL(gpiod_export); + +static int match_export(struct device *dev, const void *data) +{ + return dev_get_drvdata(dev) == data; +} + +/** + * gpiod_export_link - create a sysfs link to an exported GPIO node + * @dev: device under which to create symlink + * @name: name of the symlink + * @gpio: gpio to create symlink to, already exported + * + * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN + * node. Caller is responsible for unlinking. + * + * Returns zero on success, else an error. + */ +int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc) +{ + int status = -EINVAL; + + if (!desc) { + pr_warn("%s: invalid GPIO\n", __func__); + return -EINVAL; + } + + mutex_lock(&sysfs_lock); + + if (test_bit(FLAG_EXPORT, &desc->flags)) { + struct device *tdev; + + tdev = class_find_device(&gpio_class, NULL, desc, match_export); + if (tdev != NULL) { + status = sysfs_create_link(&dev->kobj, &tdev->kobj, + name); + } else { + status = -ENODEV; + } + } + + mutex_unlock(&sysfs_lock); + + if (status) + gpiod_dbg(desc, "%s: status %d\n", __func__, status); + + return status; +} +EXPORT_SYMBOL_GPL(gpiod_export_link); + +/** + * gpiod_sysfs_set_active_low - set the polarity of gpio sysfs value + * @gpio: gpio to change + * @value: non-zero to use active low, i.e. inverted values + * + * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute. + * The GPIO does not have to be exported yet. If poll(2) support has + * been enabled for either rising or falling edge, it will be + * reconfigured to follow the new polarity. + * + * Returns zero on success, else an error. + */ +int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) +{ + struct device *dev = NULL; + int status = -EINVAL; + + if (!desc) { + pr_warn("%s: invalid GPIO\n", __func__); + return -EINVAL; + } + + mutex_lock(&sysfs_lock); + + if (test_bit(FLAG_EXPORT, &desc->flags)) { + dev = class_find_device(&gpio_class, NULL, desc, match_export); + if (dev == NULL) { + status = -ENODEV; + goto unlock; + } + } + + status = sysfs_set_active_low(desc, dev, value); + +unlock: + mutex_unlock(&sysfs_lock); + + if (status) + gpiod_dbg(desc, "%s: status %d\n", __func__, status); + + return status; +} +EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low); + +/** + * gpiod_unexport - reverse effect of gpio_export() + * @gpio: gpio to make unavailable + * + * This is implicit on gpio_free(). 
+ */ +void gpiod_unexport(struct gpio_desc *desc) +{ + int status = 0; + struct device *dev = NULL; + + if (!desc) { + pr_warn("%s: invalid GPIO\n", __func__); + return; + } + + mutex_lock(&sysfs_lock); + + if (test_bit(FLAG_EXPORT, &desc->flags)) { + + dev = class_find_device(&gpio_class, NULL, desc, match_export); + if (dev) { + gpio_setup_irq(desc, dev, 0); + clear_bit(FLAG_EXPORT, &desc->flags); + } else + status = -ENODEV; + } + + mutex_unlock(&sysfs_lock); + + if (dev) { + device_unregister(dev); + put_device(dev); + } + + if (status) + gpiod_dbg(desc, "%s: status %d\n", __func__, status); +} +EXPORT_SYMBOL_GPL(gpiod_unexport); + +int gpiochip_export(struct gpio_chip *chip) +{ + int status; + struct device *dev; + + /* Many systems register gpio chips for SOC support very early, + * before driver model support is available. In those cases we + * export this later, in gpiolib_sysfs_init() ... here we just + * verify that _some_ field of gpio_class got initialized. + */ + if (!gpio_class.p) + return 0; + + /* use chip->base for the ID; it's already known to be unique */ + mutex_lock(&sysfs_lock); + dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, + "gpiochip%d", chip->base); + if (!IS_ERR(dev)) { + status = sysfs_create_group(&dev->kobj, + &gpiochip_attr_group); + } else + status = PTR_ERR(dev); + chip->exported = (status == 0); + mutex_unlock(&sysfs_lock); + + if (status) + chip_dbg(chip, "%s: status %d\n", __func__, status); + + return status; +} + +void gpiochip_unexport(struct gpio_chip *chip) +{ + int status; + struct device *dev; + + mutex_lock(&sysfs_lock); + dev = class_find_device(&gpio_class, NULL, chip, match_export); + if (dev) { + put_device(dev); + device_unregister(dev); + chip->exported = false; + status = 0; + } else + status = -ENODEV; + mutex_unlock(&sysfs_lock); + + if (status) + chip_dbg(chip, "%s: status %d\n", __func__, status); +} + +static int __init gpiolib_sysfs_init(void) +{ + int status; + unsigned long flags; + struct gpio_chip *chip; + + status = class_register(&gpio_class); + if (status < 0) + return status; + + /* Scan and register the gpio_chips which registered very + * early (e.g. before the class_register above was called). + * + * We run before arch_initcall() so chip->dev nodes can have + * registered, and so arch_initcall() can always gpio_export(). + */ + spin_lock_irqsave(&gpio_lock, flags); + list_for_each_entry(chip, &gpio_chips, list) { + if (chip->exported) + continue; + + /* + * TODO we yield gpio_lock here because gpiochip_export() + * acquires a mutex. This is unsafe and needs to be fixed. + * + * Also it would be nice to use gpiochip_find() here so we + * can keep gpio_chips local to gpiolib.c, but the yield of + * gpio_lock prevents us from doing this. + */ + spin_unlock_irqrestore(&gpio_lock, flags); + status = gpiochip_export(chip); + spin_lock_irqsave(&gpio_lock, flags); + } + spin_unlock_irqrestore(&gpio_lock, flags); + + + return status; +} +postcore_initcall(gpiolib_sysfs_init); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 2ebc9071e354..c68d037de656 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/gpio/driver.h> +#include <linux/gpio/machine.h> #include "gpiolib.h" @@ -44,111 +45,19 @@ * While any GPIO is requested, its gpio_chip is not removable; * each GPIO's "requested" flag serves as a lock and refcount. 
*/ -static DEFINE_SPINLOCK(gpio_lock); +DEFINE_SPINLOCK(gpio_lock); -struct gpio_desc { - struct gpio_chip *chip; - unsigned long flags; -/* flag symbols are bit numbers */ -#define FLAG_REQUESTED 0 -#define FLAG_IS_OUT 1 -#define FLAG_EXPORT 2 /* protected by sysfs_lock */ -#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */ -#define FLAG_TRIG_FALL 4 /* trigger on falling edge */ -#define FLAG_TRIG_RISE 5 /* trigger on rising edge */ -#define FLAG_ACTIVE_LOW 6 /* value has active low */ -#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ -#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ -#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ - -#define ID_SHIFT 16 /* add new flags before this one */ - -#define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1) -#define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE)) - -#ifdef CONFIG_DEBUG_FS - const char *label; -#endif -}; static struct gpio_desc gpio_desc[ARCH_NR_GPIOS]; #define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio) static DEFINE_MUTEX(gpio_lookup_lock); static LIST_HEAD(gpio_lookup_list); -static LIST_HEAD(gpio_chips); - -#ifdef CONFIG_GPIO_SYSFS -static DEFINE_IDR(dirent_idr); -#endif - -static int gpiod_request(struct gpio_desc *desc, const char *label); -static void gpiod_free(struct gpio_desc *desc); - -/* With descriptor prefix */ - -#ifdef CONFIG_DEBUG_FS -#define gpiod_emerg(desc, fmt, ...) \ - pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\ - ##__VA_ARGS__) -#define gpiod_crit(desc, fmt, ...) \ - pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ - ##__VA_ARGS__) -#define gpiod_err(desc, fmt, ...) \ - pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ - ##__VA_ARGS__) -#define gpiod_warn(desc, fmt, ...) \ - pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ - ##__VA_ARGS__) -#define gpiod_info(desc, fmt, ...) \ - pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ - ##__VA_ARGS__) -#define gpiod_dbg(desc, fmt, ...) \ - pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\ - ##__VA_ARGS__) -#else -#define gpiod_emerg(desc, fmt, ...) \ - pr_emerg("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) -#define gpiod_crit(desc, fmt, ...) \ - pr_crit("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) -#define gpiod_err(desc, fmt, ...) \ - pr_err("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) -#define gpiod_warn(desc, fmt, ...) \ - pr_warn("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) -#define gpiod_info(desc, fmt, ...) \ - pr_info("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) -#define gpiod_dbg(desc, fmt, ...) \ - pr_debug("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__) -#endif - -/* With chip prefix */ - -#define chip_emerg(chip, fmt, ...) \ - pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) -#define chip_crit(chip, fmt, ...) \ - pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) -#define chip_err(chip, fmt, ...) \ - pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) -#define chip_warn(chip, fmt, ...) \ - pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) -#define chip_info(chip, fmt, ...) \ - pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) -#define chip_dbg(chip, fmt, ...) 
\ - pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) +LIST_HEAD(gpio_chips); static inline void desc_set_label(struct gpio_desc *d, const char *label) { -#ifdef CONFIG_DEBUG_FS d->label = label; -#endif -} - -/* - * Return the GPIO number of the passed descriptor relative to its chip - */ -static int gpio_chip_hwgpio(const struct gpio_desc *desc) -{ - return desc - &desc->chip->desc[0]; } /** @@ -174,7 +83,6 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, return &chip->desc[hwnum]; } -EXPORT_SYMBOL_GPL(gpiochip_get_desc); /** * Convert a GPIO descriptor to the integer namespace. @@ -188,39 +96,6 @@ int desc_to_gpio(const struct gpio_desc *desc) EXPORT_SYMBOL_GPL(desc_to_gpio); -/* Warn when drivers omit gpio_request() calls -- legal but ill-advised - * when setting direction, and otherwise illegal. Until board setup code - * and drivers use explicit requests everywhere (which won't happen when - * those calls have no teeth) we can't avoid autorequesting. This nag - * message should motivate switching to explicit requests... so should - * the weaker cleanup after faults, compared to gpio_request(). - * - * NOTE: the autorequest mechanism is going away; at this point it's - * only "legal" in the sense that (old) code using it won't break yet, - * but instead only triggers a WARN() stack dump. - */ -static int gpio_ensure_requested(struct gpio_desc *desc) -{ - const struct gpio_chip *chip = desc->chip; - const int gpio = desc_to_gpio(desc); - - if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0, - "autorequest GPIO-%d\n", gpio)) { - if (!try_module_get(chip->owner)) { - gpiod_err(desc, "%s: module can't be gotten\n", - __func__); - clear_bit(FLAG_REQUESTED, &desc->flags); - /* lose */ - return -EIO; - } - desc_set_label(desc, "[auto]"); - /* caller must chip->request() w/o spinlock */ - if (chip->request) - return 1; - } - return 0; -} - /** * gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs * @desc: descriptor to return the chip of @@ -291,836 +166,6 @@ int gpiod_get_direction(const struct gpio_desc *desc) } EXPORT_SYMBOL_GPL(gpiod_get_direction); -#ifdef CONFIG_GPIO_SYSFS - -/* lock protects against unexport_gpio() being called while - * sysfs files are active. - */ -static DEFINE_MUTEX(sysfs_lock); - -/* - * /sys/class/gpio/gpioN... only for GPIOs that are exported - * /direction - * * MAY BE OMITTED if kernel won't allow direction changes - * * is read/write as "in" or "out" - * * may also be written as "high" or "low", initializing - * output value as specified ("out" implies "low") - * /value - * * always readable, subject to hardware behavior - * * may be writable, as zero/nonzero - * /edge - * * configures behavior of poll(2) on /value - * * available only if pin can generate IRQs on input - * * is read/write as "none", "falling", "rising", or "both" - * /active_low - * * configures polarity of /value - * * is read/write as zero/nonzero - * * also affects existing and subsequent "falling" and "rising" - * /edge configuration - */ - -static ssize_t gpio_direction_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - const struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) { - status = -EIO; - } else { - gpiod_get_direction(desc); - status = sprintf(buf, "%s\n", - test_bit(FLAG_IS_OUT, &desc->flags) - ? 
"out" : "in"); - } - - mutex_unlock(&sysfs_lock); - return status; -} - -static ssize_t gpio_direction_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else if (sysfs_streq(buf, "high")) - status = gpiod_direction_output_raw(desc, 1); - else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) - status = gpiod_direction_output_raw(desc, 0); - else if (sysfs_streq(buf, "in")) - status = gpiod_direction_input(desc); - else - status = -EINVAL; - - mutex_unlock(&sysfs_lock); - return status ? : size; -} - -static /* const */ DEVICE_ATTR(direction, 0644, - gpio_direction_show, gpio_direction_store); - -static ssize_t gpio_value_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else - status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc)); - - mutex_unlock(&sysfs_lock); - return status; -} - -static ssize_t gpio_value_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else if (!test_bit(FLAG_IS_OUT, &desc->flags)) - status = -EPERM; - else { - long value; - - status = kstrtol(buf, 0, &value); - if (status == 0) { - gpiod_set_value_cansleep(desc, value); - status = size; - } - } - - mutex_unlock(&sysfs_lock); - return status; -} - -static const DEVICE_ATTR(value, 0644, - gpio_value_show, gpio_value_store); - -static irqreturn_t gpio_sysfs_irq(int irq, void *priv) -{ - struct kernfs_node *value_sd = priv; - - sysfs_notify_dirent(value_sd); - return IRQ_HANDLED; -} - -static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, - unsigned long gpio_flags) -{ - struct kernfs_node *value_sd; - unsigned long irq_flags; - int ret, irq, id; - - if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags) - return 0; - - irq = gpiod_to_irq(desc); - if (irq < 0) - return -EIO; - - id = desc->flags >> ID_SHIFT; - value_sd = idr_find(&dirent_idr, id); - if (value_sd) - free_irq(irq, value_sd); - - desc->flags &= ~GPIO_TRIGGER_MASK; - - if (!gpio_flags) { - gpiod_unlock_as_irq(desc); - ret = 0; - goto free_id; - } - - irq_flags = IRQF_SHARED; - if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) - irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? - IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; - if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) - irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 
- IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; - - if (!value_sd) { - value_sd = sysfs_get_dirent(dev->kobj.sd, "value"); - if (!value_sd) { - ret = -ENODEV; - goto err_out; - } - - ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL); - if (ret < 0) - goto free_sd; - id = ret; - - desc->flags &= GPIO_FLAGS_MASK; - desc->flags |= (unsigned long)id << ID_SHIFT; - - if (desc->flags >> ID_SHIFT != id) { - ret = -ERANGE; - goto free_id; - } - } - - ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags, - "gpiolib", value_sd); - if (ret < 0) - goto free_id; - - ret = gpiod_lock_as_irq(desc); - if (ret < 0) { - gpiod_warn(desc, "failed to flag the GPIO for IRQ\n"); - goto free_id; - } - - desc->flags |= gpio_flags; - return 0; - -free_id: - idr_remove(&dirent_idr, id); - desc->flags &= GPIO_FLAGS_MASK; -free_sd: - if (value_sd) - sysfs_put(value_sd); -err_out: - return ret; -} - -static const struct { - const char *name; - unsigned long flags; -} trigger_types[] = { - { "none", 0 }, - { "falling", BIT(FLAG_TRIG_FALL) }, - { "rising", BIT(FLAG_TRIG_RISE) }, - { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) }, -}; - -static ssize_t gpio_edge_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - const struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else { - int i; - - status = 0; - for (i = 0; i < ARRAY_SIZE(trigger_types); i++) - if ((desc->flags & GPIO_TRIGGER_MASK) - == trigger_types[i].flags) { - status = sprintf(buf, "%s\n", - trigger_types[i].name); - break; - } - } - - mutex_unlock(&sysfs_lock); - return status; -} - -static ssize_t gpio_edge_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - int i; - - for (i = 0; i < ARRAY_SIZE(trigger_types); i++) - if (sysfs_streq(trigger_types[i].name, buf)) - goto found; - return -EINVAL; - -found: - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else { - status = gpio_setup_irq(desc, dev, trigger_types[i].flags); - if (!status) - status = size; - } - - mutex_unlock(&sysfs_lock); - - return status; -} - -static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); - -static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, - int value) -{ - int status = 0; - - if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) - return 0; - - if (value) - set_bit(FLAG_ACTIVE_LOW, &desc->flags); - else - clear_bit(FLAG_ACTIVE_LOW, &desc->flags); - - /* reconfigure poll(2) support if enabled on one edge only */ - if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^ - !!test_bit(FLAG_TRIG_FALL, &desc->flags))) { - unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK; - - gpio_setup_irq(desc, dev, 0); - status = gpio_setup_irq(desc, dev, trigger_flags); - } - - return status; -} - -static ssize_t gpio_active_low_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - const struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else - status = sprintf(buf, "%d\n", - !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); - - mutex_unlock(&sysfs_lock); - - return status; -} - -static ssize_t gpio_active_low_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct gpio_desc *desc = 
dev_get_drvdata(dev); - ssize_t status; - - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) { - status = -EIO; - } else { - long value; - - status = kstrtol(buf, 0, &value); - if (status == 0) - status = sysfs_set_active_low(desc, dev, value != 0); - } - - mutex_unlock(&sysfs_lock); - - return status ? : size; -} - -static const DEVICE_ATTR(active_low, 0644, - gpio_active_low_show, gpio_active_low_store); - -static const struct attribute *gpio_attrs[] = { - &dev_attr_value.attr, - &dev_attr_active_low.attr, - NULL, -}; - -static const struct attribute_group gpio_attr_group = { - .attrs = (struct attribute **) gpio_attrs, -}; - -/* - * /sys/class/gpio/gpiochipN/ - * /base ... matching gpio_chip.base (N) - * /label ... matching gpio_chip.label - * /ngpio ... matching gpio_chip.ngpio - */ - -static ssize_t chip_base_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - const struct gpio_chip *chip = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", chip->base); -} -static DEVICE_ATTR(base, 0444, chip_base_show, NULL); - -static ssize_t chip_label_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - const struct gpio_chip *chip = dev_get_drvdata(dev); - - return sprintf(buf, "%s\n", chip->label ? : ""); -} -static DEVICE_ATTR(label, 0444, chip_label_show, NULL); - -static ssize_t chip_ngpio_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - const struct gpio_chip *chip = dev_get_drvdata(dev); - - return sprintf(buf, "%u\n", chip->ngpio); -} -static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); - -static const struct attribute *gpiochip_attrs[] = { - &dev_attr_base.attr, - &dev_attr_label.attr, - &dev_attr_ngpio.attr, - NULL, -}; - -static const struct attribute_group gpiochip_attr_group = { - .attrs = (struct attribute **) gpiochip_attrs, -}; - -/* - * /sys/class/gpio/export ... write-only - * integer N ... number of GPIO to export (full access) - * /sys/class/gpio/unexport ... write-only - * integer N ... number of GPIO to unexport - */ -static ssize_t export_store(struct class *class, - struct class_attribute *attr, - const char *buf, size_t len) -{ - long gpio; - struct gpio_desc *desc; - int status; - - status = kstrtol(buf, 0, &gpio); - if (status < 0) - goto done; - - desc = gpio_to_desc(gpio); - /* reject invalid GPIOs */ - if (!desc) { - pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); - return -EINVAL; - } - - /* No extra locking here; FLAG_SYSFS just signifies that the - * request and export were done by on behalf of userspace, so - * they may be undone on its behalf too. - */ - - status = gpiod_request(desc, "sysfs"); - if (status < 0) { - if (status == -EPROBE_DEFER) - status = -ENODEV; - goto done; - } - status = gpiod_export(desc, true); - if (status < 0) - gpiod_free(desc); - else - set_bit(FLAG_SYSFS, &desc->flags); - -done: - if (status) - pr_debug("%s: status %d\n", __func__, status); - return status ? 
: len; -} - -static ssize_t unexport_store(struct class *class, - struct class_attribute *attr, - const char *buf, size_t len) -{ - long gpio; - struct gpio_desc *desc; - int status; - - status = kstrtol(buf, 0, &gpio); - if (status < 0) - goto done; - - desc = gpio_to_desc(gpio); - /* reject bogus commands (gpio_unexport ignores them) */ - if (!desc) { - pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); - return -EINVAL; - } - - status = -EINVAL; - - /* No extra locking here; FLAG_SYSFS just signifies that the - * request and export were done by on behalf of userspace, so - * they may be undone on its behalf too. - */ - if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { - status = 0; - gpiod_free(desc); - } -done: - if (status) - pr_debug("%s: status %d\n", __func__, status); - return status ? : len; -} - -static struct class_attribute gpio_class_attrs[] = { - __ATTR(export, 0200, NULL, export_store), - __ATTR(unexport, 0200, NULL, unexport_store), - __ATTR_NULL, -}; - -static struct class gpio_class = { - .name = "gpio", - .owner = THIS_MODULE, - - .class_attrs = gpio_class_attrs, -}; - - -/** - * gpiod_export - export a GPIO through sysfs - * @gpio: gpio to make available, already requested - * @direction_may_change: true if userspace may change gpio direction - * Context: arch_initcall or later - * - * When drivers want to make a GPIO accessible to userspace after they - * have requested it -- perhaps while debugging, or as part of their - * public interface -- they may use this routine. If the GPIO can - * change direction (some can't) and the caller allows it, userspace - * will see "direction" sysfs attribute which may be used to change - * the gpio's direction. A "value" attribute will always be provided. - * - * Returns zero on success, else an error. - */ -int gpiod_export(struct gpio_desc *desc, bool direction_may_change) -{ - unsigned long flags; - int status; - const char *ioname = NULL; - struct device *dev; - int offset; - - /* can't export until sysfs is available ... */ - if (!gpio_class.p) { - pr_debug("%s: called too early!\n", __func__); - return -ENOENT; - } - - if (!desc) { - pr_debug("%s: invalid gpio descriptor\n", __func__); - return -EINVAL; - } - - mutex_lock(&sysfs_lock); - - spin_lock_irqsave(&gpio_lock, flags); - if (!test_bit(FLAG_REQUESTED, &desc->flags) || - test_bit(FLAG_EXPORT, &desc->flags)) { - spin_unlock_irqrestore(&gpio_lock, flags); - gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n", - __func__, - test_bit(FLAG_REQUESTED, &desc->flags), - test_bit(FLAG_EXPORT, &desc->flags)); - status = -EPERM; - goto fail_unlock; - } - - if (!desc->chip->direction_input || !desc->chip->direction_output) - direction_may_change = false; - spin_unlock_irqrestore(&gpio_lock, flags); - - offset = gpio_chip_hwgpio(desc); - if (desc->chip->names && desc->chip->names[offset]) - ioname = desc->chip->names[offset]; - - dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), - desc, ioname ? 
ioname : "gpio%u", - desc_to_gpio(desc)); - if (IS_ERR(dev)) { - status = PTR_ERR(dev); - goto fail_unlock; - } - - status = sysfs_create_group(&dev->kobj, &gpio_attr_group); - if (status) - goto fail_unregister_device; - - if (direction_may_change) { - status = device_create_file(dev, &dev_attr_direction); - if (status) - goto fail_unregister_device; - } - - if (gpiod_to_irq(desc) >= 0 && (direction_may_change || - !test_bit(FLAG_IS_OUT, &desc->flags))) { - status = device_create_file(dev, &dev_attr_edge); - if (status) - goto fail_unregister_device; - } - - set_bit(FLAG_EXPORT, &desc->flags); - mutex_unlock(&sysfs_lock); - return 0; - -fail_unregister_device: - device_unregister(dev); -fail_unlock: - mutex_unlock(&sysfs_lock); - gpiod_dbg(desc, "%s: status %d\n", __func__, status); - return status; -} -EXPORT_SYMBOL_GPL(gpiod_export); - -static int match_export(struct device *dev, const void *data) -{ - return dev_get_drvdata(dev) == data; -} - -/** - * gpiod_export_link - create a sysfs link to an exported GPIO node - * @dev: device under which to create symlink - * @name: name of the symlink - * @gpio: gpio to create symlink to, already exported - * - * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN - * node. Caller is responsible for unlinking. - * - * Returns zero on success, else an error. - */ -int gpiod_export_link(struct device *dev, const char *name, - struct gpio_desc *desc) -{ - int status = -EINVAL; - - if (!desc) { - pr_warn("%s: invalid GPIO\n", __func__); - return -EINVAL; - } - - mutex_lock(&sysfs_lock); - - if (test_bit(FLAG_EXPORT, &desc->flags)) { - struct device *tdev; - - tdev = class_find_device(&gpio_class, NULL, desc, match_export); - if (tdev != NULL) { - status = sysfs_create_link(&dev->kobj, &tdev->kobj, - name); - } else { - status = -ENODEV; - } - } - - mutex_unlock(&sysfs_lock); - - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); - - return status; -} -EXPORT_SYMBOL_GPL(gpiod_export_link); - -/** - * gpiod_sysfs_set_active_low - set the polarity of gpio sysfs value - * @gpio: gpio to change - * @value: non-zero to use active low, i.e. inverted values - * - * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute. - * The GPIO does not have to be exported yet. If poll(2) support has - * been enabled for either rising or falling edge, it will be - * reconfigured to follow the new polarity. - * - * Returns zero on success, else an error. - */ -int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) -{ - struct device *dev = NULL; - int status = -EINVAL; - - if (!desc) { - pr_warn("%s: invalid GPIO\n", __func__); - return -EINVAL; - } - - mutex_lock(&sysfs_lock); - - if (test_bit(FLAG_EXPORT, &desc->flags)) { - dev = class_find_device(&gpio_class, NULL, desc, match_export); - if (dev == NULL) { - status = -ENODEV; - goto unlock; - } - } - - status = sysfs_set_active_low(desc, dev, value); - -unlock: - mutex_unlock(&sysfs_lock); - - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); - - return status; -} -EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low); - -/** - * gpiod_unexport - reverse effect of gpio_export() - * @gpio: gpio to make unavailable - * - * This is implicit on gpio_free(). 
- */ -void gpiod_unexport(struct gpio_desc *desc) -{ - int status = 0; - struct device *dev = NULL; - - if (!desc) { - pr_warn("%s: invalid GPIO\n", __func__); - return; - } - - mutex_lock(&sysfs_lock); - - if (test_bit(FLAG_EXPORT, &desc->flags)) { - - dev = class_find_device(&gpio_class, NULL, desc, match_export); - if (dev) { - gpio_setup_irq(desc, dev, 0); - clear_bit(FLAG_EXPORT, &desc->flags); - } else - status = -ENODEV; - } - - mutex_unlock(&sysfs_lock); - - if (dev) { - device_unregister(dev); - put_device(dev); - } - - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); -} -EXPORT_SYMBOL_GPL(gpiod_unexport); - -static int gpiochip_export(struct gpio_chip *chip) -{ - int status; - struct device *dev; - - /* Many systems register gpio chips for SOC support very early, - * before driver model support is available. In those cases we - * export this later, in gpiolib_sysfs_init() ... here we just - * verify that _some_ field of gpio_class got initialized. - */ - if (!gpio_class.p) - return 0; - - /* use chip->base for the ID; it's already known to be unique */ - mutex_lock(&sysfs_lock); - dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, - "gpiochip%d", chip->base); - if (!IS_ERR(dev)) { - status = sysfs_create_group(&dev->kobj, - &gpiochip_attr_group); - } else - status = PTR_ERR(dev); - chip->exported = (status == 0); - mutex_unlock(&sysfs_lock); - - if (status) { - unsigned long flags; - unsigned gpio; - - spin_lock_irqsave(&gpio_lock, flags); - gpio = 0; - while (gpio < chip->ngpio) - chip->desc[gpio++].chip = NULL; - spin_unlock_irqrestore(&gpio_lock, flags); - - chip_dbg(chip, "%s: status %d\n", __func__, status); - } - - return status; -} - -static void gpiochip_unexport(struct gpio_chip *chip) -{ - int status; - struct device *dev; - - mutex_lock(&sysfs_lock); - dev = class_find_device(&gpio_class, NULL, chip, match_export); - if (dev) { - put_device(dev); - device_unregister(dev); - chip->exported = false; - status = 0; - } else - status = -ENODEV; - mutex_unlock(&sysfs_lock); - - if (status) - chip_dbg(chip, "%s: status %d\n", __func__, status); -} - -static int __init gpiolib_sysfs_init(void) -{ - int status; - unsigned long flags; - struct gpio_chip *chip; - - status = class_register(&gpio_class); - if (status < 0) - return status; - - /* Scan and register the gpio_chips which registered very - * early (e.g. before the class_register above was called). - * - * We run before arch_initcall() so chip->dev nodes can have - * registered, and so arch_initcall() can always gpio_export(). - */ - spin_lock_irqsave(&gpio_lock, flags); - list_for_each_entry(chip, &gpio_chips, list) { - if (!chip || chip->exported) - continue; - - spin_unlock_irqrestore(&gpio_lock, flags); - status = gpiochip_export(chip); - spin_lock_irqsave(&gpio_lock, flags); - } - spin_unlock_irqrestore(&gpio_lock, flags); - - - return status; -} -postcore_initcall(gpiolib_sysfs_init); - -#else -static inline int gpiochip_export(struct gpio_chip *chip) -{ - return 0; -} - -static inline void gpiochip_unexport(struct gpio_chip *chip) -{ -} - -#endif /* CONFIG_GPIO_SYSFS */ - /* * Add a new chip to the global chips list, keeping the list of chips sorted * by base order. @@ -1368,12 +413,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, return; } - irq_set_chained_handler(parent_irq, parent_handler); /* * The parent irqchip is already using the chip_data for this * irqchip, so our callbacks simply use the handler_data. 
*/ irq_set_handler_data(parent_irq, gpiochip); + irq_set_chained_handler(parent_irq, parent_handler); } EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); @@ -1474,6 +519,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) { unsigned int offset; + acpi_gpiochip_free_interrupts(gpiochip); + /* Remove all IRQ mappings and delete the domain */ if (gpiochip->irqdomain) { for (offset = 0; offset < gpiochip->ngpio; offset++) @@ -1567,6 +614,8 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip, gpiochip->irq_base = irq_base; } + acpi_gpiochip_request_interrupts(gpiochip); + return 0; } EXPORT_SYMBOL_GPL(gpiochip_irqchip_add); @@ -1740,7 +789,7 @@ done: return status; } -static int gpiod_request(struct gpio_desc *desc, const char *label) +int gpiod_request(struct gpio_desc *desc, const char *label) { int status = -EPROBE_DEFER; struct gpio_chip *chip; @@ -1767,12 +816,6 @@ done: return status; } -int gpio_request(unsigned gpio, const char *label) -{ - return gpiod_request(gpio_to_desc(gpio), label); -} -EXPORT_SYMBOL_GPL(gpio_request); - static bool __gpiod_free(struct gpio_desc *desc) { bool ret = false; @@ -1805,7 +848,7 @@ static bool __gpiod_free(struct gpio_desc *desc) return ret; } -static void gpiod_free(struct gpio_desc *desc) +void gpiod_free(struct gpio_desc *desc) { if (desc && __gpiod_free(desc)) module_put(desc->chip->owner); @@ -1813,101 +856,14 @@ static void gpiod_free(struct gpio_desc *desc) WARN_ON(extra_checks); } -void gpio_free(unsigned gpio) -{ - gpiod_free(gpio_to_desc(gpio)); -} -EXPORT_SYMBOL_GPL(gpio_free); - -/** - * gpio_request_one - request a single GPIO with initial configuration - * @gpio: the GPIO number - * @flags: GPIO configuration as specified by GPIOF_* - * @label: a literal description string of this GPIO - */ -int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) -{ - struct gpio_desc *desc; - int err; - - desc = gpio_to_desc(gpio); - - err = gpiod_request(desc, label); - if (err) - return err; - - if (flags & GPIOF_OPEN_DRAIN) - set_bit(FLAG_OPEN_DRAIN, &desc->flags); - - if (flags & GPIOF_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); - - if (flags & GPIOF_DIR_IN) - err = gpiod_direction_input(desc); - else - err = gpiod_direction_output_raw(desc, - (flags & GPIOF_INIT_HIGH) ? 
1 : 0); - - if (err) - goto free_gpio; - - if (flags & GPIOF_EXPORT) { - err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE); - if (err) - goto free_gpio; - } - - return 0; - - free_gpio: - gpiod_free(desc); - return err; -} -EXPORT_SYMBOL_GPL(gpio_request_one); - -/** - * gpio_request_array - request multiple GPIOs in a single call - * @array: array of the 'struct gpio' - * @num: how many GPIOs in the array - */ -int gpio_request_array(const struct gpio *array, size_t num) -{ - int i, err; - - for (i = 0; i < num; i++, array++) { - err = gpio_request_one(array->gpio, array->flags, array->label); - if (err) - goto err_free; - } - return 0; - -err_free: - while (i--) - gpio_free((--array)->gpio); - return err; -} -EXPORT_SYMBOL_GPL(gpio_request_array); - -/** - * gpio_free_array - release multiple GPIOs in a single call - * @array: array of the 'struct gpio' - * @num: how many GPIOs in the array - */ -void gpio_free_array(const struct gpio *array, size_t num) -{ - while (num--) - gpio_free((array++)->gpio); -} -EXPORT_SYMBOL_GPL(gpio_free_array); - /** * gpiochip_is_requested - return string iff signal was requested * @chip: controller managing the signal * @offset: of signal within controller's 0..(ngpio - 1) range * * Returns NULL if the GPIO is not currently requested, else a string. - * If debugfs support is enabled, the string returned is the label passed - * to gpio_request(); otherwise it is a meaningless constant. + * The string returned is the label passed to gpio_request(); if none has been + * passed it is a meaningless, non-NULL constant. * * This function is for use by GPIO controller drivers. The label can * help with diagnostics, and knowing that the signal is used as a GPIO @@ -1924,11 +880,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset) if (test_bit(FLAG_REQUESTED, &desc->flags) == 0) return NULL; -#ifdef CONFIG_DEBUG_FS return desc->label; -#else - return "?"; -#endif } EXPORT_SYMBOL_GPL(gpiochip_is_requested); @@ -1950,6 +902,7 @@ int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label) return __gpiod_request(desc, label); } +EXPORT_SYMBOL_GPL(gpiochip_request_own_desc); /** * gpiochip_free_own_desc - Free GPIO requested by the chip driver @@ -1963,6 +916,7 @@ void gpiochip_free_own_desc(struct gpio_desc *desc) if (desc) __gpiod_free(desc); } +EXPORT_SYMBOL_GPL(gpiochip_free_own_desc); /* Drivers MUST set GPIO direction before making get/set calls. In * some cases this is done in early boot, before IRQs are enabled. @@ -1984,10 +938,8 @@ void gpiochip_free_own_desc(struct gpio_desc *desc) */ int gpiod_direction_input(struct gpio_desc *desc) { - unsigned long flags; struct gpio_chip *chip; int status = -EINVAL; - int offset; if (!desc || !desc->chip) { pr_warn("%s: invalid GPIO\n", __func__); @@ -2002,52 +954,20 @@ int gpiod_direction_input(struct gpio_desc *desc) return -EIO; } - spin_lock_irqsave(&gpio_lock, flags); - - status = gpio_ensure_requested(desc); - if (status < 0) - goto fail; - - /* now we know the gpio is valid and chip won't vanish */ - - spin_unlock_irqrestore(&gpio_lock, flags); - - might_sleep_if(chip->can_sleep); - - offset = gpio_chip_hwgpio(desc); - if (status) { - status = chip->request(chip, offset); - if (status < 0) { - gpiod_dbg(desc, "%s: chip request fail, %d\n", - __func__, status); - /* and it's not available to anyone else ... - * gpio_request() is the fully clean solution. 
- */ - goto lose; - } - } - - status = chip->direction_input(chip, offset); + status = chip->direction_input(chip, gpio_chip_hwgpio(desc)); if (status == 0) clear_bit(FLAG_IS_OUT, &desc->flags); trace_gpio_direction(desc_to_gpio(desc), 1, status); -lose: - return status; -fail: - spin_unlock_irqrestore(&gpio_lock, flags); - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); + return status; } EXPORT_SYMBOL_GPL(gpiod_direction_input); static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value) { - unsigned long flags; struct gpio_chip *chip; int status = -EINVAL; - int offset; /* GPIOs used for IRQs shall not be set as output */ if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) { @@ -2073,42 +993,11 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value) return -EIO; } - spin_lock_irqsave(&gpio_lock, flags); - - status = gpio_ensure_requested(desc); - if (status < 0) - goto fail; - - /* now we know the gpio is valid and chip won't vanish */ - - spin_unlock_irqrestore(&gpio_lock, flags); - - might_sleep_if(chip->can_sleep); - - offset = gpio_chip_hwgpio(desc); - if (status) { - status = chip->request(chip, offset); - if (status < 0) { - gpiod_dbg(desc, "%s: chip request fail, %d\n", - __func__, status); - /* and it's not available to anyone else ... - * gpio_request() is the fully clean solution. - */ - goto lose; - } - } - - status = chip->direction_output(chip, offset, value); + status = chip->direction_output(chip, gpio_chip_hwgpio(desc), value); if (status == 0) set_bit(FLAG_IS_OUT, &desc->flags); trace_gpio_value(desc_to_gpio(desc), 0, value); trace_gpio_direction(desc_to_gpio(desc), 0, status); -lose: - return status; -fail: - spin_unlock_irqrestore(&gpio_lock, flags); - if (status) - gpiod_dbg(desc, "%s: gpio status %d\n", __func__, status); return status; } @@ -2167,10 +1056,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output); */ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { - unsigned long flags; struct gpio_chip *chip; - int status = -EINVAL; - int offset; if (!desc || !desc->chip) { pr_warn("%s: invalid GPIO\n", __func__); @@ -2185,27 +1071,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) return -ENOTSUPP; } - spin_lock_irqsave(&gpio_lock, flags); - - status = gpio_ensure_requested(desc); - if (status < 0) - goto fail; - - /* now we know the gpio is valid and chip won't vanish */ - - spin_unlock_irqrestore(&gpio_lock, flags); - - might_sleep_if(chip->can_sleep); - - offset = gpio_chip_hwgpio(desc); - return chip->set_debounce(chip, offset, debounce); - -fail: - spin_unlock_irqrestore(&gpio_lock, flags); - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); - - return status; + return chip->set_debounce(chip, gpio_chip_hwgpio(desc), debounce); } EXPORT_SYMBOL_GPL(gpiod_set_debounce); @@ -2448,54 +1314,44 @@ int gpiod_to_irq(const struct gpio_desc *desc) EXPORT_SYMBOL_GPL(gpiod_to_irq); /** - * gpiod_lock_as_irq() - lock a GPIO to be used as IRQ - * @gpio: the GPIO line to lock as used for IRQ + * gpio_lock_as_irq() - lock a GPIO to be used as IRQ + * @chip: the chip the GPIO to lock belongs to + * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to lock down * a certain GPIO line to be used for IRQs. 
*/ -int gpiod_lock_as_irq(struct gpio_desc *desc) +int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset) { - if (!desc) + if (offset >= chip->ngpio) return -EINVAL; - if (test_bit(FLAG_IS_OUT, &desc->flags)) { - gpiod_err(desc, + if (test_bit(FLAG_IS_OUT, &chip->desc[offset].flags)) { + chip_err(chip, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); return -EIO; } - set_bit(FLAG_USED_AS_IRQ, &desc->flags); + set_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); return 0; } -EXPORT_SYMBOL_GPL(gpiod_lock_as_irq); - -int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset) -{ - return gpiod_lock_as_irq(gpiochip_get_desc(chip, offset)); -} EXPORT_SYMBOL_GPL(gpio_lock_as_irq); /** - * gpiod_unlock_as_irq() - unlock a GPIO used as IRQ - * @gpio: the GPIO line to unlock from IRQ usage + * gpio_unlock_as_irq() - unlock a GPIO used as IRQ + * @chip: the chip the GPIO to lock belongs to + * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to indicate * that a certain GPIO is no longer used exclusively for IRQ. */ -void gpiod_unlock_as_irq(struct gpio_desc *desc) +void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset) { - if (!desc) + if (offset >= chip->ngpio) return; - clear_bit(FLAG_USED_AS_IRQ, &desc->flags); -} -EXPORT_SYMBOL_GPL(gpiod_unlock_as_irq); - -void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset) -{ - return gpiod_unlock_as_irq(gpiochip_get_desc(chip, offset)); + clear_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); } EXPORT_SYMBOL_GPL(gpio_unlock_as_irq); @@ -2726,38 +1582,43 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, * gpiod_get - obtain a GPIO for a given GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer + * @flags: optional GPIO initialization flags * * Return the GPIO descriptor corresponding to the function con_id of device * dev, -ENOENT if no GPIO has been assigned to the requested function, or * another IS_ERR() code if an error occured while trying to acquire the GPIO. */ -struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id) +struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id, + enum gpiod_flags flags) { - return gpiod_get_index(dev, con_id, 0); + return gpiod_get_index(dev, con_id, 0, flags); } -EXPORT_SYMBOL_GPL(gpiod_get); +EXPORT_SYMBOL_GPL(__gpiod_get); /** * gpiod_get_optional - obtain an optional GPIO for a given GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer + * @flags: optional GPIO initialization flags * * This is equivalent to gpiod_get(), except that when no GPIO was assigned to * the requested function it will return NULL. This is convenient for drivers * that need to handle optional GPIOs. 
*/ -struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, - const char *con_id) +struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags) { - return gpiod_get_index_optional(dev, con_id, 0); + return gpiod_get_index_optional(dev, con_id, 0, flags); } -EXPORT_SYMBOL_GPL(gpiod_get_optional); +EXPORT_SYMBOL_GPL(__gpiod_get_optional); /** * gpiod_get_index - obtain a GPIO from a multi-index GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer + * @flags: optional GPIO initialization flags * * This variant of gpiod_get() allows to access GPIOs other than the first * defined one for functions that define several GPIOs. @@ -2766,23 +1627,24 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional); * requested function and/or index, or another IS_ERR() code if an error * occured while trying to acquire the GPIO. */ -struct gpio_desc *__must_check gpiod_get_index(struct device *dev, +struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, const char *con_id, - unsigned int idx) + unsigned int idx, + enum gpiod_flags flags) { struct gpio_desc *desc = NULL; int status; - enum gpio_lookup_flags flags = 0; + enum gpio_lookup_flags lookupflags = 0; dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); /* Using device tree? */ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) { dev_dbg(dev, "using device tree for GPIO lookup\n"); - desc = of_find_gpio(dev, con_id, idx, &flags); + desc = of_find_gpio(dev, con_id, idx, &lookupflags); } else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) { dev_dbg(dev, "using ACPI for GPIO lookup\n"); - desc = acpi_find_gpio(dev, con_id, idx, &flags); + desc = acpi_find_gpio(dev, con_id, idx, &lookupflags); } /* @@ -2791,7 +1653,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, */ if (!desc || desc == ERR_PTR(-ENOENT)) { dev_dbg(dev, "using lookup tables for GPIO lookup"); - desc = gpiod_find(dev, con_id, idx, &flags); + desc = gpiod_find(dev, con_id, idx, &lookupflags); } if (IS_ERR(desc)) { @@ -2804,16 +1666,33 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, if (status < 0) return ERR_PTR(status); - if (flags & GPIO_ACTIVE_LOW) + if (lookupflags & GPIO_ACTIVE_LOW) set_bit(FLAG_ACTIVE_LOW, &desc->flags); - if (flags & GPIO_OPEN_DRAIN) + if (lookupflags & GPIO_OPEN_DRAIN) set_bit(FLAG_OPEN_DRAIN, &desc->flags); - if (flags & GPIO_OPEN_SOURCE) + if (lookupflags & GPIO_OPEN_SOURCE) set_bit(FLAG_OPEN_SOURCE, &desc->flags); + /* No particular flag request, return here... 
*/ + if (!(flags & GPIOD_FLAGS_BIT_DIR_SET)) + return desc; + + /* Process flags */ + if (flags & GPIOD_FLAGS_BIT_DIR_OUT) + status = gpiod_direction_output(desc, + flags & GPIOD_FLAGS_BIT_DIR_VAL); + else + status = gpiod_direction_input(desc); + + if (status < 0) { + dev_dbg(dev, "setup of GPIO %s failed\n", con_id); + gpiod_put(desc); + return ERR_PTR(status); + } + return desc; } -EXPORT_SYMBOL_GPL(gpiod_get_index); +EXPORT_SYMBOL_GPL(__gpiod_get_index); /** * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO @@ -2821,18 +1700,20 @@ EXPORT_SYMBOL_GPL(gpiod_get_index); * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @index: index of the GPIO to obtain in the consumer + * @flags: optional GPIO initialization flags * * This is equivalent to gpiod_get_index(), except that when no GPIO with the * specified index was assigned to the requested function it will return NULL. * This is convenient for drivers that need to handle optional GPIOs. */ -struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, +struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, const char *con_id, - unsigned int index) + unsigned int index, + enum gpiod_flags flags) { struct gpio_desc *desc; - desc = gpiod_get_index(dev, con_id, index); + desc = gpiod_get_index(dev, con_id, index, flags); if (IS_ERR(desc)) { if (PTR_ERR(desc) == -ENOENT) return NULL; @@ -2840,7 +1721,7 @@ struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, return desc; } -EXPORT_SYMBOL_GPL(gpiod_get_index_optional); +EXPORT_SYMBOL_GPL(__gpiod_get_index_optional); /** * gpiod_put - dispose of a GPIO descriptor diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 1a4103dd38df..9db2b6a71c5d 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -31,12 +31,21 @@ struct acpi_gpio_info { void acpi_gpiochip_add(struct gpio_chip *chip); void acpi_gpiochip_remove(struct gpio_chip *chip); +void acpi_gpiochip_request_interrupts(struct gpio_chip *chip); +void acpi_gpiochip_free_interrupts(struct gpio_chip *chip); + struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index, struct acpi_gpio_info *info); #else static inline void acpi_gpiochip_add(struct gpio_chip *chip) { } static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { } +static inline void +acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { } + +static inline void +acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { } + static inline struct gpio_desc * acpi_get_gpiod_by_index(struct device *dev, int index, struct acpi_gpio_info *info) @@ -45,10 +54,100 @@ acpi_get_gpiod_by_index(struct device *dev, int index, } #endif -int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); -void gpiochip_free_own_desc(struct gpio_desc *desc); - struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags); +struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum); + +extern struct spinlock gpio_lock; +extern struct list_head gpio_chips; + +struct gpio_desc { + struct gpio_chip *chip; + unsigned long flags; +/* flag symbols are bit numbers */ +#define FLAG_REQUESTED 0 +#define FLAG_IS_OUT 1 +#define FLAG_EXPORT 2 /* protected by sysfs_lock */ +#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */ +#define FLAG_TRIG_FALL 4 /* trigger on falling edge */ +#define FLAG_TRIG_RISE 5 /* trigger on rising edge 
*/ +#define FLAG_ACTIVE_LOW 6 /* value has active low */ +#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ +#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ +#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ + +#define ID_SHIFT 16 /* add new flags before this one */ + +#define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1) +#define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE)) + + const char *label; +}; + +int gpiod_request(struct gpio_desc *desc, const char *label); +void gpiod_free(struct gpio_desc *desc); + +/* + * Return the GPIO number of the passed descriptor relative to its chip + */ +static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc) +{ + return desc - &desc->chip->desc[0]; +} + +/* With descriptor prefix */ + +#define gpiod_emerg(desc, fmt, ...) \ + pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\ + ##__VA_ARGS__) +#define gpiod_crit(desc, fmt, ...) \ + pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ + ##__VA_ARGS__) +#define gpiod_err(desc, fmt, ...) \ + pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ + ##__VA_ARGS__) +#define gpiod_warn(desc, fmt, ...) \ + pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ + ##__VA_ARGS__) +#define gpiod_info(desc, fmt, ...) \ + pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \ + ##__VA_ARGS__) +#define gpiod_dbg(desc, fmt, ...) \ + pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\ + ##__VA_ARGS__) + +/* With chip prefix */ + +#define chip_emerg(chip, fmt, ...) \ + pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) +#define chip_crit(chip, fmt, ...) \ + pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) +#define chip_err(chip, fmt, ...) \ + pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) +#define chip_warn(chip, fmt, ...) \ + pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) +#define chip_info(chip, fmt, ...) \ + pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) +#define chip_dbg(chip, fmt, ...) \ + pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__) + +#ifdef CONFIG_GPIO_SYSFS + +int gpiochip_export(struct gpio_chip *chip); +void gpiochip_unexport(struct gpio_chip *chip); + +#else + +static inline int gpiochip_export(struct gpio_chip *chip) +{ + return 0; +} + +static inline void gpiochip_unexport(struct gpio_chip *chip) +{ +} + +#endif /* CONFIG_GPIO_SYSFS */ + #endif /* GPIOLIB_H */ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index f5120046ff80..b066bb3ca01a 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -114,6 +114,7 @@ config DRM_RADEON select POWER_SUPPLY select HWMON select BACKLIGHT_CLASS_DEVICE + select INTERVAL_TREE help Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. 
You don't need to choose this to @@ -201,3 +202,5 @@ source "drivers/gpu/drm/msm/Kconfig" source "drivers/gpu/drm/tegra/Kconfig" source "drivers/gpu/drm/panel/Kconfig" + +source "drivers/gpu/drm/sti/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index dd2ba4269740..4a55d59ccd22 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -6,8 +6,8 @@ ccflags-y := -Iinclude/drm drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_context.o drm_dma.o \ - drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ - drm_lock.o drm_memory.o drm_stub.o drm_vm.o \ + drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ + drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ drm_agpsupport.o drm_scatter.o drm_pci.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ drm_crtc.o drm_modes.o drm_edid.o \ @@ -20,11 +20,12 @@ drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o drm-$(CONFIG_PCI) += ati_pcigart.o drm-$(CONFIG_DRM_PANEL) += drm_panel.o +drm-$(CONFIG_OF) += drm_of.o drm-usb-y := drm_usb.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ - drm_plane_helper.o + drm_plane_helper.o drm_dp_mst_topology.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o @@ -63,6 +64,7 @@ obj-$(CONFIG_DRM_QXL) += qxl/ obj-$(CONFIG_DRM_BOCHS) += bochs/ obj-$(CONFIG_DRM_MSM) += msm/ obj-$(CONFIG_DRM_TEGRA) += tegra/ +obj-$(CONFIG_DRM_STI) += sti/ obj-y += i2c/ obj-y += panel/ obj-y += bridge/ diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c index 59948eff6095..ad3d2ebf95c9 100644 --- a/drivers/gpu/drm/armada/armada_510.c +++ b/drivers/gpu/drm/armada/armada_510.c @@ -15,20 +15,19 @@ #include "armada_drm.h" #include "armada_hw.h" -static int armada510_init(struct armada_private *priv, struct device *dev) +static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev) { - priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1"); + struct clk *clk; - if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT) - priv->extclk[0] = ERR_PTR(-EPROBE_DEFER); + clk = devm_clk_get(dev, "ext_ref_clk1"); + if (IS_ERR(clk)) + return PTR_ERR(clk) == -ENOENT ? 
-EPROBE_DEFER : PTR_ERR(clk); - return PTR_RET(priv->extclk[0]); -} + dcrtc->extclk[0] = clk; -static int armada510_crtc_init(struct armada_crtc *dcrtc) -{ /* Lower the watermark so to eliminate jitter at higher bandwidths */ armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F); + return 0; } @@ -45,8 +44,7 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc) static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc, const struct drm_display_mode *mode, uint32_t *sclk) { - struct armada_private *priv = dcrtc->crtc.dev->dev_private; - struct clk *clk = priv->extclk[0]; + struct clk *clk = dcrtc->extclk[0]; int ret; if (dcrtc->num == 1) @@ -81,7 +79,6 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc, const struct armada_variant armada510_ops = { .has_spu_adv_reg = true, .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND, - .init = armada510_init, - .crtc_init = armada510_crtc_init, - .crtc_compute_clock = armada510_crtc_compute_clock, + .init = armada510_crtc_init, + .compute_clock = armada510_crtc_compute_clock, }; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 3aedf9e993e6..9a0cc09e6653 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -7,6 +7,9 @@ * published by the Free Software Foundation. */ #include <linux/clk.h> +#include <linux/component.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include "armada_crtc.h" @@ -332,24 +335,23 @@ static void armada_drm_crtc_commit(struct drm_crtc *crtc) static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adj) { - struct armada_private *priv = crtc->dev->dev_private; struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); int ret; /* We can't do interlaced modes if we don't have the SPU_ADV_REG */ - if (!priv->variant->has_spu_adv_reg && + if (!dcrtc->variant->has_spu_adv_reg && adj->flags & DRM_MODE_FLAG_INTERLACE) return false; /* Check whether the display mode is possible */ - ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL); + ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL); if (ret) return false; return true; } -void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat) +static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat) { struct armada_vbl_event *e, *n; void __iomem *base = dcrtc->base; @@ -410,6 +412,27 @@ void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat) } } +static irqreturn_t armada_drm_irq(int irq, void *arg) +{ + struct armada_crtc *dcrtc = arg; + u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); + + /* + * This is rediculous - rather than writing bits to clear, we + * have to set the actual status register value. This is racy. 
+ */ + writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); + + /* Mask out those interrupts we haven't enabled */ + v = stat & dcrtc->irq_ena; + + if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) { + armada_drm_crtc_irq(dcrtc, stat); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + /* These are locked by dev->vbl_lock */ void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask) { @@ -470,7 +493,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adj, int x, int y, struct drm_framebuffer *old_fb) { - struct armada_private *priv = crtc->dev->dev_private; struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); struct armada_regs regs[17]; uint32_t lm, rm, tm, bm, val, sclk; @@ -515,7 +537,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, } /* Now compute the divider for real */ - priv->variant->crtc_compute_clock(dcrtc, adj, &sclk); + dcrtc->variant->compute_clock(dcrtc, adj, &sclk); /* Ensure graphic fifo is enabled */ armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1); @@ -537,7 +559,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, dcrtc->v[1].spu_v_porch = tm << 16 | bm; val = adj->crtc_hsync_start; dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN | - priv->variant->spu_adv_reg; + dcrtc->variant->spu_adv_reg; if (interlaced) { /* Odd interlaced frame */ @@ -546,7 +568,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1; val = adj->crtc_hsync_start - adj->crtc_htotal / 2; dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN | - priv->variant->spu_adv_reg; + dcrtc->variant->spu_adv_reg; } else { dcrtc->v[0] = dcrtc->v[1]; } @@ -561,7 +583,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total, LCD_SPUT_V_H_TOTAL); - if (priv->variant->has_spu_adv_reg) { + if (dcrtc->variant->has_spu_adv_reg) { armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg, ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN, LCD_SPU_ADV_REG); @@ -805,12 +827,11 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); - struct armada_private *priv = crtc->dev->dev_private; struct armada_gem_object *obj = NULL; int ret; /* If no cursor support, replicate drm's return value */ - if (!priv->variant->has_spu_adv_reg) + if (!dcrtc->variant->has_spu_adv_reg) return -ENXIO; if (handle && w > 0 && h > 0) { @@ -858,11 +879,10 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct drm_device *dev = crtc->dev; struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); - struct armada_private *priv = crtc->dev->dev_private; int ret; /* If no cursor support, replicate drm's return value */ - if (!priv->variant->has_spu_adv_reg) + if (!dcrtc->variant->has_spu_adv_reg) return -EFAULT; mutex_lock(&dev->struct_mutex); @@ -888,6 +908,10 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc) if (!IS_ERR(dcrtc->clk)) clk_disable_unprepare(dcrtc->clk); + writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA); + + of_node_put(dcrtc->crtc.port); + kfree(dcrtc); } @@ -1027,19 +1051,20 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev) return 0; } -int armada_drm_crtc_create(struct drm_device *dev, unsigned num, - struct resource *res) +int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, + struct resource *res, 
int irq, const struct armada_variant *variant, + struct device_node *port) { - struct armada_private *priv = dev->dev_private; + struct armada_private *priv = drm->dev_private; struct armada_crtc *dcrtc; void __iomem *base; int ret; - ret = armada_drm_crtc_create_properties(dev); + ret = armada_drm_crtc_create_properties(drm); if (ret) return ret; - base = devm_ioremap_resource(dev->dev, res); + base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -1049,8 +1074,12 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num, return -ENOMEM; } + if (dev != drm->dev) + dev_set_drvdata(dev, dcrtc); + + dcrtc->variant = variant; dcrtc->base = base; - dcrtc->num = num; + dcrtc->num = drm->mode_config.num_crtc; dcrtc->clk = ERR_PTR(-EINVAL); dcrtc->csc_yuv_mode = CSC_AUTO; dcrtc->csc_rgb_mode = CSC_AUTO; @@ -1072,9 +1101,18 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num, CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN); + writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); + writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); + + ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", + dcrtc); + if (ret < 0) { + kfree(dcrtc); + return ret; + } - if (priv->variant->crtc_init) { - ret = priv->variant->crtc_init(dcrtc); + if (dcrtc->variant->init) { + ret = dcrtc->variant->init(dcrtc, dev); if (ret) { kfree(dcrtc); return ret; @@ -1086,7 +1124,8 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num, priv->dcrtc[dcrtc->num] = dcrtc; - drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs); + dcrtc->crtc.port = port; + drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs); drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs); drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop, @@ -1094,5 +1133,107 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num, drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop, dcrtc->csc_rgb_mode); - return armada_overlay_plane_create(dev, 1 << dcrtc->num); + return armada_overlay_plane_create(drm, 1 << dcrtc->num); +} + +static int +armada_lcd_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = data; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + const struct armada_variant *variant; + struct device_node *port = NULL; + + if (irq < 0) + return irq; + + if (!dev->of_node) { + const struct platform_device_id *id; + + id = platform_get_device_id(pdev); + if (!id) + return -ENXIO; + + variant = (const struct armada_variant *)id->driver_data; + } else { + const struct of_device_id *match; + struct device_node *np, *parent = dev->of_node; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match) + return -ENXIO; + + np = of_get_child_by_name(parent, "ports"); + if (np) + parent = np; + port = of_get_child_by_name(parent, "port"); + of_node_put(np); + if (!port) { + dev_err(dev, "no port node found in %s\n", + parent->full_name); + return -ENXIO; + } + + variant = match->data; + } + + return armada_drm_crtc_create(drm, dev, res, irq, variant, port); +} + +static void +armada_lcd_unbind(struct device *dev, struct device *master, void *data) +{ + struct armada_crtc *dcrtc = dev_get_drvdata(dev); + + armada_drm_crtc_destroy(&dcrtc->crtc); } 
+ +static const struct component_ops armada_lcd_ops = { + .bind = armada_lcd_bind, + .unbind = armada_lcd_unbind, +}; + +static int armada_lcd_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &armada_lcd_ops); +} + +static int armada_lcd_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &armada_lcd_ops); + return 0; +} + +static struct of_device_id armada_lcd_of_match[] = { + { + .compatible = "marvell,dove-lcd", + .data = &armada510_ops, + }, + {} +}; +MODULE_DEVICE_TABLE(of, armada_lcd_of_match); + +static const struct platform_device_id armada_lcd_platform_ids[] = { + { + .name = "armada-lcd", + .driver_data = (unsigned long)&armada510_ops, + }, { + .name = "armada-510-lcd", + .driver_data = (unsigned long)&armada510_ops, + }, + { }, +}; +MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids); + +struct platform_driver armada_lcd_platform_driver = { + .probe = armada_lcd_probe, + .remove = armada_lcd_remove, + .driver = { + .name = "armada-lcd", + .owner = THIS_MODULE, + .of_match_table = armada_lcd_of_match, + }, + .id_table = armada_lcd_platform_ids, +}; diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h index 9c10a07e7492..98102a5a9af5 100644 --- a/drivers/gpu/drm/armada/armada_crtc.h +++ b/drivers/gpu/drm/armada/armada_crtc.h @@ -32,12 +32,15 @@ struct armada_regs { armada_reg_queue_mod(_r, _i, 0, 0, ~0) struct armada_frame_work; +struct armada_variant; struct armada_crtc { struct drm_crtc crtc; + const struct armada_variant *variant; unsigned num; void __iomem *base; struct clk *clk; + struct clk *extclk[2]; struct { uint32_t spu_v_h_total; uint32_t spu_v_porch; @@ -72,12 +75,16 @@ struct armada_crtc { }; #define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc) -int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *); +struct device_node; +int armada_drm_crtc_create(struct drm_device *, struct device *, + struct resource *, int, const struct armada_variant *, + struct device_node *); void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int); void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int); -void armada_drm_crtc_irq(struct armada_crtc *, u32); void armada_drm_crtc_disable_irq(struct armada_crtc *, u32); void armada_drm_crtc_enable_irq(struct armada_crtc *, u32); void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *); +extern struct platform_driver armada_lcd_platform_driver; + #endif diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h index a72cae03b99b..ea63c6c7c66f 100644 --- a/drivers/gpu/drm/armada/armada_drm.h +++ b/drivers/gpu/drm/armada/armada_drm.h @@ -59,26 +59,23 @@ void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *, struct armada_private; struct armada_variant { - bool has_spu_adv_reg; + bool has_spu_adv_reg; uint32_t spu_adv_reg; - int (*init)(struct armada_private *, struct device *); - int (*crtc_init)(struct armada_crtc *); - int (*crtc_compute_clock)(struct armada_crtc *, - const struct drm_display_mode *, - uint32_t *); + int (*init)(struct armada_crtc *, struct device *); + int (*compute_clock)(struct armada_crtc *, + const struct drm_display_mode *, + uint32_t *); }; /* Variant ops */ extern const struct armada_variant armada510_ops; struct armada_private { - const struct armada_variant *variant; struct work_struct fb_unref_work; DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8); struct drm_fb_helper *fbdev; struct armada_crtc *dcrtc[2]; 
struct drm_mm linear; - struct clk *extclk[2]; struct drm_property *csc_yuv_prop; struct drm_property *csc_rgb_prop; struct drm_property *colorkey_prop; diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 8ab3cd1a8cdb..e2d5792b140f 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -6,7 +6,9 @@ * published by the Free Software Foundation. */ #include <linux/clk.h> +#include <linux/component.h> #include <linux/module.h> +#include <linux/of_graph.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include "armada_crtc.h" @@ -52,6 +54,11 @@ static const struct armada_drm_slave_config tda19988_config = { }; #endif +static bool is_componentized(struct device *dev) +{ + return dev->of_node || dev->platform_data; +} + static void armada_drm_unref_work(struct work_struct *work) { struct armada_private *priv = @@ -85,6 +92,7 @@ void armada_drm_queue_unref_work(struct drm_device *dev, static int armada_drm_load(struct drm_device *dev, unsigned long flags) { const struct platform_device_id *id; + const struct armada_variant *variant; struct armada_private *priv; struct resource *res[ARRAY_SIZE(priv->dcrtc)]; struct resource *mem = NULL; @@ -107,7 +115,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags) return -EINVAL; } - if (!res[0] || !mem) + if (!mem) return -ENXIO; if (!devm_request_mem_region(dev->dev, mem->start, @@ -128,11 +136,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags) if (!id) return -ENXIO; - priv->variant = (struct armada_variant *)id->driver_data; - - ret = priv->variant->init(priv, dev->dev); - if (ret) - return ret; + variant = (const struct armada_variant *)id->driver_data; INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work); INIT_KFIFO(priv->fb_unref); @@ -155,40 +159,50 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags) /* Create all LCD controllers */ for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) { + int irq; + if (!res[n]) break; - ret = armada_drm_crtc_create(dev, n, res[n]); + irq = platform_get_irq(dev->platformdev, n); + if (irq < 0) + goto err_kms; + + ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq, + variant, NULL); if (ret) goto err_kms; } + if (is_componentized(dev->dev)) { + ret = component_bind_all(dev->dev, dev); + if (ret) + goto err_kms; + } else { #ifdef CONFIG_DRM_ARMADA_TDA1998X - ret = armada_drm_connector_slave_create(dev, &tda19988_config); - if (ret) - goto err_kms; + ret = armada_drm_connector_slave_create(dev, &tda19988_config); + if (ret) + goto err_kms; #endif + } - ret = drm_vblank_init(dev, n); - if (ret) - goto err_kms; - - ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret) - goto err_kms; + goto err_comp; dev->vblank_disable_allowed = 1; ret = armada_fbdev_init(dev); if (ret) - goto err_irq; + goto err_comp; drm_kms_helper_poll_init(dev); return 0; - err_irq: - drm_irq_uninstall(dev); + err_comp: + if (is_componentized(dev->dev)) + component_unbind_all(dev->dev, dev); err_kms: drm_mode_config_cleanup(dev); drm_mm_takedown(&priv->linear); @@ -203,7 +217,10 @@ static int armada_drm_unload(struct drm_device *dev) drm_kms_helper_poll_fini(dev); armada_fbdev_fini(dev); - drm_irq_uninstall(dev); + + if (is_componentized(dev->dev)) + component_unbind_all(dev->dev, dev); + drm_mode_config_cleanup(dev); drm_mm_takedown(&priv->linear); flush_work(&priv->fb_unref_work); @@ -259,52 +276,6 @@ static void 
armada_drm_disable_vblank(struct drm_device *dev, int crtc) armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA); } -static irqreturn_t armada_drm_irq_handler(int irq, void *arg) -{ - struct drm_device *dev = arg; - struct armada_private *priv = dev->dev_private; - struct armada_crtc *dcrtc = priv->dcrtc[0]; - uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); - irqreturn_t handled = IRQ_NONE; - - /* - * This is rediculous - rather than writing bits to clear, we - * have to set the actual status register value. This is racy. - */ - writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); - - /* Mask out those interrupts we haven't enabled */ - v = stat & dcrtc->irq_ena; - - if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) { - armada_drm_crtc_irq(dcrtc, stat); - handled = IRQ_HANDLED; - } - - return handled; -} - -static int armada_drm_irq_postinstall(struct drm_device *dev) -{ - struct armada_private *priv = dev->dev_private; - struct armada_crtc *dcrtc = priv->dcrtc[0]; - - spin_lock_irq(&dev->vbl_lock); - writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); - writel(0, dcrtc->base + LCD_SPU_IRQ_ISR); - spin_unlock_irq(&dev->vbl_lock); - - return 0; -} - -static void armada_drm_irq_uninstall(struct drm_device *dev) -{ - struct armada_private *priv = dev->dev_private; - struct armada_crtc *dcrtc = priv->dcrtc[0]; - - writel(0, dcrtc->base + LCD_SPU_IRQ_ENA); -} - static struct drm_ioctl_desc armada_ioctls[] = { DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, DRM_UNLOCKED), @@ -340,9 +311,6 @@ static struct drm_driver armada_drm_driver = { .get_vblank_counter = drm_vblank_count, .enable_vblank = armada_drm_enable_vblank, .disable_vblank = armada_drm_disable_vblank, - .irq_handler = armada_drm_irq_handler, - .irq_postinstall = armada_drm_irq_postinstall, - .irq_uninstall = armada_drm_irq_uninstall, #ifdef CONFIG_DEBUG_FS .debugfs_init = armada_drm_debugfs_init, .debugfs_cleanup = armada_drm_debugfs_cleanup, @@ -362,19 +330,140 @@ static struct drm_driver armada_drm_driver = { .desc = "Armada SoC DRM", .date = "20120730", .driver_features = DRIVER_GEM | DRIVER_MODESET | - DRIVER_HAVE_IRQ | DRIVER_PRIME, + DRIVER_PRIME, .ioctls = armada_ioctls, .fops = &armada_drm_fops, }; +static int armada_drm_bind(struct device *dev) +{ + return drm_platform_init(&armada_drm_driver, to_platform_device(dev)); +} + +static void armada_drm_unbind(struct device *dev) +{ + drm_put_dev(dev_get_drvdata(dev)); +} + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int compare_dev_name(struct device *dev, void *data) +{ + const char *name = data; + return !strcmp(dev_name(dev), name); +} + +static void armada_add_endpoints(struct device *dev, + struct component_match **match, struct device_node *port) +{ + struct device_node *ep, *remote; + + for_each_child_of_node(port, ep) { + remote = of_graph_get_remote_port_parent(ep); + if (!remote || !of_device_is_available(remote)) { + of_node_put(remote); + continue; + } else if (!of_device_is_available(remote->parent)) { + dev_warn(dev, "parent device of %s is not available\n", + remote->full_name); + of_node_put(remote); + continue; + } + + component_match_add(dev, match, compare_of, remote); + of_node_put(remote); + } +} + +static int armada_drm_find_components(struct device *dev, + struct component_match **match) +{ + struct device_node *port; + int i; + + if (dev->of_node) { + struct device_node *np = dev->of_node; + + for (i = 0; ; i++) { + port = of_parse_phandle(np, "ports", 
i); + if (!port) + break; + + component_match_add(dev, match, compare_of, port); + of_node_put(port); + } + + if (i == 0) { + dev_err(dev, "missing 'ports' property\n"); + return -ENODEV; + } + + for (i = 0; ; i++) { + port = of_parse_phandle(np, "ports", i); + if (!port) + break; + + armada_add_endpoints(dev, match, port); + of_node_put(port); + } + } else if (dev->platform_data) { + char **devices = dev->platform_data; + struct device *d; + + for (i = 0; devices[i]; i++) + component_match_add(dev, match, compare_dev_name, + devices[i]); + + if (i == 0) { + dev_err(dev, "missing 'ports' property\n"); + return -ENODEV; + } + + for (i = 0; devices[i]; i++) { + d = bus_find_device_by_name(&platform_bus_type, NULL, + devices[i]); + if (d && d->of_node) { + for_each_child_of_node(d->of_node, port) + armada_add_endpoints(dev, match, port); + } + put_device(d); + } + } + + return 0; +} + +static const struct component_master_ops armada_master_ops = { + .bind = armada_drm_bind, + .unbind = armada_drm_unbind, +}; + static int armada_drm_probe(struct platform_device *pdev) { - return drm_platform_init(&armada_drm_driver, pdev); + if (is_componentized(&pdev->dev)) { + struct component_match *match = NULL; + int ret; + + ret = armada_drm_find_components(&pdev->dev, &match); + if (ret < 0) + return ret; + + return component_master_add_with_match(&pdev->dev, + &armada_master_ops, match); + } else { + return drm_platform_init(&armada_drm_driver, pdev); + } } static int armada_drm_remove(struct platform_device *pdev) { - drm_put_dev(platform_get_drvdata(pdev)); + if (is_componentized(&pdev->dev)) + component_master_del(&pdev->dev, &armada_master_ops); + else + drm_put_dev(platform_get_drvdata(pdev)); return 0; } @@ -402,14 +491,24 @@ static struct platform_driver armada_drm_platform_driver = { static int __init armada_drm_init(void) { + int ret; + armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls); - return platform_driver_register(&armada_drm_platform_driver); + + ret = platform_driver_register(&armada_lcd_platform_driver); + if (ret) + return ret; + ret = platform_driver_register(&armada_drm_platform_driver); + if (ret) + platform_driver_unregister(&armada_lcd_platform_driver); + return ret; } module_init(armada_drm_init); static void __exit armada_drm_exit(void) { platform_driver_unregister(&armada_drm_platform_driver); + platform_driver_unregister(&armada_lcd_platform_driver); } module_exit(armada_drm_exit); diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index fd166f532ab9..7838e731b0de 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -131,7 +131,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh, return ret; } -static struct drm_fb_helper_funcs armada_fb_helper_funcs = { +static const struct drm_fb_helper_funcs armada_fb_helper_funcs = { .gamma_set = armada_drm_crtc_gamma_set, .gamma_get = armada_drm_crtc_gamma_get, .fb_probe = armada_fb_probe, @@ -149,7 +149,7 @@ int armada_fbdev_init(struct drm_device *dev) priv->fbdev = fbh; - fbh->funcs = &armada_fb_helper_funcs; + drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs); ret = drm_fb_helper_init(dev, fbh, 1, 1); if (ret) { diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c index d685a5421485..abbc309fe539 100644 --- a/drivers/gpu/drm/armada/armada_output.c +++ b/drivers/gpu/drm/armada/armada_output.c @@ -48,7 +48,7 @@ static void armada_drm_connector_destroy(struct drm_connector *conn) { struct 
armada_connector *dconn = drm_to_armada_conn(conn); - drm_sysfs_connector_remove(conn); + drm_connector_unregister(conn); drm_connector_cleanup(conn); kfree(dconn); } @@ -141,7 +141,7 @@ int armada_output_create(struct drm_device *dev, if (ret) goto err_conn; - ret = drm_sysfs_connector_add(&dconn->conn); + ret = drm_connector_register(&dconn->conn); if (ret) goto err_sysfs; diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 44074fbcf7ff..f19682a93c24 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -51,7 +51,7 @@ static struct drm_driver driver; .subdevice = PCI_ANY_ID, \ .driver_data = (unsigned long) info } -static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { +static const struct pci_device_id pciidlist[] = { AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL), AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL), /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */ diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 5d6a87573c33..957d4fabf1e1 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -362,7 +362,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait) { int ret; - ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); + ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); if (ret) { if (ret != -ERESTARTSYS && ret != -EBUSY) DRM_ERROR("reserve failed %p\n", bo); diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index a28640f47c27..cba45c774552 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -287,7 +287,7 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = ast_crtc->lut_b[regno] << 8; } -static struct drm_fb_helper_funcs ast_fb_helper_funcs = { +static const struct drm_fb_helper_funcs ast_fb_helper_funcs = { .gamma_set = ast_fb_gamma_set, .gamma_get = ast_fb_gamma_get, .fb_probe = astfb_create, @@ -328,8 +328,10 @@ int ast_fbdev_init(struct drm_device *dev) return -ENOMEM; ast->fbdev = afbdev; - afbdev->helper.funcs = &ast_fb_helper_funcs; spin_lock_init(&afbdev->dirty_lock); + + drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs); + ret = drm_fb_helper_init(dev, &afbdev->helper, 1, 1); if (ret) { diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index a2cc6be97983..b792194e0d9c 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; uint32_t data, jreg; + ast_open_key(ast); if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; @@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev) } ast->vga2_clone = false; } else { - ast->chip = 2000; + ast->chip = AST2000; DRM_INFO("AST 2000 detected\n"); } } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 114aee941d46..5389350244f2 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -667,17 +667,9 @@ static void ast_encoder_destroy(struct drm_encoder *encoder) static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - /* pick the encoder ids */ - if (enc_id) { - obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if 
(enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -829,7 +821,7 @@ static void ast_connector_destroy(struct drm_connector *connector) { struct ast_connector *ast_connector = to_ast_connector(connector); ast_i2c_destroy(ast_connector->i2c); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -871,7 +863,7 @@ static int ast_connector_init(struct drm_device *dev) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); connector->polled = DRM_CONNECTOR_POLL_CONNECT; diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 4c761dcea972..05c01ea85294 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -99,6 +99,7 @@ static struct ast_vbios_dclk_info dclk_table[] = { {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ {0x77, 0x58, 0x80}, /* 17: VCLK119 */ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ }; static struct ast_vbios_stdtable vbios_stdtable[] = { diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index 9c13df29fd20..9738e9b14708 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -97,6 +97,7 @@ static struct drm_driver bochs_driver = { /* ---------------------------------------------------------------------- */ /* pm interface */ +#ifdef CONFIG_PM_SLEEP static int bochs_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -131,6 +132,7 @@ static int bochs_pm_resume(struct device *dev) drm_kms_helper_poll_enable(drm_dev); return 0; } +#endif static const struct dev_pm_ops bochs_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend, @@ -175,7 +177,7 @@ static void bochs_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } -static DEFINE_PCI_DEVICE_TABLE(bochs_pci_tbl) = { +static const struct pci_device_id bochs_pci_tbl[] = { { .vendor = 0x1234, .device = 0x1111, diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c index 561b84474122..fe95d31cd110 100644 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c @@ -72,7 +72,7 @@ static int bochsfb_create(struct drm_fb_helper *helper, bo = gem_to_bochs_bo(gobj); - ret = ttm_bo_reserve(&bo->bo, true, false, false, 0); + ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); if (ret) return ret; @@ -179,7 +179,7 @@ void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = regno; } -static struct drm_fb_helper_funcs bochs_fb_helper_funcs = { +static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = { .gamma_set = bochs_fb_gamma_set, .gamma_get = bochs_fb_gamma_get, .fb_probe = bochsfb_create, @@ -189,7 +189,8 @@ int bochs_fbdev_init(struct bochs_device *bochs) { int ret; - bochs->fb.helper.funcs = &bochs_fb_helper_funcs; + drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper, + &bochs_fb_helper_funcs); ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1, 1); diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index dcf2e55f4ae9..6b7efcf363d6 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -53,7 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb) { bochs_fb = to_bochs_framebuffer(old_fb); bo = gem_to_bochs_bo(bochs_fb->obj); - ret = ttm_bo_reserve(&bo->bo, true, 
false, false, 0); + ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); if (ret) { DRM_ERROR("failed to reserve old_fb bo\n"); } else { @@ -67,7 +67,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, bochs_fb = to_bochs_framebuffer(crtc->primary->fb); bo = gem_to_bochs_bo(bochs_fb->obj); - ret = ttm_bo_reserve(&bo->bo, true, false, false, 0); + ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); if (ret) return ret; @@ -216,18 +216,9 @@ static struct drm_encoder * bochs_connector_best_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - /* pick the encoder ids */ - if (enc_id) { - obj = drm_mode_object_find(connector->dev, enc_id, - DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -259,6 +250,7 @@ static void bochs_connector_init(struct drm_device *dev) DRM_MODE_CONNECTOR_VIRTUAL); drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs); + drm_connector_register(connector); } diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index b9a695d92792..1728a1b0b813 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -387,7 +387,7 @@ int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel, *obj = NULL; - size = ALIGN(size, PAGE_SIZE); + size = PAGE_ALIGN(size); if (size == 0) return -EINVAL; diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c index 98fd17ae4916..d466696ed5e8 100644 --- a/drivers/gpu/drm/bridge/ptn3460.c +++ b/drivers/gpu/drm/bridge/ptn3460.c @@ -328,7 +328,7 @@ int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder, } drm_connector_helper_add(&ptn_bridge->connector, &ptn3460_connector_helper_funcs); - drm_sysfs_connector_add(&ptn_bridge->connector); + drm_connector_register(&ptn_bridge->connector); drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder); return 0; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index 08ce520f61a5..919c73b94447 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -29,7 +29,7 @@ module_param_named(modeset, cirrus_modeset, int, 0400); static struct drm_driver driver; /* only bind to the cirrus chip in qemu */ -static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { +static const struct pci_device_id pciidlist[] = { { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0, 0, 0 }, {0,} @@ -76,6 +76,7 @@ static void cirrus_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } +#ifdef CONFIG_PM_SLEEP static int cirrus_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -110,6 +111,7 @@ static int cirrus_pm_resume(struct device *dev) drm_kms_helper_poll_enable(drm_dev); return 0; } +#endif static const struct file_operations cirrus_driver_fops = { .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 117d3eca5e37..401c890b6c6a 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -241,7 +241,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait) { int ret; - ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); + ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); if (ret) { if (ret != -ERESTARTSYS && 
ret != -EBUSY) DRM_ERROR("reserve failed %p\n", bo); diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 32bbba0a787b..2a135f253e29 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -288,7 +288,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev, return 0; } -static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = { +static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = { .gamma_set = cirrus_crtc_fb_gamma_set, .gamma_get = cirrus_crtc_fb_gamma_get, .fb_probe = cirrusfb_create, @@ -306,9 +306,11 @@ int cirrus_fbdev_init(struct cirrus_device *cdev) return -ENOMEM; cdev->mode_info.gfbdev = gfbdev; - gfbdev->helper.funcs = &cirrus_fb_helper_funcs; spin_lock_init(&gfbdev->dirty_lock); + drm_fb_helper_prepare(cdev->dev, &gfbdev->helper, + &cirrus_fb_helper_funcs); + ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper, cdev->num_crtc, CIRRUSFB_CONN_LIMIT); if (ret) { diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 49332c5fe35b..c7c5a9d91fa0 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -509,19 +509,9 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - /* pick the encoder ids */ - if (enc_id) { - obj = - drm_mode_object_find(connector->dev, enc_id, - DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -565,6 +555,7 @@ static struct drm_connector *cirrus_vga_init(struct drm_device *dev) drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs); + drm_connector_register(connector); return connector; } diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c index 0406110f83ed..86a4a4a60afc 100644 --- a/drivers/gpu/drm/drm_buffer.c +++ b/drivers/gpu/drm/drm_buffer.c @@ -80,11 +80,7 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size) error_out: - /* Only last element can be null pointer so check for it first. 
*/ - if ((*buf)->data[idx]) - kfree((*buf)->data[idx]); - - for (--idx; idx >= 0; --idx) + for (; idx >= 0; --idx) kfree((*buf)->data[idx]); kfree(*buf); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 68175b54504b..61acb8f6756d 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1217,7 +1217,6 @@ int drm_infobufs(struct drm_device *dev, void *data, struct drm_buf_desc __user *to = &request->list[count]; struct drm_buf_entry *from = &dma->bufs[i]; - struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, sizeof(from->buf_count)) || @@ -1225,19 +1224,19 @@ int drm_infobufs(struct drm_device *dev, void *data, &from->buf_size, sizeof(from->buf_size)) || copy_to_user(&to->low_mark, - &list->low_mark, - sizeof(list->low_mark)) || + &from->low_mark, + sizeof(from->low_mark)) || copy_to_user(&to->high_mark, - &list->high_mark, - sizeof(list->high_mark))) + &from->high_mark, + sizeof(from->high_mark))) return -EFAULT; DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, dma->bufs[i].buf_size, - dma->bufs[i].freelist.low_mark, - dma->bufs[i].freelist.high_mark); + dma->bufs[i].low_mark, + dma->bufs[i].high_mark); ++count; } } @@ -1290,8 +1289,8 @@ int drm_markbufs(struct drm_device *dev, void *data, if (request->high_mark < 0 || request->high_mark > entry->buf_count) return -EINVAL; - entry->freelist.low_mark = request->low_mark; - entry->freelist.high_mark = request->high_mark; + entry->low_mark = request->low_mark; + entry->high_mark = request->high_mark; return 0; } diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index a4b017b6849e..9b23525c0ed0 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -1,18 +1,13 @@ -/** - * \file drm_context.c - * IOCTLs for generic contexts - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - /* - * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com + * Legacy: Generic DRM Contexts * * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * + * Author: Rickard E. (Rik) Faith <faith@valinux.com> + * Author: Gareth Hughes <gareth@valinux.com> + * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation @@ -33,14 +28,14 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -/* - * ChangeLog: - * 2001-11-16 Torsten Duwe <duwe@caldera.de> - * added context constructor/destructor hooks, - * needed by SiS driver's memory management. - */ - #include <drm/drmP.h> +#include "drm_legacy.h" + +struct drm_ctx_list { + struct list_head head; + drm_context_t handle; + struct drm_file *tag; +}; /******************************************************************/ /** \name Context bitmap support */ @@ -56,7 +51,7 @@ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex * lock. */ -void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) +void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) { mutex_lock(&dev->struct_mutex); idr_remove(&dev->ctx_idr, ctx_handle); @@ -72,7 +67,7 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) * Allocate a new idr from drm_device::ctx_idr while holding the * drm_device::struct_mutex lock. 
*/ -static int drm_ctxbitmap_next(struct drm_device * dev) +static int drm_legacy_ctxbitmap_next(struct drm_device * dev) { int ret; @@ -90,7 +85,7 @@ static int drm_ctxbitmap_next(struct drm_device * dev) * * Initialise the drm_device::ctx_idr */ -int drm_ctxbitmap_init(struct drm_device * dev) +int drm_legacy_ctxbitmap_init(struct drm_device * dev) { idr_init(&dev->ctx_idr); return 0; @@ -104,13 +99,43 @@ int drm_ctxbitmap_init(struct drm_device * dev) * Free all idr members using drm_ctx_sarea_free helper function * while holding the drm_device::struct_mutex lock. */ -void drm_ctxbitmap_cleanup(struct drm_device * dev) +void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) { mutex_lock(&dev->struct_mutex); idr_destroy(&dev->ctx_idr); mutex_unlock(&dev->struct_mutex); } +/** + * drm_ctxbitmap_flush() - Flush all contexts owned by a file + * @dev: DRM device to operate on + * @file: Open file to flush contexts for + * + * This iterates over all contexts on @dev and drops them if they're owned by + * @file. Note that after this call returns, new contexts might be added if + * the file is still alive. + */ +void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) +{ + struct drm_ctx_list *pos, *tmp; + + mutex_lock(&dev->ctxlist_mutex); + + list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) { + if (pos->tag == file && + pos->handle != DRM_KERNEL_CONTEXT) { + if (dev->driver->context_dtor) + dev->driver->context_dtor(dev, pos->handle); + + drm_legacy_ctxbitmap_free(dev, pos->handle); + list_del(&pos->head); + kfree(pos); + } + } + + mutex_unlock(&dev->ctxlist_mutex); +} + /*@}*/ /******************************************************************/ @@ -129,8 +154,8 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev) * Gets the map from drm_device::ctx_idr with the handle specified and * returns its handle. */ -int drm_getsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_getsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx_priv_map *request = data; struct drm_local_map *map; @@ -173,8 +198,8 @@ int drm_getsareactx(struct drm_device *dev, void *data, * Searches the mapping specified in \p arg and update the entry in * drm_device::ctx_idr with it. */ -int drm_setsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_setsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx_priv_map *request = data; struct drm_local_map *map = NULL; @@ -273,8 +298,8 @@ static int drm_context_switch_complete(struct drm_device *dev, * \param arg user argument pointing to a drm_ctx_res structure. * \return zero on success or a negative number on failure. */ -int drm_resctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_resctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx_res *res = data; struct drm_ctx ctx; @@ -304,16 +329,16 @@ int drm_resctx(struct drm_device *dev, void *data, * * Get a new handle for the context and copy to userspace. */ -int drm_addctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_addctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx_list *ctx_entry; struct drm_ctx *ctx = data; - ctx->handle = drm_ctxbitmap_next(dev); + ctx->handle = drm_legacy_ctxbitmap_next(dev); if (ctx->handle == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. 
*/ - ctx->handle = drm_ctxbitmap_next(dev); + ctx->handle = drm_legacy_ctxbitmap_next(dev); } DRM_DEBUG("%d\n", ctx->handle); if (ctx->handle == -1) { @@ -348,7 +373,8 @@ int drm_addctx(struct drm_device *dev, void *data, * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. */ -int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) +int drm_legacy_getctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx *ctx = data; @@ -369,8 +395,8 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) * * Calls context_switch(). */ -int drm_switchctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_switchctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx *ctx = data; @@ -389,8 +415,8 @@ int drm_switchctx(struct drm_device *dev, void *data, * * Calls context_switch_complete(). */ -int drm_newctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_newctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx *ctx = data; @@ -411,8 +437,8 @@ int drm_newctx(struct drm_device *dev, void *data, * * If not the special kernel context, calls ctxbitmap_free() to free the specified context. */ -int drm_rmctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_rmctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_ctx *ctx = data; @@ -420,7 +446,7 @@ int drm_rmctx(struct drm_device *dev, void *data, if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) dev->driver->context_dtor(dev, ctx->handle); - drm_ctxbitmap_free(dev, ctx->handle); + drm_legacy_ctxbitmap_free(dev, ctx->handle); } mutex_lock(&dev->ctxlist_mutex); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index fe94cc10cd35..90e773019eac 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -41,6 +41,10 @@ #include "drm_crtc_internal.h" +static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); + /** * drm_modeset_lock_all - take all modeset locks * @dev: drm device @@ -178,6 +182,12 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { DRM_MODE_SCALE_ASPECT, "Full aspect" }, }; +static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = { + { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" }, + { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" }, + { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" }, +}; + /* * Non-global properties, but "required" for certain connectors. */ @@ -357,6 +367,32 @@ const char *drm_get_format_name(uint32_t format) } EXPORT_SYMBOL(drm_get_format_name); +/* + * Internal function to assign a slot in the object idr and optionally + * register the object into the idr. + */ +static int drm_mode_object_get_reg(struct drm_device *dev, + struct drm_mode_object *obj, + uint32_t obj_type, + bool register_obj) +{ + int ret; + + mutex_lock(&dev->mode_config.idr_mutex); + ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL); + if (ret >= 0) { + /* + * Set up the object linking under the protection of the idr + * lock so that other users can't see inconsistent state. + */ + obj->id = ret; + obj->type = obj_type; + } + mutex_unlock(&dev->mode_config.idr_mutex); + + return ret < 0 ? 
ret : 0; +} + /** * drm_mode_object_get - allocate a new modeset identifier * @dev: DRM device @@ -375,21 +411,15 @@ EXPORT_SYMBOL(drm_get_format_name); int drm_mode_object_get(struct drm_device *dev, struct drm_mode_object *obj, uint32_t obj_type) { - int ret; + return drm_mode_object_get_reg(dev, obj, obj_type, true); +} +static void drm_mode_object_register(struct drm_device *dev, + struct drm_mode_object *obj) +{ mutex_lock(&dev->mode_config.idr_mutex); - ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL); - if (ret >= 0) { - /* - * Set up the object linking under the protection of the idr - * lock so that other users can't see inconsistent state. - */ - obj->id = ret; - obj->type = obj_type; - } + idr_replace(&dev->mode_config.crtc_idr, obj, obj->id); mutex_unlock(&dev->mode_config.idr_mutex); - - return ret < 0 ? ret : 0; } /** @@ -416,8 +446,12 @@ static struct drm_mode_object *_object_find(struct drm_device *dev, mutex_lock(&dev->mode_config.idr_mutex); obj = idr_find(&dev->mode_config.crtc_idr, id); - if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) || - (obj->id != id)) + if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type) + obj = NULL; + if (obj && obj->id != id) + obj = NULL; + /* don't leak out unref'd fb's */ + if (obj && (obj->type == DRM_MODE_OBJECT_FB)) obj = NULL; mutex_unlock(&dev->mode_config.idr_mutex); @@ -444,9 +478,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, * function.*/ WARN_ON(type == DRM_MODE_OBJECT_FB); obj = _object_find(dev, id, type); - /* don't leak out unref'd fb's */ - if (obj && (obj->type == DRM_MODE_OBJECT_FB)) - obj = NULL; return obj; } EXPORT_SYMBOL(drm_mode_object_find); @@ -723,7 +754,7 @@ DEFINE_WW_CLASS(crtc_ww_class); */ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, - void *cursor, + struct drm_plane *cursor, const struct drm_crtc_funcs *funcs) { struct drm_mode_config *config = &dev->mode_config; @@ -748,8 +779,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, config->num_crtc++; crtc->primary = primary; + crtc->cursor = cursor; if (primary) primary->possible_crtcs = 1 << drm_crtc_index(crtc); + if (cursor) + cursor->possible_crtcs = 1 << drm_crtc_index(crtc); out: drm_modeset_unlock_all(dev); @@ -842,7 +876,7 @@ int drm_connector_init(struct drm_device *dev, drm_modeset_lock_all(dev); - ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); + ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false); if (ret) goto out_unlock; @@ -881,6 +915,8 @@ int drm_connector_init(struct drm_device *dev, drm_object_attach_property(&connector->base, dev->mode_config.dpms_property, 0); + connector->debugfs_entry = NULL; + out_put: if (ret) drm_mode_object_put(dev, &connector->base); @@ -921,6 +957,49 @@ void drm_connector_cleanup(struct drm_connector *connector) EXPORT_SYMBOL(drm_connector_cleanup); /** + * drm_connector_register - register a connector + * @connector: the connector to register + * + * Register userspace interfaces for a connector + * + * Returns: + * Zero on success, error code on failure. 
+ */ +int drm_connector_register(struct drm_connector *connector) +{ + int ret; + + drm_mode_object_register(connector->dev, &connector->base); + + ret = drm_sysfs_connector_add(connector); + if (ret) + return ret; + + ret = drm_debugfs_connector_add(connector); + if (ret) { + drm_sysfs_connector_remove(connector); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_connector_register); + +/** + * drm_connector_unregister - unregister a connector + * @connector: the connector to unregister + * + * Unregister userspace interfaces for a connector + */ +void drm_connector_unregister(struct drm_connector *connector) +{ + drm_sysfs_connector_remove(connector); + drm_debugfs_connector_remove(connector); +} +EXPORT_SYMBOL(drm_connector_unregister); + + +/** * drm_connector_unplug_all - unregister connector userspace interfaces * @dev: drm device * @@ -934,7 +1013,7 @@ void drm_connector_unplug_all(struct drm_device *dev) /* taking the mode config mutex ends up in a clash with sysfs */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); } EXPORT_SYMBOL(drm_connector_unplug_all); @@ -1214,6 +1293,7 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev) { struct drm_property *edid; struct drm_property *dpms; + struct drm_property *dev_path; /* * Standard properties (apply to all connectors) @@ -1228,6 +1308,12 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev) ARRAY_SIZE(drm_dpms_enum_list)); dev->mode_config.dpms_property = dpms; + dev_path = drm_property_create(dev, + DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "PATH", 0); + dev->mode_config.path_property = dev_path; + return 0; } @@ -1384,6 +1470,33 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev) EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); /** + * drm_mode_create_aspect_ratio_property - create aspect ratio property + * @dev: DRM device + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. + * + * Returns: + * Zero on success, errno on failure. 
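As a usage sketch of the drm_connector_register()/drm_connector_unregister() helpers introduced above: the wrapper functions below are hypothetical (prefixed example_), but the calling convention mirrors the ast and armada connector hunks earlier in this diff, where registration replaces the old drm_sysfs_connector_add()/remove() calls.

    /* Sketch of a driver's connector create/destroy paths using the new
     * registration helpers; the surrounding driver structure is assumed. */
    static int example_connector_create(struct drm_device *dev,
                                        struct drm_connector *connector,
                                        const struct drm_connector_funcs *funcs,
                                        int connector_type)
    {
            int ret;

            ret = drm_connector_init(dev, connector, funcs, connector_type);
            if (ret)
                    return ret;

            /* Registers the sysfs node and the new debugfs entries in one go. */
            ret = drm_connector_register(connector);
            if (ret)
                    drm_connector_cleanup(connector);

            return ret;
    }

    static void example_connector_destroy(struct drm_connector *connector)
    {
            drm_connector_unregister(connector);
            drm_connector_cleanup(connector);
    }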
+ */ +int drm_mode_create_aspect_ratio_property(struct drm_device *dev) +{ + if (dev->mode_config.aspect_ratio_property) + return 0; + + dev->mode_config.aspect_ratio_property = + drm_property_create_enum(dev, 0, "aspect ratio", + drm_aspect_ratio_enum_list, + ARRAY_SIZE(drm_aspect_ratio_enum_list)); + + if (dev->mode_config.aspect_ratio_property == NULL) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); + +/** * drm_mode_create_dirty_property - create dirty property * @dev: DRM device * @@ -1470,6 +1583,15 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, } EXPORT_SYMBOL(drm_mode_group_init_legacy_group); +void drm_reinit_primary_mode_group(struct drm_device *dev) +{ + drm_modeset_lock_all(dev); + drm_mode_group_destroy(&dev->primary->mode_group); + drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group); + drm_modeset_unlock_all(dev); +} +EXPORT_SYMBOL(drm_reinit_primary_mode_group); + /** * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo * @out: drm_mode_modeinfo struct to return to the user @@ -2118,45 +2240,32 @@ out: return ret; } -/** - * drm_mode_setplane - configure a plane's configuration - * @dev: DRM device - * @data: ioctl data* - * @file_priv: DRM file info +/* + * setplane_internal - setplane handler for internal callers * - * Set plane configuration, including placement, fb, scaling, and other factors. - * Or pass a NULL fb to disable. + * Note that we assume an extra reference has already been taken on fb. If the + * update fails, this reference will be dropped before return; if it succeeds, + * the previous framebuffer (if any) will be unreferenced instead. * - * Returns: - * Zero on success, errno on failure. + * src_{x,y,w,h} are provided in 16.16 fixed point format */ -int drm_mode_setplane(struct drm_device *dev, void *data, - struct drm_file *file_priv) +static int setplane_internal(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int32_t crtc_x, int32_t crtc_y, + uint32_t crtc_w, uint32_t crtc_h, + /* src_{x,y,w,h} values are 16.16 fixed point */ + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) { - struct drm_mode_set_plane *plane_req = data; - struct drm_plane *plane; - struct drm_crtc *crtc; - struct drm_framebuffer *fb = NULL, *old_fb = NULL; + struct drm_device *dev = plane->dev; + struct drm_framebuffer *old_fb = NULL; int ret = 0; unsigned int fb_width, fb_height; int i; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - /* - * First, find the plane, crtc, and fb objects. If not available, - * we don't bother to call the driver. 
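A minimal sketch of how a connector driver might expose the aspect ratio property created by the helper above; the example_ wrapper is hypothetical, while drm_mode_create_aspect_ratio_property() and DRM_MODE_PICTURE_ASPECT_NONE come straight from the hunk.

    /* Illustrative only: create the property once, then attach it to a
     * connector with "Automatic" as the initial value. */
    static int example_attach_aspect_ratio(struct drm_connector *connector)
    {
            struct drm_device *dev = connector->dev;
            int ret;

            ret = drm_mode_create_aspect_ratio_property(dev);
            if (ret)
                    return ret;

            drm_object_attach_property(&connector->base,
                                       dev->mode_config.aspect_ratio_property,
                                       DRM_MODE_PICTURE_ASPECT_NONE);
            return 0;
    }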
- */ - plane = drm_plane_find(dev, plane_req->plane_id); - if (!plane) { - DRM_DEBUG_KMS("Unknown plane ID %d\n", - plane_req->plane_id); - return -ENOENT; - } - /* No fb means shut it down */ - if (!plane_req->fb_id) { + if (!fb) { drm_modeset_lock_all(dev); old_fb = plane->fb; ret = plane->funcs->disable_plane(plane); @@ -2170,14 +2279,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data, goto out; } - crtc = drm_crtc_find(dev, plane_req->crtc_id); - if (!crtc) { - DRM_DEBUG_KMS("Unknown crtc ID %d\n", - plane_req->crtc_id); - ret = -ENOENT; - goto out; - } - /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { DRM_DEBUG_KMS("Invalid crtc for plane\n"); @@ -2185,14 +2286,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data, goto out; } - fb = drm_framebuffer_lookup(dev, plane_req->fb_id); - if (!fb) { - DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", - plane_req->fb_id); - ret = -ENOENT; - goto out; - } - /* Check whether this plane supports the fb pixel format. */ for (i = 0; i < plane->format_count; i++) if (fb->pixel_format == plane->format_types[i]) @@ -2208,43 +2301,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data, fb_height = fb->height << 16; /* Make sure source coordinates are inside the fb. */ - if (plane_req->src_w > fb_width || - plane_req->src_x > fb_width - plane_req->src_w || - plane_req->src_h > fb_height || - plane_req->src_y > fb_height - plane_req->src_h) { + if (src_w > fb_width || + src_x > fb_width - src_w || + src_h > fb_height || + src_y > fb_height - src_h) { DRM_DEBUG_KMS("Invalid source coordinates " "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", - plane_req->src_w >> 16, - ((plane_req->src_w & 0xffff) * 15625) >> 10, - plane_req->src_h >> 16, - ((plane_req->src_h & 0xffff) * 15625) >> 10, - plane_req->src_x >> 16, - ((plane_req->src_x & 0xffff) * 15625) >> 10, - plane_req->src_y >> 16, - ((plane_req->src_y & 0xffff) * 15625) >> 10); + src_w >> 16, ((src_w & 0xffff) * 15625) >> 10, + src_h >> 16, ((src_h & 0xffff) * 15625) >> 10, + src_x >> 16, ((src_x & 0xffff) * 15625) >> 10, + src_y >> 16, ((src_y & 0xffff) * 15625) >> 10); ret = -ENOSPC; goto out; } - /* Give drivers some help against integer overflows */ - if (plane_req->crtc_w > INT_MAX || - plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w || - plane_req->crtc_h > INT_MAX || - plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) { - DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", - plane_req->crtc_w, plane_req->crtc_h, - plane_req->crtc_x, plane_req->crtc_y); - ret = -ERANGE; - goto out; - } - drm_modeset_lock_all(dev); old_fb = plane->fb; ret = plane->funcs->update_plane(plane, crtc, fb, - plane_req->crtc_x, plane_req->crtc_y, - plane_req->crtc_w, plane_req->crtc_h, - plane_req->src_x, plane_req->src_y, - plane_req->src_w, plane_req->src_h); + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); if (!ret) { plane->crtc = crtc; plane->fb = fb; @@ -2261,6 +2336,85 @@ out: drm_framebuffer_unreference(old_fb); return ret; + +} + +/** + * drm_mode_setplane - configure a plane's configuration + * @dev: DRM device + * @data: ioctl data* + * @file_priv: DRM file info + * + * Set plane configuration, including placement, fb, scaling, and other factors. + * Or pass a NULL fb to disable (planes may be disabled without providing a + * valid crtc). + * + * Returns: + * Zero on success, errno on failure. 
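setplane_internal() above consumes a framebuffer reference on both paths: on failure it drops the reference passed in, on success it drops the reference held by the previous plane->fb. A rough sketch of an internal caller honouring that contract; the caller and its full-frame coordinates are illustrative only, not part of the patch.

    /* Illustrative internal caller of setplane_internal(): take a reference
     * that the helper will either drop on failure or trade for the old
     * plane->fb reference on success. src_{x,y,w,h} are 16.16 fixed point. */
    static int example_show_fb_on_plane(struct drm_plane *plane,
                                        struct drm_crtc *crtc,
                                        struct drm_framebuffer *fb)
    {
            drm_framebuffer_reference(fb);

            return setplane_internal(plane, crtc, fb,
                                     0, 0, fb->width, fb->height,
                                     0, 0,
                                     fb->width << 16, fb->height << 16);
    }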
+ */ +int drm_mode_setplane(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_set_plane *plane_req = data; + struct drm_mode_object *obj; + struct drm_plane *plane; + struct drm_crtc *crtc = NULL; + struct drm_framebuffer *fb = NULL; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + /* Give drivers some help against integer overflows */ + if (plane_req->crtc_w > INT_MAX || + plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w || + plane_req->crtc_h > INT_MAX || + plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) { + DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", + plane_req->crtc_w, plane_req->crtc_h, + plane_req->crtc_x, plane_req->crtc_y); + return -ERANGE; + } + + /* + * First, find the plane, crtc, and fb objects. If not available, + * we don't bother to call the driver. + */ + obj = drm_mode_object_find(dev, plane_req->plane_id, + DRM_MODE_OBJECT_PLANE); + if (!obj) { + DRM_DEBUG_KMS("Unknown plane ID %d\n", + plane_req->plane_id); + return -ENOENT; + } + plane = obj_to_plane(obj); + + if (plane_req->fb_id) { + fb = drm_framebuffer_lookup(dev, plane_req->fb_id); + if (!fb) { + DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", + plane_req->fb_id); + return -ENOENT; + } + + obj = drm_mode_object_find(dev, plane_req->crtc_id, + DRM_MODE_OBJECT_CRTC); + if (!obj) { + DRM_DEBUG_KMS("Unknown crtc ID %d\n", + plane_req->crtc_id); + return -ENOENT; + } + crtc = obj_to_crtc(obj); + } + + /* + * setplane_internal will take care of deref'ing either the old or new + * framebuffer depending on success. + */ + return setplane_internal(plane, crtc, fb, + plane_req->crtc_x, plane_req->crtc_y, + plane_req->crtc_w, plane_req->crtc_h, + plane_req->src_x, plane_req->src_y, + plane_req->src_w, plane_req->src_h); } /** @@ -2509,6 +2663,102 @@ out: return ret; } +/** + * drm_mode_cursor_universal - translate legacy cursor ioctl call into a + * universal plane handler call + * @crtc: crtc to update cursor for + * @req: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Legacy cursor ioctl's work directly with driver buffer handles. To + * translate legacy ioctl calls into universal plane handler calls, we need to + * wrap the native buffer handle in a drm_framebuffer. + * + * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB + * buffer with a pitch of 4*width; the universal plane interface should be used + * directly in cases where the hardware can support other buffer settings and + * userspace wants to make use of these capabilities. + * + * Returns: + * Zero on success, errno on failure. + */ +static int drm_mode_cursor_universal(struct drm_crtc *crtc, + struct drm_mode_cursor2 *req, + struct drm_file *file_priv) +{ + struct drm_device *dev = crtc->dev; + struct drm_framebuffer *fb = NULL; + struct drm_mode_fb_cmd2 fbreq = { + .width = req->width, + .height = req->height, + .pixel_format = DRM_FORMAT_ARGB8888, + .pitches = { req->width * 4 }, + .handles = { req->handle }, + }; + int32_t crtc_x, crtc_y; + uint32_t crtc_w = 0, crtc_h = 0; + uint32_t src_w = 0, src_h = 0; + int ret = 0; + + BUG_ON(!crtc->cursor); + + /* + * Obtain fb we'll be using (either new or existing) and take an extra + * reference to it if fb != null. setplane will take care of dropping + * the reference if the plane update fails. 
+ */ + if (req->flags & DRM_MODE_CURSOR_BO) { + if (req->handle) { + fb = add_framebuffer_internal(dev, &fbreq, file_priv); + if (IS_ERR(fb)) { + DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); + return PTR_ERR(fb); + } + + drm_framebuffer_reference(fb); + } else { + fb = NULL; + } + } else { + mutex_lock(&dev->mode_config.mutex); + fb = crtc->cursor->fb; + if (fb) + drm_framebuffer_reference(fb); + mutex_unlock(&dev->mode_config.mutex); + } + + if (req->flags & DRM_MODE_CURSOR_MOVE) { + crtc_x = req->x; + crtc_y = req->y; + } else { + crtc_x = crtc->cursor_x; + crtc_y = crtc->cursor_y; + } + + if (fb) { + crtc_w = fb->width; + crtc_h = fb->height; + src_w = fb->width << 16; + src_h = fb->height << 16; + } + + /* + * setplane_internal will take care of deref'ing either the old or new + * framebuffer depending on success. + */ + ret = setplane_internal(crtc->cursor, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + 0, 0, src_w, src_h); + + /* Update successful; save new cursor position, if necessary */ + if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { + crtc->cursor_x = req->x; + crtc->cursor_y = req->y; + } + + return ret; +} + static int drm_mode_cursor_common(struct drm_device *dev, struct drm_mode_cursor2 *req, struct drm_file *file_priv) @@ -2528,6 +2778,13 @@ static int drm_mode_cursor_common(struct drm_device *dev, return -ENOENT; } + /* + * If this crtc has a universal cursor plane, call that plane's update + * handler rather than using legacy cursor handlers. + */ + if (crtc->cursor) + return drm_mode_cursor_universal(crtc, req, file_priv); + drm_modeset_lock(&crtc->mutex, NULL); if (req->flags & DRM_MODE_CURSOR_BO) { if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { @@ -2827,56 +3084,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return 0; } -/** - * drm_mode_addfb2 - add an FB to the graphics configuration - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Add a new FB to the specified CRTC, given a user request with format. This is - * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers - * and uses fourcc codes as pixel format specifiers. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, errno on failure. 
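For the legacy cursor ioctls to take the drm_mode_cursor_universal() path above, a driver has to hand a cursor plane to drm_crtc_init_with_planes(). A hedged sketch with hypothetical example_ names, assuming the driver already has cursor plane funcs and that ARGB8888 is the only cursor format it advertises:

    static const uint32_t example_cursor_formats[] = { DRM_FORMAT_ARGB8888 };

    static int example_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
                                 struct drm_plane *primary,
                                 struct drm_plane *cursor,
                                 const struct drm_plane_funcs *cursor_funcs,
                                 const struct drm_crtc_funcs *crtc_funcs)
    {
            int ret;

            /* possible_crtcs of 0 is fine here; drm_crtc_init_with_planes()
             * fills it in for the cursor plane. */
            ret = drm_universal_plane_init(dev, cursor, 0, cursor_funcs,
                                           example_cursor_formats,
                                           ARRAY_SIZE(example_cursor_formats),
                                           DRM_PLANE_TYPE_CURSOR);
            if (ret)
                    return ret;

            /* With crtc->cursor set, DRM_IOCTL_MODE_CURSOR{,2} are routed
             * through drm_mode_cursor_universal() instead of the legacy
             * cursor_set/cursor_move hooks. */
            return drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                             crtc_funcs);
    }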
- */ -int drm_mode_addfb2(struct drm_device *dev, - void *data, struct drm_file *file_priv) +static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) { - struct drm_mode_fb_cmd2 *r = data; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - if (r->flags & ~DRM_MODE_FB_INTERLACED) { DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); - return -EINVAL; + return ERR_PTR(-EINVAL); } if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", r->width, config->min_width, config->max_width); - return -EINVAL; + return ERR_PTR(-EINVAL); } if ((config->min_height > r->height) || (r->height > config->max_height)) { DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", r->height, config->min_height, config->max_height); - return -EINVAL; + return ERR_PTR(-EINVAL); } ret = framebuffer_check(r); if (ret) - return ret; + return ERR_PTR(ret); fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); - return PTR_ERR(fb); + return fb; } mutex_lock(&file_priv->fbs_lock); @@ -2885,8 +3124,37 @@ int drm_mode_addfb2(struct drm_device *dev, DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); mutex_unlock(&file_priv->fbs_lock); + return fb; +} - return ret; +/** + * drm_mode_addfb2 - add an FB to the graphics configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Add a new FB to the specified CRTC, given a user request with format. This is + * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers + * and uses fourcc codes as pixel format specifiers. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, errno on failure. 
+ */ +int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_framebuffer *fb; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + fb = add_framebuffer_internal(dev, data, file_priv); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + return 0; } /** @@ -3176,7 +3444,7 @@ fail: EXPORT_SYMBOL(drm_property_create); /** - * drm_property_create - create a new enumeration property type + * drm_property_create_enum - create a new enumeration property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3222,7 +3490,7 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, EXPORT_SYMBOL(drm_property_create_enum); /** - * drm_property_create - create a new bitmask property type + * drm_property_create_bitmask - create a new bitmask property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3242,19 +3510,28 @@ EXPORT_SYMBOL(drm_property_create_enum); struct drm_property *drm_property_create_bitmask(struct drm_device *dev, int flags, const char *name, const struct drm_prop_enum_list *props, - int num_values) + int num_props, + uint64_t supported_bits) { struct drm_property *property; - int i, ret; + int i, ret, index = 0; + int num_values = hweight64(supported_bits); flags |= DRM_MODE_PROP_BITMASK; property = drm_property_create(dev, flags, name, num_values); if (!property) return NULL; + for (i = 0; i < num_props; i++) { + if (!(supported_bits & (1ULL << props[i].type))) + continue; - for (i = 0; i < num_values; i++) { - ret = drm_property_add_enum(property, i, + if (WARN_ON(index >= num_values)) { + drm_property_destroy(dev, property); + return NULL; + } + + ret = drm_property_add_enum(property, index++, props[i].type, props[i].name); if (ret) { @@ -3284,7 +3561,7 @@ static struct drm_property *property_create_range(struct drm_device *dev, } /** - * drm_property_create - create a new ranged property type + * drm_property_create_range - create a new ranged property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3703,6 +3980,25 @@ done: return ret; } +int drm_mode_connector_set_path_property(struct drm_connector *connector, + char *path) +{ + struct drm_device *dev = connector->dev; + int ret, size; + size = strlen(path) + 1; + + connector->path_blob_ptr = drm_property_create_blob(connector->dev, + size, path); + if (!connector->path_blob_ptr) + return -EINVAL; + + ret = drm_object_property_set_value(&connector->base, + dev->mode_config.path_property, + connector->path_blob_ptr->base.id); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_set_path_property); + /** * drm_mode_connector_update_edid_property - update the edid property of a connector * @connector: drm connector @@ -3720,6 +4016,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, struct drm_device *dev = connector->dev; int ret, size; + /* ignore requests to set edid when overridden */ + if (connector->override_edid) + return 0; + if (connector->edid_blob_ptr) drm_property_destroy_blob(dev, connector->edid_blob_ptr); @@ -4396,8 +4696,9 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev, return -EINVAL; /* overflow checks for 32bit size calculations */ + /* NOTE: DIV_ROUND_UP() can overflow */ cpp = DIV_ROUND_UP(args->bpp, 8); - if (cpp > 0xffffffffU / args->width) + if (!cpp || cpp > 0xffffffffU / args->width) return -EINVAL; stride = cpp * args->width; if 
(args->height > 0xffffffffU / stride) @@ -4680,6 +4981,36 @@ int drm_format_vert_chroma_subsampling(uint32_t format) EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); /** + * drm_rotation_simplify() - Try to simplify the rotation + * @rotation: Rotation to be simplified + * @supported_rotations: Supported rotations + * + * Attempt to simplify the rotation to a form that is supported. + * Eg. if the hardware supports everything except DRM_REFLECT_X + * one could call this function like this: + * + * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) | + * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) | + * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y)); + * + * to eliminate the DRM_ROTATE_X flag. Depending on what kind of + * transforms the hardware supports, this function may not + * be able to produce a supported transform, so the caller should + * check the result afterwards. + */ +unsigned int drm_rotation_simplify(unsigned int rotation, + unsigned int supported_rotations) +{ + if (rotation & ~supported_rotations) { + rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y); + rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4); + } + + return rotation; +} +EXPORT_SYMBOL(drm_rotation_simplify); + +/** * drm_mode_config_init - initialize DRM mode_configuration structure * @dev: DRM device * @@ -4797,3 +5128,21 @@ void drm_mode_config_cleanup(struct drm_device *dev) drm_modeset_lock_fini(&dev->mode_config.connection_mutex); } EXPORT_SYMBOL(drm_mode_config_cleanup); + +struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, + unsigned int supported_rotations) +{ + static const struct drm_prop_enum_list props[] = { + { DRM_ROTATE_0, "rotate-0" }, + { DRM_ROTATE_90, "rotate-90" }, + { DRM_ROTATE_180, "rotate-180" }, + { DRM_ROTATE_270, "rotate-270" }, + { DRM_REFLECT_X, "reflect-x" }, + { DRM_REFLECT_Y, "reflect-y" }, + }; + + return drm_property_create_bitmask(dev, 0, "rotation", + props, ARRAY_SIZE(props), + supported_rotations); +} +EXPORT_SYMBOL(drm_mode_create_rotation_property); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 78b37f3febd3..6c65a0a28fbd 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -818,6 +818,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, &fb->bits_per_pixel); fb->pixel_format = mode_cmd->pixel_format; + fb->flags = mode_cmd->flags; } EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index b4b51d46f339..13bd42923dd4 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/export.h> #include <drm/drmP.h> +#include <drm/drm_edid.h> #if defined(CONFIG_DEBUG_FS) @@ -237,5 +238,186 @@ int drm_debugfs_cleanup(struct drm_minor *minor) return 0; } +static int connector_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + const char *status; + + switch (connector->force) { + case DRM_FORCE_ON: + status = "on\n"; + break; + + case DRM_FORCE_ON_DIGITAL: + status = "digital\n"; + break; + + case DRM_FORCE_OFF: + status = "off\n"; + break; + + case DRM_FORCE_UNSPECIFIED: + status = "unspecified\n"; + break; + + default: + return 0; + } + + seq_puts(m, status); + + return 0; +} + +static int connector_open(struct inode *inode, struct file *file) +{ + struct drm_connector *dev = inode->i_private; + + return 
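A short sketch of how a driver might use the rotation property helper shown above, attaching it to a plane that supports 0/180 degree rotation plus Y reflection; the example_ name is hypothetical, and where to store the returned property is a driver choice.

    /* Illustrative only: create the bitmask "rotation" property for the
     * rotations this plane can do and attach it with rotate-0 as default. */
    static int example_attach_rotation(struct drm_plane *plane)
    {
            struct drm_device *dev = plane->dev;
            struct drm_property *prop;

            prop = drm_mode_create_rotation_property(dev,
                            BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180) |
                            BIT(DRM_REFLECT_Y));
            if (!prop)
                    return -ENOMEM;

            drm_object_attach_property(&plane->base, prop, BIT(DRM_ROTATE_0));
            return 0;
    }

A plane's set_property handler can then run the requested value through drm_rotation_simplify() with the same supported mask and reject it if unsupported bits remain, as the kernel-doc above suggests.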
single_open(file, connector_show, dev); +} + +static ssize_t connector_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_connector *connector = m->private; + char buf[12]; + + if (len > sizeof(buf) - 1) + return -EINVAL; + + if (copy_from_user(buf, ubuf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (!strcmp(buf, "on")) + connector->force = DRM_FORCE_ON; + else if (!strcmp(buf, "digital")) + connector->force = DRM_FORCE_ON_DIGITAL; + else if (!strcmp(buf, "off")) + connector->force = DRM_FORCE_OFF; + else if (!strcmp(buf, "unspecified")) + connector->force = DRM_FORCE_UNSPECIFIED; + else + return -EINVAL; + + return len; +} + +static int edid_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct drm_property_blob *edid = connector->edid_blob_ptr; + + if (connector->override_edid && edid) + seq_write(m, edid->data, edid->length); + + return 0; +} + +static int edid_open(struct inode *inode, struct file *file) +{ + struct drm_connector *dev = inode->i_private; + + return single_open(file, edid_show, dev); +} + +static ssize_t edid_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_connector *connector = m->private; + char *buf; + struct edid *edid; + int ret; + + buf = memdup_user(ubuf, len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + edid = (struct edid *) buf; + + if (len == 5 && !strncmp(buf, "reset", 5)) { + connector->override_edid = false; + ret = drm_mode_connector_update_edid_property(connector, NULL); + } else if (len < EDID_LENGTH || + EDID_LENGTH * (1 + edid->extensions) > len) + ret = -EINVAL; + else { + connector->override_edid = false; + ret = drm_mode_connector_update_edid_property(connector, edid); + if (!ret) + connector->override_edid = true; + } + + kfree(buf); + + return (ret) ? 
ret : len; +} + +static const struct file_operations drm_edid_fops = { + .owner = THIS_MODULE, + .open = edid_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = edid_write +}; + + +static const struct file_operations drm_connector_fops = { + .owner = THIS_MODULE, + .open = connector_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = connector_write +}; + +int drm_debugfs_connector_add(struct drm_connector *connector) +{ + struct drm_minor *minor = connector->dev->primary; + struct dentry *root, *ent; + + if (!minor->debugfs_root) + return -1; + + root = debugfs_create_dir(connector->name, minor->debugfs_root); + if (!root) + return -ENOMEM; + + connector->debugfs_entry = root; + + /* force */ + ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector, + &drm_connector_fops); + if (!ent) + goto error; + + /* edid */ + ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root, + connector, &drm_edid_fops); + if (!ent) + goto error; + + return 0; + +error: + debugfs_remove_recursive(connector->debugfs_entry); + connector->debugfs_entry = NULL; + return -ENOMEM; +} + +void drm_debugfs_connector_remove(struct drm_connector *connector) +{ + if (!connector->debugfs_entry) + return; + + debugfs_remove_recursive(connector->debugfs_entry); + + connector->debugfs_entry = NULL; +} + #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c new file mode 100644 index 000000000000..ac3c2738db94 --- /dev/null +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -0,0 +1,2715 @@ +/* + * Copyright © 2014 Red Hat + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/i2c.h> +#include <drm/drm_dp_mst_helper.h> +#include <drm/drmP.h> + +#include <drm/drm_fixed.h> + +/** + * DOC: dp mst helper + * + * These functions contain parts of the DisplayPort 1.2a MultiStream Transport + * protocol. The helpers contain a topology manager and bandwidth manager. + * The helpers encapsulate the sending and received of sideband msgs. 
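+ *
+ * A rough sketch of how a driver is expected to wire these helpers up
+ * (illustrative only; manager setup, DPCD probing and error handling
+ * are left to the driver):
+ *
+ *	if (sink_reports_mst_capable)
+ *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
+ *	...
+ *	from the short-pulse HPD handler:
+ *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
+ *	...
+ *	per-port connectors created through the manager callbacks then
+ *	use drm_dp_mst_detect_port() and drm_dp_mst_get_edid() for probing.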
+ */ +static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, + char *buf); +static int test_calc_pbn_mode(void); + +static void drm_dp_put_port(struct drm_dp_mst_port *port); + +static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload); + +static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes); + +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); +static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port); +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + u8 *guid); + +static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux); +static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux); +static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr); +/* sideband msg handling */ +static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = num_nibbles * 4; + u8 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if (bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x10) == 0x10) + remainder ^= 0x13; + } + + number_of_bits = 4; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x10) != 0) + remainder ^= 0x13; + } + + return remainder; +} + +static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes) +{ + u8 bitmask = 0x80; + u8 bitshift = 7; + u8 array_index = 0; + int number_of_bits = number_of_bytes * 8; + u16 remainder = 0; + + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + remainder |= (data[array_index] & bitmask) >> bitshift; + bitmask >>= 1; + bitshift--; + if (bitmask == 0) { + bitmask = 0x80; + bitshift = 7; + array_index++; + } + if ((remainder & 0x100) == 0x100) + remainder ^= 0xd5; + } + + number_of_bits = 8; + while (number_of_bits != 0) { + number_of_bits--; + remainder <<= 1; + if ((remainder & 0x100) != 0) + remainder ^= 0xd5; + } + + return remainder & 0xff; +} +static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr) +{ + u8 size = 3; + size += (hdr->lct / 2); + return size; +} + +static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int *len) +{ + int idx = 0; + int i; + u8 crc4; + buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); + for (i = 0; i < (hdr->lct / 2); i++) + buf[idx++] = hdr->rad[i]; + buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | + (hdr->msg_len & 0x3f); + buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); + + crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); + buf[idx - 1] |= (crc4 & 0xf); + + *len = idx; +} + +static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, + u8 *buf, int buflen, u8 *hdrlen) +{ + u8 crc4; + u8 len; + int i; + u8 idx; + if (buf[0] == 0) + return false; + len = 3; + len += ((buf[0] & 0xf0) >> 4) / 2; + if (len > buflen) + return false; + crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); + + if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { + DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); + return false; + } + + hdr->lct = 
(buf[0] & 0xf0) >> 4; + hdr->lcr = (buf[0] & 0xf); + idx = 1; + for (i = 0; i < (hdr->lct / 2); i++) + hdr->rad[i] = buf[idx++]; + hdr->broadcast = (buf[idx] >> 7) & 0x1; + hdr->path_msg = (buf[idx] >> 6) & 0x1; + hdr->msg_len = buf[idx] & 0x3f; + idx++; + hdr->somt = (buf[idx] >> 7) & 0x1; + hdr->eomt = (buf[idx] >> 6) & 0x1; + hdr->seqno = (buf[idx] >> 4) & 0x1; + idx++; + *hdrlen = idx; + return true; +} + +static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, + struct drm_dp_sideband_msg_tx *raw) +{ + int idx = 0; + int i; + u8 *buf = raw->msg; + buf[idx++] = req->req_type & 0x7f; + + switch (req->req_type) { + case DP_ENUM_PATH_RESOURCES: + buf[idx] = (req->u.port_num.port_number & 0xf) << 4; + idx++; + break; + case DP_ALLOCATE_PAYLOAD: + buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | + (req->u.allocate_payload.number_sdp_streams & 0xf); + idx++; + buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); + idx++; + buf[idx] = (req->u.allocate_payload.pbn >> 8); + idx++; + buf[idx] = (req->u.allocate_payload.pbn & 0xff); + idx++; + for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { + buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | + (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); + idx++; + } + if (req->u.allocate_payload.number_sdp_streams & 1) { + i = req->u.allocate_payload.number_sdp_streams - 1; + buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; + idx++; + } + break; + case DP_QUERY_PAYLOAD: + buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; + idx++; + buf[idx] = (req->u.query_payload.vcpi & 0x7f); + idx++; + break; + case DP_REMOTE_DPCD_READ: + buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; + buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; + idx++; + buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; + idx++; + buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); + idx++; + buf[idx] = (req->u.dpcd_read.num_bytes); + idx++; + break; + + case DP_REMOTE_DPCD_WRITE: + buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; + buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; + idx++; + buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; + idx++; + buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); + idx++; + buf[idx] = (req->u.dpcd_write.num_bytes); + idx++; + memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); + idx += req->u.dpcd_write.num_bytes; + break; + case DP_REMOTE_I2C_READ: + buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; + buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); + idx++; + for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { + buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; + idx++; + buf[idx] = req->u.i2c_read.transactions[i].num_bytes; + idx++; + memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); + idx += req->u.i2c_read.transactions[i].num_bytes; + + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); + idx++; + } + buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; + idx++; + buf[idx] = (req->u.i2c_read.num_bytes_read); + idx++; + break; + + case DP_REMOTE_I2C_WRITE: + buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; + idx++; + buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; + idx++; + buf[idx] = (req->u.i2c_write.num_bytes); + idx++; + memcpy(&buf[idx], 
req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); + idx += req->u.i2c_write.num_bytes; + break; + } + raw->cur_len = idx; +} + +static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) +{ + u8 crc4; + crc4 = drm_dp_msg_data_crc4(msg, len); + msg[len] = crc4; +} + +static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep, + struct drm_dp_sideband_msg_tx *raw) +{ + int idx = 0; + u8 *buf = raw->msg; + + buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); + + raw->cur_len = idx; +} + +/* this adds a chunk of msg to the builder to get the final msg */ +static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, + u8 *replybuf, u8 replybuflen, bool hdr) +{ + int ret; + u8 crc4; + + if (hdr) { + u8 hdrlen; + struct drm_dp_sideband_msg_hdr recv_hdr; + ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); + if (ret == false) { + print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); + return false; + } + + /* get length contained in this portion */ + msg->curchunk_len = recv_hdr.msg_len; + msg->curchunk_hdrlen = hdrlen; + + /* we have already gotten an somt - don't bother parsing */ + if (recv_hdr.somt && msg->have_somt) + return false; + + if (recv_hdr.somt) { + memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); + msg->have_somt = true; + } + if (recv_hdr.eomt) + msg->have_eomt = true; + + /* copy the bytes for the remainder of this header chunk */ + msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); + memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); + } else { + memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); + msg->curchunk_idx += replybuflen; + } + + if (msg->curchunk_idx >= msg->curchunk_len) { + /* do CRC */ + crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); + /* copy chunk into bigger msg */ + memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); + msg->curlen += msg->curchunk_len - 1; + } + return true; +} + +static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + int i; + memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); + idx += 16; + repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + for (i = 0; i < repmsg->u.link_addr.nports; i++) { + if (raw->msg[idx] & 0x80) + repmsg->u.link_addr.ports[i].input_port = 1; + + repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; + repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); + + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; + repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; + if (repmsg->u.link_addr.ports[i].input_port == 0) + repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; + idx++; + if (idx > raw->curlen) + goto fail_len; + if (repmsg->u.link_addr.ports[i].input_port == 0) { + repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); + idx++; + if (idx > raw->curlen) + goto fail_len; + memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; + repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); + idx++; + + } + if (idx > raw->curlen) + goto 
fail_len; + } + + return true; +fail_len: + DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; + if (idx > raw->curlen) + goto fail_len; + + memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); + return true; +fail_len: + DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + + repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; + idx++; + /* TODO check */ + memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); + return true; +fail_len: + DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.allocate_payload.vcpi = raw->msg[idx]; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *repmsg) +{ + int idx = 1; + repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; + idx++; + if (idx > raw->curlen) + goto fail_len; + repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); + idx += 2; + if (idx > raw->curlen) + goto fail_len; + return true; +fail_len: + 
DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_reply_body *msg) +{ + memset(msg, 0, sizeof(*msg)); + msg->reply_type = (raw->msg[0] & 0x80) >> 7; + msg->req_type = (raw->msg[0] & 0x7f); + + if (msg->reply_type) { + memcpy(msg->u.nak.guid, &raw->msg[1], 16); + msg->u.nak.reason = raw->msg[17]; + msg->u.nak.nak_data = raw->msg[18]; + return false; + } + + switch (msg->req_type) { + case DP_LINK_ADDRESS: + return drm_dp_sideband_parse_link_address(raw, msg); + case DP_QUERY_PAYLOAD: + return drm_dp_sideband_parse_query_payload_ack(raw, msg); + case DP_REMOTE_DPCD_READ: + return drm_dp_sideband_parse_remote_dpcd_read(raw, msg); + case DP_REMOTE_DPCD_WRITE: + return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); + case DP_REMOTE_I2C_READ: + return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); + case DP_ENUM_PATH_RESOURCES: + return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); + case DP_ALLOCATE_PAYLOAD: + return drm_dp_sideband_parse_allocate_payload_ack(raw, msg); + default: + DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type); + return false; + } +} + +static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + int idx = 1; + + msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; + idx++; + if (idx > raw->curlen) + goto fail_len; + + memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + + msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; + msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; + msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; + msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; + msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); + idx++; + return true; +fail_len: + DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + int idx = 1; + + msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; + idx++; + if (idx > raw->curlen) + goto fail_len; + + memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); + idx += 16; + if (idx > raw->curlen) + goto fail_len; + + msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); + idx++; + return true; +fail_len: + DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); + return false; +} + +static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, + struct drm_dp_sideband_msg_req_body *msg) +{ + memset(msg, 0, sizeof(*msg)); + msg->req_type = (raw->msg[0] & 0x7f); + + switch (msg->req_type) { + case DP_CONNECTION_STATUS_NOTIFY: + return drm_dp_sideband_parse_connection_status_notify(raw, msg); + case DP_RESOURCE_STATUS_NOTIFY: + return drm_dp_sideband_parse_resource_status_notify(raw, msg); + default: + DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type); + return false; + } +} + +static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_REMOTE_DPCD_WRITE; + req.u.dpcd_write.port_number = port_num; + req.u.dpcd_write.dpcd_address = offset; + 
req.u.dpcd_write.num_bytes = num_bytes; + req.u.dpcd_write.bytes = bytes; + drm_dp_encode_sideband_req(&req, msg); + + return 0; +} + +static int build_link_address(struct drm_dp_sideband_msg_tx *msg) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_LINK_ADDRESS; + drm_dp_encode_sideband_req(&req, msg); + return 0; +} + +static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_ENUM_PATH_RESOURCES; + req.u.port_num.port_number = port_num; + drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; + return 0; +} + +static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, + u8 vcpi, uint16_t pbn) +{ + struct drm_dp_sideband_msg_req_body req; + memset(&req, 0, sizeof(req)); + req.req_type = DP_ALLOCATE_PAYLOAD; + req.u.allocate_payload.port_number = port_num; + req.u.allocate_payload.vcpi = vcpi; + req.u.allocate_payload.pbn = pbn; + drm_dp_encode_sideband_req(&req, msg); + msg->path_msg = true; + return 0; +} + +static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_vcpi *vcpi) +{ + int ret; + + mutex_lock(&mgr->payload_lock); + ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); + if (ret > mgr->max_payloads) { + ret = -EINVAL; + DRM_DEBUG_KMS("out of payload ids %d\n", ret); + goto out_unlock; + } + + set_bit(ret, &mgr->payload_mask); + vcpi->vcpi = ret; + mgr->proposed_vcpis[ret - 1] = vcpi; +out_unlock: + mutex_unlock(&mgr->payload_lock); + return ret; +} + +static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, + int id) +{ + if (id == 0) + return; + + mutex_lock(&mgr->payload_lock); + DRM_DEBUG_KMS("putting payload %d\n", id); + clear_bit(id, &mgr->payload_mask); + mgr->proposed_vcpis[id - 1] = NULL; + mutex_unlock(&mgr->payload_lock); +} + +static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + bool ret; + mutex_lock(&mgr->qlock); + ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || + txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); + mutex_unlock(&mgr->qlock); + return ret; +} + +static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, + struct drm_dp_sideband_msg_tx *txmsg) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + int ret; + + ret = wait_event_timeout(mgr->tx_waitq, + check_txmsg_state(mgr, txmsg), + (4 * HZ)); + mutex_lock(&mstb->mgr->qlock); + if (ret > 0) { + if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { + ret = -EIO; + goto out; + } + } else { + DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); + + /* dump some state */ + ret = -EIO; + + /* remove from q */ + if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || + txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { + list_del(&txmsg->next); + } + + if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || + txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { + mstb->tx_slots[txmsg->seqno] = NULL; + } + } +out: + mutex_unlock(&mgr->qlock); + + return ret; +} + +static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) +{ + struct drm_dp_mst_branch *mstb; + + mstb = kzalloc(sizeof(*mstb), GFP_KERNEL); + if (!mstb) + return NULL; + + mstb->lct = lct; + if (lct > 1) + memcpy(mstb->rad, rad, lct / 2); + INIT_LIST_HEAD(&mstb->ports); + kref_init(&mstb->kref); + return mstb; +} + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = container_of(kref, struct 
drm_dp_mst_branch, kref); + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + cancel_work_sync(&mstb->mgr->work); + + /* + * destroy all ports - don't need lock + * as there are no more references to the mst branch + * device at this point. + */ + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_put_port(port); + } + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up(&mstb->mgr->tx_waitq); + kfree(mstb); +} + +static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) +{ + kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); +} + + +static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) +{ + switch (old_pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case DP_PEER_DEVICE_MST_BRANCHING: + drm_dp_put_mst_branch_device(port->mstb); + port->mstb = NULL; + break; + } +} + +static void drm_dp_destroy_port(struct kref *kref) +{ + struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { + port->vcpi.num_slots = 0; + if (port->connector) + (*port->mgr->cbs->destroy_connector)(mgr, port->connector); + drm_dp_port_teardown_pdt(port, port->pdt); + + if (!port->input && port->vcpi.vcpi > 0) + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + } + kfree(port); + + (*mgr->cbs->hotplug)(mgr); +} + +static void drm_dp_put_port(struct drm_dp_mst_port *port) +{ + kref_put(&port->kref, drm_dp_destroy_port); +} + +static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) +{ + struct drm_dp_mst_port *port; + struct drm_dp_mst_branch *rmstb; + if (to_find == mstb) { + kref_get(&mstb->kref); + return mstb; + } + list_for_each_entry(port, &mstb->ports, next) { + if (port->mstb) { + rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); + if (rmstb) + return rmstb; + } + } + return NULL; +} + +static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_branch *rmstb = NULL; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) + rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); + mutex_unlock(&mgr->lock); + return rmstb; +} + +static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find) +{ + struct drm_dp_mst_port *port, *mport; + + list_for_each_entry(port, &mstb->ports, next) { + if (port == to_find) { + kref_get(&port->kref); + return port; + } + if (port->mstb) { + mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find); + if (mport) + return mport; + } + } + return NULL; +} + +static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_port *rport = NULL; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) + rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); + mutex_unlock(&mgr->lock); + 
return rport; +} + +static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) +{ + struct drm_dp_mst_port *port; + + list_for_each_entry(port, &mstb->ports, next) { + if (port->port_num == port_num) { + kref_get(&port->kref); + return port; + } + } + + return NULL; +} + +/* + * calculate a new RAD for this MST branch device + * if parent has an LCT of 2 then it has 1 nibble of RAD, + * if parent has an LCT of 3 then it has 2 nibbles of RAD, + */ +static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, + u8 *rad) +{ + int lct = port->parent->lct; + int shift = 4; + int idx = lct / 2; + if (lct > 1) { + memcpy(rad, port->parent->rad, idx); + shift = (lct % 2) ? 4 : 0; + } else + rad[0] = 0; + + rad[idx] |= port->port_num << shift; + return lct + 1; +} + +/* + * return sends link address for new mstb + */ +static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +{ + int ret; + u8 rad[6], lct; + bool send_link = false; + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* add i2c over sideband */ + ret = drm_dp_mst_register_i2c_bus(&port->aux); + break; + case DP_PEER_DEVICE_MST_BRANCHING: + lct = drm_dp_calculate_rad(port, rad); + + port->mstb = drm_dp_add_mst_branch_device(lct, rad); + port->mstb->mgr = port->mgr; + port->mstb->port_parent = port; + + send_link = true; + break; + } + return send_link; +} + +static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + int ret; + if (port->dpcd_rev >= 0x12) { + port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); + if (!port->guid_valid) { + ret = drm_dp_send_dpcd_write(mstb->mgr, + port, + DP_GUID, + 16, port->guid); + port->guid_valid = true; + } + } +} + +static void build_mst_prop_path(struct drm_dp_mst_port *port, + struct drm_dp_mst_branch *mstb, + char *proppath) +{ + int i; + char temp[8]; + snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id); + for (i = 0; i < (mstb->lct - 1); i++) { + int shift = (i % 2) ? 
0 : 4; + int port_num = mstb->rad[i / 2] >> shift; + snprintf(temp, 8, "-%d", port_num); + strncat(proppath, temp, 255); + } + snprintf(temp, 8, "-%d", port->port_num); + strncat(proppath, temp, 255); +} + +static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, + struct device *dev, + struct drm_dp_link_addr_reply_port *port_msg) +{ + struct drm_dp_mst_port *port; + bool ret; + bool created = false; + int old_pdt = 0; + int old_ddps = 0; + port = drm_dp_get_port(mstb, port_msg->port_number); + if (!port) { + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return; + kref_init(&port->kref); + port->parent = mstb; + port->port_num = port_msg->port_number; + port->mgr = mstb->mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev; + created = true; + } else { + old_pdt = port->pdt; + old_ddps = port->ddps; + } + + port->pdt = port_msg->peer_device_type; + port->input = port_msg->input_port; + port->mcs = port_msg->mcs; + port->ddps = port_msg->ddps; + port->ldps = port_msg->legacy_device_plug_status; + port->dpcd_rev = port_msg->dpcd_revision; + port->num_sdp_streams = port_msg->num_sdp_streams; + port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; + memcpy(port->guid, port_msg->peer_guid, 16); + + /* manage mstb port lists with mgr lock - take a reference + for this list */ + if (created) { + mutex_lock(&mstb->mgr->lock); + kref_get(&port->kref); + list_add(&port->next, &mstb->ports); + mutex_unlock(&mstb->mgr->lock); + } + + if (old_ddps != port->ddps) { + if (port->ddps) { + drm_dp_check_port_guid(mstb, port); + if (!port->input) + drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); + } else { + port->guid_valid = false; + port->available_pbn = 0; + } + } + + if (old_pdt != port->pdt && !port->input) { + drm_dp_port_teardown_pdt(port, old_pdt); + + ret = drm_dp_port_setup_pdt(port); + if (ret == true) { + drm_dp_send_link_address(mstb->mgr, port->mstb); + port->mstb->link_address_sent = true; + } + } + + if (created && !port->input) { + char proppath[255]; + build_mst_prop_path(port, mstb, proppath); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + } + + /* put reference to this port */ + drm_dp_put_port(port); +} + +static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, + struct drm_dp_connection_status_notify *conn_stat) +{ + struct drm_dp_mst_port *port; + int old_pdt; + int old_ddps; + bool dowork = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); + if (!port) + return; + + old_ddps = port->ddps; + old_pdt = port->pdt; + port->pdt = conn_stat->peer_device_type; + port->mcs = conn_stat->message_capability_status; + port->ldps = conn_stat->legacy_device_plug_status; + port->ddps = conn_stat->displayport_device_plug_status; + + if (old_ddps != port->ddps) { + if (port->ddps) { + drm_dp_check_port_guid(mstb, port); + dowork = true; + } else { + port->guid_valid = false; + port->available_pbn = 0; + } + } + if (old_pdt != port->pdt && !port->input) { + drm_dp_port_teardown_pdt(port, old_pdt); + + if (drm_dp_port_setup_pdt(port)) + dowork = true; + } + + drm_dp_put_port(port); + if (dowork) + queue_work(system_long_wq, &mstb->mgr->work); + +} + +static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, + u8 lct, u8 *rad) +{ + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_port *port; + int i; + /* find the port by iterating down */ + mstb = mgr->mst_primary; + + for (i = 0; i < lct - 1; i++) { + int shift = (i % 2) ? 
0 : 4; + int port_num = rad[i / 2] >> shift; + + list_for_each_entry(port, &mstb->ports, next) { + if (port->port_num == port_num) { + if (!port->mstb) { + DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); + return NULL; + } + + mstb = port->mstb; + break; + } + } + } + kref_get(&mstb->kref); + return mstb; +} + +static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + + if (!mstb->link_address_sent) { + drm_dp_send_link_address(mgr, mstb); + mstb->link_address_sent = true; + } + list_for_each_entry(port, &mstb->ports, next) { + if (port->input) + continue; + + if (!port->ddps) + continue; + + if (!port->available_pbn) + drm_dp_send_enum_path_resources(mgr, mstb, port); + + if (port->mstb) + drm_dp_check_and_send_link_address(mgr, port->mstb); + } +} + +static void drm_dp_mst_link_probe_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + + drm_dp_check_and_send_link_address(mgr, mgr->mst_primary); + +} + +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + u8 *guid) +{ + static u8 zero_guid[16]; + + if (!memcmp(guid, zero_guid, 16)) { + u64 salt = get_jiffies_64(); + memcpy(&guid[0], &salt, sizeof(u64)); + memcpy(&guid[8], &salt, sizeof(u64)); + return false; + } + return true; +} + +#if 0 +static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_REMOTE_DPCD_READ; + req.u.dpcd_read.port_number = port_num; + req.u.dpcd_read.dpcd_address = offset; + req.u.dpcd_read.num_bytes = num_bytes; + drm_dp_encode_sideband_req(&req, msg); + + return 0; +} +#endif + +static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, + bool up, u8 *msg, int len) +{ + int ret; + int regbase = up ? 
DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; + int tosend, total, offset; + int retries = 0; + +retry: + total = len; + offset = 0; + do { + tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); + + ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, + &msg[offset], + tosend); + if (ret != tosend) { + if (ret == -EIO && retries < 5) { + retries++; + goto retry; + } + DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); + WARN(1, "fail\n"); + + return -EIO; + } + offset += tosend; + total -= tosend; + } while (total > 0); + return 0; +} + +static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + struct drm_dp_mst_branch *mstb = txmsg->dst; + + /* both msg slots are full */ + if (txmsg->seqno == -1) { + if (mstb->tx_slots[0] && mstb->tx_slots[1]) { + DRM_DEBUG_KMS("%s: failed to find slot\n", __func__); + return -EAGAIN; + } + if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) { + txmsg->seqno = mstb->last_seqno; + mstb->last_seqno ^= 1; + } else if (mstb->tx_slots[0] == NULL) + txmsg->seqno = 0; + else + txmsg->seqno = 1; + mstb->tx_slots[txmsg->seqno] = txmsg; + } + hdr->broadcast = 0; + hdr->path_msg = txmsg->path_msg; + hdr->lct = mstb->lct; + hdr->lcr = mstb->lct - 1; + if (mstb->lct > 1) + memcpy(hdr->rad, mstb->rad, mstb->lct / 2); + hdr->seqno = txmsg->seqno; + return 0; +} +/* + * process a single block of the next message in the sideband queue + */ +static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg, + bool up) +{ + u8 chunk[48]; + struct drm_dp_sideband_msg_hdr hdr; + int len, space, idx, tosend; + int ret; + + memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); + + if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { + txmsg->seqno = -1; + txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; + } + + /* make hdr from dst mst - for replies use seqno + otherwise assign one */ + ret = set_hdr_from_dst_qlock(&hdr, txmsg); + if (ret < 0) + return ret; + + /* amount left to send in this message */ + len = txmsg->cur_len - txmsg->cur_offset; + + /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ + space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); + + tosend = min(len, space); + if (len == txmsg->cur_len) + hdr.somt = 1; + if (space >= len) + hdr.eomt = 1; + + + hdr.msg_len = tosend + 1; + drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); + memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); + /* add crc at end */ + drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); + idx += tosend + 1; + + ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); + if (ret) { + DRM_DEBUG_KMS("sideband msg failed to send\n"); + return ret; + } + + txmsg->cur_offset += tosend; + if (txmsg->cur_offset == txmsg->cur_len) { + txmsg->state = DRM_DP_SIDEBAND_TX_SENT; + return 1; + } + return 0; +} + +/* must be called holding qlock */ +static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + /* construct a chunk from the first msg in the tx_msg queue */ + if (list_empty(&mgr->tx_msg_downq)) { + mgr->tx_down_in_progress = false; + return; + } + mgr->tx_down_in_progress = true; + + txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); + ret = process_single_tx_qlock(mgr, txmsg, false); + if (ret == 1) { + /* txmsg is sent it should be in the slots now */ + list_del(&txmsg->next); + } else if (ret) { + DRM_DEBUG_KMS("failed to send msg in q 
%d\n", ret); + list_del(&txmsg->next); + if (txmsg->seqno != -1) + txmsg->dst->tx_slots[txmsg->seqno] = NULL; + txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + wake_up(&mgr->tx_waitq); + } + if (list_empty(&mgr->tx_msg_downq)) { + mgr->tx_down_in_progress = false; + return; + } +} + +/* called holding qlock */ +static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + /* construct a chunk from the first msg in the tx_msg queue */ + if (list_empty(&mgr->tx_msg_upq)) { + mgr->tx_up_in_progress = false; + return; + } + + txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next); + ret = process_single_tx_qlock(mgr, txmsg, true); + if (ret == 1) { + /* up txmsgs aren't put in slots - so free after we send it */ + list_del(&txmsg->next); + kfree(txmsg); + } else if (ret) + DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + mgr->tx_up_in_progress = true; +} + +static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_sideband_msg_tx *txmsg) +{ + mutex_lock(&mgr->qlock); + list_add_tail(&txmsg->next, &mgr->tx_msg_downq); + if (!mgr->tx_down_in_progress) + process_single_down_tx_qlock(mgr); + mutex_unlock(&mgr->qlock); +} + +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + len = build_link_address(txmsg); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + int i; + + if (txmsg->reply.reply_type == 1) + DRM_DEBUG_KMS("link address nak received\n"); + else { + DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { + DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i, + txmsg->reply.u.link_addr.ports[i].input_port, + txmsg->reply.u.link_addr.ports[i].peer_device_type, + txmsg->reply.u.link_addr.ports[i].port_number, + txmsg->reply.u.link_addr.ports[i].dpcd_revision, + txmsg->reply.u.link_addr.ports[i].mcs, + txmsg->reply.u.link_addr.ports[i].ddps, + txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, + txmsg->reply.u.link_addr.ports[i].num_sdp_streams, + txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); + } + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { + drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); + } + (*mgr->cbs->hotplug)(mgr); + } + } else + DRM_DEBUG_KMS("link address failed %d\n", ret); + + kfree(txmsg); + return 0; +} + +static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + int ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + len = build_enum_path_resources(txmsg, port->port_num); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == 1) + DRM_DEBUG_KMS("enum path resources nak received\n"); + else { + if (port->port_num != txmsg->reply.u.path_resources.port_number) + DRM_ERROR("got incorrect port in response\n"); + DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, 
txmsg->reply.u.path_resources.full_payload_bw_number, + txmsg->reply.u.path_resources.avail_payload_bw_number); + port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; + } + } + + kfree(txmsg); + return 0; +} + +static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + int pbn) +{ + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + int len, ret; + + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + if (!mstb) + return -EINVAL; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto fail_put; + } + + txmsg->dst = mstb; + len = build_allocate_payload(txmsg, port->port_num, + id, + pbn); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == 1) { + ret = -EINVAL; + } else + ret = 0; + } + kfree(txmsg); +fail_put: + drm_dp_put_mst_branch_device(mstb); + return ret; +} + +static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload) +{ + int ret; + + ret = drm_dp_dpcd_write_payload(mgr, id, payload); + if (ret < 0) { + payload->payload_state = 0; + return ret; + } + payload->payload_state = DP_PAYLOAD_LOCAL; + return 0; +} + +static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + struct drm_dp_payload *payload) +{ + int ret; + ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); + if (ret < 0) + return ret; + payload->payload_state = DP_PAYLOAD_REMOTE; + return ret; +} + +static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int id, + struct drm_dp_payload *payload) +{ + DRM_DEBUG_KMS("\n"); + /* its okay for these to fail */ + if (port) { + drm_dp_payload_send_msg(mgr, port, id, 0); + } + + drm_dp_dpcd_write_payload(mgr, id, payload); + payload->payload_state = 0; + return 0; +} + +static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, + int id, + struct drm_dp_payload *payload) +{ + payload->payload_state = 0; + return 0; +} + +/** + * drm_dp_update_payload_part1() - Execute payload update part 1 + * @mgr: manager to use. + * + * This iterates over all proposed virtual channels, and tries to + * allocate space in the link for them. For 0->slots transitions, + * this step just writes the VCPI to the MST device. For slots->0 + * transitions, this writes the updated VCPIs and removes the + * remote VC payloads. + * + * after calling this the driver should generate ACT and payload + * packets. 
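+ *
+ * Illustrative ordering (a sketch only; generating ACT and waiting for
+ * the ACT handled bit is hardware specific and not shown here):
+ *
+ *	drm_dp_update_payload_part1(mgr);
+ *	... generate ACT, wait for ACT handled ...
+ *	drm_dp_update_payload_part2(mgr);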
+ */ +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) +{ + int i; + int cur_slots = 1; + struct drm_dp_payload req_payload; + struct drm_dp_mst_port *port; + + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + /* solve the current payloads - compare to the hw ones + - update the hw view */ + req_payload.start_slot = cur_slots; + if (mgr->proposed_vcpis[i]) { + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; + } else { + port = NULL; + req_payload.num_slots = 0; + } + /* work out what is required to happen with this payload */ + if (mgr->payloads[i].start_slot != req_payload.start_slot || + mgr->payloads[i].num_slots != req_payload.num_slots) { + + /* need to push an update for this payload */ + if (req_payload.num_slots) { + drm_dp_create_payload_step1(mgr, i + 1, &req_payload); + mgr->payloads[i].num_slots = req_payload.num_slots; + } else if (mgr->payloads[i].num_slots) { + mgr->payloads[i].num_slots = 0; + drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]); + req_payload.payload_state = mgr->payloads[i].payload_state; + } else + req_payload.payload_state = 0; + + mgr->payloads[i].start_slot = req_payload.start_slot; + mgr->payloads[i].payload_state = req_payload.payload_state; + } + cur_slots += req_payload.num_slots; + } + mutex_unlock(&mgr->payload_lock); + + return 0; +} +EXPORT_SYMBOL(drm_dp_update_payload_part1); + +/** + * drm_dp_update_payload_part2() - Execute payload update part 2 + * @mgr: manager to use. + * + * This iterates over all proposed virtual channels, and tries to + * allocate space in the link for them. For 0->slots transitions, + * this step writes the remote VC payload commands. For slots->0 + * this just resets some internal state. 
+ */ +int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_mst_port *port; + int i; + int ret = 0; + mutex_lock(&mgr->payload_lock); + for (i = 0; i < mgr->max_payloads; i++) { + + if (!mgr->proposed_vcpis[i]) + continue; + + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + + DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); + if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { + ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]); + } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { + ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]); + } + if (ret) { + mutex_unlock(&mgr->payload_lock); + return ret; + } + } + mutex_unlock(&mgr->payload_lock); + return 0; +} +EXPORT_SYMBOL(drm_dp_update_payload_part2); + +#if 0 /* unused as of yet */ +static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size) +{ + int len; + struct drm_dp_sideband_msg_tx *txmsg; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + len = build_dpcd_read(txmsg, port->port_num, 0, 8); + txmsg->dst = port->parent; + + drm_dp_queue_down_tx(mgr, txmsg); + + return 0; +} +#endif + +static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + int offset, int size, u8 *bytes) +{ + int len; + int ret; + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + if (!mstb) + return -EINVAL; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto fail_put; + } + + len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); + txmsg->dst = mstb; + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + if (txmsg->reply.reply_type == 1) { + ret = -EINVAL; + } else + ret = 0; + } + kfree(txmsg); +fail_put: + drm_dp_put_mst_branch_device(mstb); + return ret; +} + +static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) +{ + struct drm_dp_sideband_msg_reply_body reply; + + reply.reply_type = 1; + reply.req_type = req_type; + drm_dp_encode_sideband_reply(&reply, msg); + return 0; +} + +static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + int req_type, int seqno, bool broadcast) +{ + struct drm_dp_sideband_msg_tx *txmsg; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return -ENOMEM; + + txmsg->dst = mstb; + txmsg->seqno = seqno; + drm_dp_encode_up_ack_reply(txmsg, req_type); + + mutex_lock(&mgr->qlock); + list_add_tail(&txmsg->next, &mgr->tx_msg_upq); + if (!mgr->tx_up_in_progress) { + process_single_up_tx_qlock(mgr); + } + mutex_unlock(&mgr->qlock); + return 0; +} + +static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count) +{ + switch (dp_link_bw) { + case DP_LINK_BW_1_62: + return 3 * dp_link_count; + case DP_LINK_BW_2_7: + return 5 * dp_link_count; + case DP_LINK_BW_5_4: + return 10 * dp_link_count; + } + return 0; +} + +/** + * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager + * @mgr: manager to set state for + * @mst_state: true to enable MST on this connector - false to disable. + * + * This is called by the driver when it detects an MST capable device plugged + * into a DP MST capable port, or when a DP MST capable device is unplugged. 
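+ *
+ * Illustrative use (a sketch only; DP_MSTM_CAP and DP_MST_CAP are the
+ * generic DPCD capability definitions, the probe below is assumed to be
+ * done by the driver, and error handling is omitted):
+ *
+ *	u8 cap = 0;
+ *	drm_dp_dpcd_readb(mgr->aux, DP_MSTM_CAP, &cap);
+ *	if (cap & DP_MST_CAP)
+ *		drm_dp_mst_topology_mgr_set_mst(mgr, true);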
+ */ +int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) +{ + int ret = 0; + struct drm_dp_mst_branch *mstb = NULL; + + mutex_lock(&mgr->lock); + if (mst_state == mgr->mst_state) + goto out_unlock; + + mgr->mst_state = mst_state; + /* set the device into MST mode */ + if (mst_state) { + WARN_ON(mgr->mst_primary); + + /* get dpcd info */ + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("failed to read DPCD\n"); + goto out_unlock; + } + + mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); + mgr->total_pbn = 2560; + mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div); + mgr->avail_slots = mgr->total_slots; + + /* add initial branch device at LCT 1 */ + mstb = drm_dp_add_mst_branch_device(1, NULL); + if (mstb == NULL) { + ret = -ENOMEM; + goto out_unlock; + } + mstb->mgr = mgr; + + /* give this the main reference */ + mgr->mst_primary = mstb; + kref_get(&mgr->mst_primary->kref); + + { + struct drm_dp_payload reset_pay; + reset_pay.start_slot = 0; + reset_pay.num_slots = 0x3f; + drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); + } + + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + if (ret < 0) { + goto out_unlock; + } + + + /* sort out guid */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); + if (ret != 16) { + DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); + goto out_unlock; + } + + mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); + if (!mgr->guid_valid) { + ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); + mgr->guid_valid = true; + } + + queue_work(system_long_wq, &mgr->work); + + ret = 0; + } else { + /* disable MST on the device */ + mstb = mgr->mst_primary; + mgr->mst_primary = NULL; + /* this can fail if the device is gone */ + drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + ret = 0; + memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); + mgr->payload_mask = 0; + set_bit(0, &mgr->payload_mask); + } + +out_unlock: + mutex_unlock(&mgr->lock); + if (mstb) + drm_dp_put_mst_branch_device(mstb); + return ret; + +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); + +/** + * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager + * @mgr: manager to suspend + * + * This function tells the MST device that we can't handle UP messages + * anymore. This should stop it from sending any since we are suspended. + */ +void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->lock); + drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UPSTREAM_IS_SRC); + mutex_unlock(&mgr->lock); +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); + +/** + * drm_dp_mst_topology_mgr_resume() - resume the MST manager + * @mgr: manager to resume + * + * This will fetch DPCD and see if the device is still there, + * if it is, it will rewrite the MSTM control bits, and return. + * + * if the device fails this returns -1, and the driver should do + * a full MST reprobe, in case we were undocked. 
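+ *
+ * Illustrative resume handling (a sketch only): when this returns -1
+ * the driver should treat the topology as gone and reprobe, e.g.
+ *
+ *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
+ *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ *		... full reprobe of the digital port from scratch ...
+ *	}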
+ */ +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + + mutex_lock(&mgr->lock); + + if (mgr->mst_primary) { + int sret; + sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); + if (sret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + ret = -1; + goto out_unlock; + } + + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + ret = -1; + goto out_unlock; + } + ret = 0; + } else + ret = -1; + +out_unlock: + mutex_unlock(&mgr->lock); + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); + +static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) +{ + int len; + u8 replyblock[32]; + int replylen, origlen, curreply; + int ret; + struct drm_dp_sideband_msg_rx *msg; + int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE; + msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv; + + len = min(mgr->max_dpcd_transaction_bytes, 16); + ret = drm_dp_dpcd_read(mgr->aux, basereg, + replyblock, len); + if (ret != len) { + DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); + return; + } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); + if (!ret) { + DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); + return; + } + replylen = msg->curchunk_len + msg->curchunk_hdrlen; + + origlen = replylen; + replylen -= len; + curreply = len; + while (replylen > 0) { + len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); + ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, + replyblock, len); + if (ret != len) { + DRM_DEBUG_KMS("failed to read a chunk\n"); + } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); + if (ret == false) + DRM_DEBUG_KMS("failed to build sideband msg\n"); + curreply += len; + replylen -= len; + } +} + +static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + + drm_dp_get_one_sb_msg(mgr, false); + + if (mgr->down_rep_recv.have_eomt) { + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + int slot = -1; + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->down_rep_recv.initial_hdr.lct, + mgr->down_rep_recv.initial_hdr.rad); + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + /* find the message */ + slot = mgr->down_rep_recv.initial_hdr.seqno; + mutex_lock(&mgr->qlock); + txmsg = mstb->tx_slots[slot]; + /* remove from slots */ + mutex_unlock(&mgr->qlock); + + if (!txmsg) { + DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", + mstb, + mgr->down_rep_recv.initial_hdr.seqno, + mgr->down_rep_recv.initial_hdr.lct, + mgr->down_rep_recv.initial_hdr.rad[0], + mgr->down_rep_recv.msg[0]); + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); + if (txmsg->reply.reply_type == 1) { + DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); + } + + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + drm_dp_put_mst_branch_device(mstb); + + mutex_lock(&mgr->qlock); + txmsg->state 
= DRM_DP_SIDEBAND_TX_RX; + mstb->tx_slots[slot] = NULL; + mutex_unlock(&mgr->qlock); + + wake_up(&mgr->tx_waitq); + } + return ret; +} + +static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) +{ + int ret = 0; + drm_dp_get_one_sb_msg(mgr, true); + + if (mgr->up_req_recv.have_eomt) { + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_mst_branch *mstb; + bool seqno; + mstb = drm_dp_get_mst_branch_device(mgr, + mgr->up_req_recv.initial_hdr.lct, + mgr->up_req_recv.initial_hdr.rad); + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } + + seqno = mgr->up_req_recv.initial_hdr.seqno; + drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + + if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + drm_dp_update_port(mstb, &msg.u.conn_stat); + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); + (*mgr->cbs->hotplug)(mgr); + + } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); + } + + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + } + return ret; +} + +/** + * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify + * @mgr: manager to notify irq for. + * @esi: 4 bytes from SINK_COUNT_ESI + * + * This should be called from the driver when it detects a short IRQ, + * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The + * topology manager will process the sideband messages received as a result + * of this. + */ +int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) +{ + int ret = 0; + int sc; + *handled = false; + sc = esi[0] & 0x3f; + + if (sc != mgr->sink_count) { + mgr->sink_count = sc; + *handled = true; + } + + if (esi[1] & DP_DOWN_REP_MSG_RDY) { + ret = drm_dp_mst_handle_down_rep(mgr); + *handled = true; + } + + if (esi[1] & DP_UP_REQ_MSG_RDY) { + ret |= drm_dp_mst_handle_up_req(mgr); + *handled = true; + } + + drm_dp_mst_kick_tx(mgr); + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_hpd_irq); + +/** + * drm_dp_mst_detect_port() - get connection status for an MST port + * @mgr: manager for this port + * @port: unverified pointer to a port + * + * This returns the current connection state for a port. 
It validates the + * port pointer still exists so the caller doesn't require a reference + */ +enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + enum drm_connector_status status = connector_status_disconnected; + + /* we need to search for the port in the mgr in case its gone */ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return connector_status_disconnected; + + if (!port->ddps) + goto out; + + switch (port->pdt) { + case DP_PEER_DEVICE_NONE: + case DP_PEER_DEVICE_MST_BRANCHING: + break; + + case DP_PEER_DEVICE_SST_SINK: + status = connector_status_connected; + break; + case DP_PEER_DEVICE_DP_LEGACY_CONV: + if (port->ldps) + status = connector_status_connected; + break; + } +out: + drm_dp_put_port(port); + return status; +} +EXPORT_SYMBOL(drm_dp_mst_detect_port); + +/** + * drm_dp_mst_get_edid() - get EDID for an MST port + * @connector: toplevel connector to get EDID for + * @mgr: manager for this port + * @port: unverified pointer to a port. + * + * This returns an EDID for the port connected to a connector, + * It validates the pointer still exists so the caller doesn't require a + * reference. + */ +struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + struct edid *edid = NULL; + + /* we need to search for the port in the mgr in case its gone */ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return NULL; + + edid = drm_get_edid(connector, &port->aux.ddc); + drm_dp_put_port(port); + return edid; +} +EXPORT_SYMBOL(drm_dp_mst_get_edid); + +/** + * drm_dp_find_vcpi_slots() - find slots for this PBN value + * @mgr: manager to use + * @pbn: payload bandwidth to convert into slots. + */ +int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, + int pbn) +{ + int num_slots; + + num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); + + if (num_slots > mgr->avail_slots) + return -ENOSPC; + return num_slots; +} +EXPORT_SYMBOL(drm_dp_find_vcpi_slots); + +static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_vcpi *vcpi, int pbn) +{ + int num_slots; + int ret; + + num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); + + if (num_slots > mgr->avail_slots) + return -ENOSPC; + + vcpi->pbn = pbn; + vcpi->aligned_pbn = num_slots * mgr->pbn_div; + vcpi->num_slots = num_slots; + + ret = drm_dp_mst_assign_payload_id(mgr, vcpi); + if (ret < 0) + return ret; + return 0; +} + +/** + * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel + * @mgr: manager for this port + * @port: port to allocate a virtual channel for. + * @pbn: payload bandwidth number to request + * @slots: returned number of slots for this PBN. 
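
As a hedged illustration of how drm_dp_find_vcpi_slots() above, drm_dp_mst_allocate_vcpi() below and drm_dp_calc_pbn_mode() later in this file fit together, the sketch that follows is not part of this patch; the function name and the idea of doing both steps in one place are illustrative assumptions (a real driver would typically split them between its mode-validation and enable paths). For example, a 154 MHz mode at 30 bpp works out to 689 PBN (see test_calc_pbn_mode() later in this file).

static bool example_mst_reserve_bandwidth(struct drm_dp_mst_topology_mgr *mgr,
					  struct drm_dp_mst_port *port,
					  int clock_khz, int bpp)
{
	int pbn, slots;

	/* Convert the mode's dot clock and bpp into a PBN value. */
	pbn = drm_dp_calc_pbn_mode(clock_khz, bpp);

	/* Check whether enough time slots are free on the MST link... */
	slots = drm_dp_find_vcpi_slots(mgr, pbn);
	if (slots < 0)
		return false;

	/* ...then actually reserve them for this port's VCPI. */
	return drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots);
}
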
+ */ +bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots) +{ + int ret; + + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return false; + + if (port->vcpi.vcpi > 0) { + DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); + if (pbn == port->vcpi.pbn) { + *slots = port->vcpi.num_slots; + return true; + } + } + + ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn); + if (ret) { + DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret); + goto out; + } + DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots); + *slots = port->vcpi.num_slots; + + drm_dp_put_port(port); + return true; +out: + return false; +} +EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); + +/** + * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI + * @mgr: manager for this port + * @port: unverified pointer to a port. + * + * This just resets the number of slots for the ports VCPI for later programming. + */ +void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return; + port->vcpi.num_slots = 0; + drm_dp_put_port(port); +} +EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); + +/** + * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI + * @mgr: manager for this port + * @port: unverified port to deallocate vcpi for + */ +void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return; + + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + port->vcpi.num_slots = 0; + port->vcpi.pbn = 0; + port->vcpi.aligned_pbn = 0; + port->vcpi.vcpi = 0; + drm_dp_put_port(port); +} +EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); + +static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + int id, struct drm_dp_payload *payload) +{ + u8 payload_alloc[3], status; + int ret; + int retries = 0; + + drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, + DP_PAYLOAD_TABLE_UPDATED); + + payload_alloc[0] = id; + payload_alloc[1] = payload->start_slot; + payload_alloc[2] = payload->num_slots; + + ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); + if (ret != 3) { + DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret); + goto fail; + } + +retry: + ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + if (ret < 0) { + DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); + goto fail; + } + + if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { + retries++; + if (retries < 20) { + usleep_range(10000, 20000); + goto retry; + } + DRM_DEBUG_KMS("status not set after read payload table status %d\n", status); + ret = -EINVAL; + goto fail; + } + ret = 0; +fail: + return ret; +} + + +/** + * drm_dp_check_act_status() - Check ACT handled status. + * @mgr: manager to use + * + * Check the payload status bits in the DPCD for ACT handled completion. 
+ */ +int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) +{ + u8 status; + int ret; + int count = 0; + + do { + ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + + if (ret < 0) { + DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); + goto fail; + } + + if (status & DP_PAYLOAD_ACT_HANDLED) + break; + count++; + udelay(100); + + } while (count < 30); + + if (!(status & DP_PAYLOAD_ACT_HANDLED)) { + DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); + ret = -EINVAL; + goto fail; + } + return 0; +fail: + return ret; +} +EXPORT_SYMBOL(drm_dp_check_act_status); + +/** + * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. + * @clock: dot clock for the mode + * @bpp: bpp for the mode. + * + * This uses the formula in the spec to calculate the PBN value for a mode. + */ +int drm_dp_calc_pbn_mode(int clock, int bpp) +{ + fixed20_12 pix_bw; + fixed20_12 fbpp; + fixed20_12 result; + fixed20_12 margin, tmp; + u32 res; + + pix_bw.full = dfixed_const(clock); + fbpp.full = dfixed_const(bpp); + tmp.full = dfixed_const(8); + fbpp.full = dfixed_div(fbpp, tmp); + + result.full = dfixed_mul(pix_bw, fbpp); + margin.full = dfixed_const(54); + tmp.full = dfixed_const(64); + margin.full = dfixed_div(margin, tmp); + result.full = dfixed_div(result, margin); + + margin.full = dfixed_const(1006); + tmp.full = dfixed_const(1000); + margin.full = dfixed_div(margin, tmp); + result.full = dfixed_mul(result, margin); + + result.full = dfixed_div(result, tmp); + result.full = dfixed_ceil(result); + res = dfixed_trunc(result); + return res; +} +EXPORT_SYMBOL(drm_dp_calc_pbn_mode); + +static int test_calc_pbn_mode(void) +{ + int ret; + ret = drm_dp_calc_pbn_mode(154000, 30); + if (ret != 689) + return -EINVAL; + ret = drm_dp_calc_pbn_mode(234000, 30); + if (ret != 1047) + return -EINVAL; + return 0; +} + +/* we want to kick the TX after we've ack the up/down IRQs. */ +static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) +{ + queue_work(system_long_wq, &mgr->tx_work); +} + +static void drm_dp_mst_dump_mstb(struct seq_file *m, + struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + int tabs = mstb->lct; + char prefix[10]; + int i; + + for (i = 0; i < tabs; i++) + prefix[i] = '\t'; + prefix[i] = '\0'; + + seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); + list_for_each_entry(port, &mstb->ports, next) { + seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector); + if (port->mstb) + drm_dp_mst_dump_mstb(m, port->mstb); + } +} + +static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, + char *buf) +{ + int ret; + int i; + for (i = 0; i < 4; i++) { + ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16); + if (ret != 16) + break; + } + if (i == 4) + return true; + return false; +} + +/** + * drm_dp_mst_dump_topology(): dump topology to seq file. + * @m: seq_file to dump output to + * @mgr: manager to dump current topology for. + * + * helper to dump MST topology to a seq file for debugfs. 
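
A quick sanity check of the arithmetic in drm_dp_calc_pbn_mode() above, matching the first case checked by test_calc_pbn_mode() (clock is in kHz, so 154000 is a 154 MHz dot clock):

	154000 * 30 / 8   = 577500
	577500 * 64 / 54 ~= 684444.4   (the 54/64 factor in the code)
	684444.4 * 1.006 ~= 688551.1   (the 1.006 margin in the code)
	688551.1 / 1000  ~= 688.6, rounded up to 689 PBN
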
+ */ +void drm_dp_mst_dump_topology(struct seq_file *m, + struct drm_dp_mst_topology_mgr *mgr) +{ + int i; + struct drm_dp_mst_port *port; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) + drm_dp_mst_dump_mstb(m, mgr->mst_primary); + + /* dump VCPIs */ + mutex_unlock(&mgr->lock); + + mutex_lock(&mgr->payload_lock); + seq_printf(m, "vcpi: %lx\n", mgr->payload_mask); + + for (i = 0; i < mgr->max_payloads; i++) { + if (mgr->proposed_vcpis[i]) { + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); + seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots); + } else + seq_printf(m, "vcpi %d:unsed\n", i); + } + for (i = 0; i < mgr->max_payloads; i++) { + seq_printf(m, "payload %d: %d, %d, %d\n", + i, + mgr->payloads[i].payload_state, + mgr->payloads[i].start_slot, + mgr->payloads[i].num_slots); + + + } + mutex_unlock(&mgr->payload_lock); + + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { + u8 buf[64]; + bool bret; + int ret; + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); + seq_printf(m, "dpcd: "); + for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); + seq_printf(m, "faux/mst: "); + for (i = 0; i < 2; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); + seq_printf(m, "mst ctrl: "); + for (i = 0; i < 1; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + + bret = dump_dp_payload_table(mgr, buf); + if (bret == true) { + seq_printf(m, "payload table: "); + for (i = 0; i < 63; i++) + seq_printf(m, "%02x ", buf[i]); + seq_printf(m, "\n"); + } + + } + + mutex_unlock(&mgr->lock); + +} +EXPORT_SYMBOL(drm_dp_mst_dump_topology); + +static void drm_dp_tx_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); + + mutex_lock(&mgr->qlock); + if (mgr->tx_down_in_progress) + process_single_down_tx_qlock(mgr); + mutex_unlock(&mgr->qlock); +} + +/** + * drm_dp_mst_topology_mgr_init - initialise a topology manager + * @mgr: manager struct to initialise + * @dev: device providing this structure - for i2c addition. + * @aux: DP helper aux channel to talk to this device + * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit + * @max_payloads: maximum number of payloads this GPU can source + * @conn_base_id: the connector object ID the MST device is connected to. 
+ * + * Return 0 for success, or negative error code on failure + */ +int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, + struct device *dev, struct drm_dp_aux *aux, + int max_dpcd_transaction_bytes, + int max_payloads, int conn_base_id) +{ + mutex_init(&mgr->lock); + mutex_init(&mgr->qlock); + mutex_init(&mgr->payload_lock); + INIT_LIST_HEAD(&mgr->tx_msg_upq); + INIT_LIST_HEAD(&mgr->tx_msg_downq); + INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); + INIT_WORK(&mgr->tx_work, drm_dp_tx_work); + init_waitqueue_head(&mgr->tx_waitq); + mgr->dev = dev; + mgr->aux = aux; + mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; + mgr->max_payloads = max_payloads; + mgr->conn_base_id = conn_base_id; + mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); + if (!mgr->payloads) + return -ENOMEM; + mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); + if (!mgr->proposed_vcpis) + return -ENOMEM; + set_bit(0, &mgr->payload_mask); + test_calc_pbn_mode(); + return 0; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); + +/** + * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. + * @mgr: manager to destroy + */ +void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->payload_lock); + kfree(mgr->payloads); + mgr->payloads = NULL; + kfree(mgr->proposed_vcpis); + mgr->proposed_vcpis = NULL; + mutex_unlock(&mgr->payload_lock); + mgr->dev = NULL; + mgr->aux = NULL; +} +EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); + +/* I2C device */ +static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); + struct drm_dp_mst_branch *mstb; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + unsigned int i; + bool reading = false; + struct drm_dp_sideband_msg_req_body msg; + struct drm_dp_sideband_msg_tx *txmsg = NULL; + int ret; + + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); + if (!mstb) + return -EREMOTEIO; + + /* construct i2c msg */ + /* see if last msg is a read */ + if (msgs[num - 1].flags & I2C_M_RD) + reading = true; + + if (!reading) { + DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); + ret = -EIO; + goto out; + } + + msg.req_type = DP_REMOTE_I2C_READ; + msg.u.i2c_read.num_transactions = num - 1; + msg.u.i2c_read.port_number = port->port_num; + for (i = 0; i < num - 1; i++) { + msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; + msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; + msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; + } + msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; + msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) { + ret = -ENOMEM; + goto out; + } + + txmsg->dst = mstb; + drm_dp_encode_sideband_req(&msg, txmsg); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0) { + + if (txmsg->reply.reply_type == 1) { /* got a NAK back */ + ret = -EREMOTEIO; + goto out; + } + if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { + ret = -EIO; + goto out; + } + memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); + ret = num; + } +out: + kfree(txmsg); + drm_dp_put_mst_branch_device(mstb); + return ret; +} + +static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter) +{ + return 
I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +static const struct i2c_algorithm drm_dp_mst_i2c_algo = { + .functionality = drm_dp_mst_i2c_functionality, + .master_xfer = drm_dp_mst_i2c_xfer, +}; + +/** + * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. + */ +static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux) +{ + aux->ddc.algo = &drm_dp_mst_i2c_algo; + aux->ddc.algo_data = aux; + aux->ddc.retries = 3; + + aux->ddc.class = I2C_CLASS_DDC; + aux->ddc.owner = THIS_MODULE; + aux->ddc.dev.parent = aux->dev; + aux->ddc.dev.of_node = aux->dev->of_node; + + strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), + sizeof(aux->ddc.name)); + + return i2c_add_adapter(&aux->ddc); +} + +/** + * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter + * @aux: DisplayPort AUX channel + */ +static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) +{ + i2c_del_adapter(&aux->ddc); +} diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8218078b6133..3242e208c0d0 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -1,31 +1,11 @@ -/** - * \file drm_drv.c - * Generic driver template - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - * - * To use this template, you must at least define the following (samples - * given for the MGA driver): - * - * \code - * #define DRIVER_AUTHOR "VA Linux Systems, Inc." - * - * #define DRIVER_NAME "mga" - * #define DRIVER_DESC "Matrox G200/G400" - * #define DRIVER_DATE "20001127" - * - * #define drm_x mga_##x - * \endcode - */ - /* - * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com + * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * + * Author Rickard E. (Rik) Faith <faith@valinux.com> + * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation @@ -40,432 +20,906 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
*/ #include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/mount.h> #include <linux/slab.h> -#include <linux/export.h> #include <drm/drmP.h> #include <drm/drm_core.h> +#include "drm_legacy.h" +unsigned int drm_debug = 0; /* 1 to enable debug output */ +EXPORT_SYMBOL(drm_debug); -static int drm_version(struct drm_device *dev, void *data, - struct drm_file *file_priv); - -#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ - [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} - -/** Ioctl table */ -static const struct drm_ioctl_desc drm_ioctls[] = { - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), - DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), - DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), - - DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), - - DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), - - DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - - DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - -#if __OS_HAS_AGP - 
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -#endif - - DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), - - DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), - - DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - - DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), - - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), - - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, 
DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), -}; +unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ -#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) +unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ -/** File operations structure */ -static const struct file_operations drm_stub_fops = { - .owner = THIS_MODULE, - .open = drm_stub_open, - .llseek = noop_llseek, -}; +/* + * Default to use monotonic timestamps for wait-for-vblank and page-flip + * complete events. + */ +unsigned int drm_timestamp_monotonic = 1; -static int __init drm_core_init(void) +MODULE_AUTHOR(CORE_AUTHOR); +MODULE_DESCRIPTION(CORE_DESC); +MODULE_LICENSE("GPL and additional rights"); +MODULE_PARM_DESC(debug, "Enable debug output"); +MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); +MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); +MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); + +module_param_named(debug, drm_debug, int, 0600); +module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); +module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); +module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); + +static DEFINE_SPINLOCK(drm_minor_lock); +static struct idr drm_minors_idr; + +struct class *drm_class; +static struct dentry *drm_debugfs_root; + +int drm_err(const char *func, const char *format, ...) { - int ret = -ENOMEM; + struct va_format vaf; + va_list args; + int r; - drm_global_init(); - drm_connector_ida_init(); - idr_init(&drm_minors_idr); + va_start(args, format); - if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) - goto err_p1; + vaf.fmt = format; + vaf.va = &args; - drm_class = drm_sysfs_create(THIS_MODULE, "drm"); - if (IS_ERR(drm_class)) { - printk(KERN_ERR "DRM: Error creating drm class.\n"); - ret = PTR_ERR(drm_class); - goto err_p2; + r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); + + va_end(args); + + return r; +} +EXPORT_SYMBOL(drm_err); + +void drm_ut_debug_printk(const char *function_name, const char *format, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, format); + vaf.fmt = format; + vaf.va = &args; + + printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf); + + va_end(args); +} +EXPORT_SYMBOL(drm_ut_debug_printk); + +struct drm_master *drm_master_create(struct drm_minor *minor) +{ + struct drm_master *master; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return NULL; + + kref_init(&master->refcount); + spin_lock_init(&master->lock.spinlock); + init_waitqueue_head(&master->lock.lock_queue); + if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) { + kfree(master); + return NULL; } + INIT_LIST_HEAD(&master->magicfree); + master->minor = minor; - drm_debugfs_root = debugfs_create_dir("dri", NULL); - if (!drm_debugfs_root) { - DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); - ret = -1; - goto err_p3; + return master; +} + +struct drm_master *drm_master_get(struct drm_master *master) +{ + kref_get(&master->refcount); + return master; +} +EXPORT_SYMBOL(drm_master_get); + +static void drm_master_destroy(struct kref *kref) +{ + struct drm_master *master = container_of(kref, struct drm_master, refcount); + struct drm_magic_entry *pt, *next; + struct drm_device *dev = master->minor->dev; + struct drm_map_list *r_list, *list_temp; + + mutex_lock(&dev->struct_mutex); + if (dev->driver->master_destroy) + dev->driver->master_destroy(dev, master); + + list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { + if (r_list->master == master) { + drm_rmmap_locked(dev, r_list->map); + r_list = NULL; + } } - DRM_INFO("Initialized %s %d.%d.%d %s\n", - CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); - return 0; -err_p3: - drm_sysfs_destroy(); -err_p2: - unregister_chrdev(DRM_MAJOR, "drm"); + if (master->unique) { + kfree(master->unique); + master->unique = NULL; + master->unique_len = 0; + } - idr_destroy(&drm_minors_idr); -err_p1: - return ret; + list_for_each_entry_safe(pt, next, &master->magicfree, head) { + list_del(&pt->head); + drm_ht_remove_item(&master->magiclist, &pt->hash_item); + kfree(pt); + } + + drm_ht_remove(&master->magiclist); + + mutex_unlock(&dev->struct_mutex); + kfree(master); } -static void __exit drm_core_exit(void) +void drm_master_put(struct drm_master **master) { - debugfs_remove(drm_debugfs_root); - drm_sysfs_destroy(); + kref_put(&(*master)->refcount, drm_master_destroy); + *master = NULL; +} +EXPORT_SYMBOL(drm_master_put); - unregister_chrdev(DRM_MAJOR, "drm"); +int drm_setmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = 0; - drm_connector_ida_destroy(); - idr_destroy(&drm_minors_idr); + mutex_lock(&dev->master_mutex); + if (file_priv->is_master) + goto out_unlock; + + if (file_priv->minor->master) { + ret = -EINVAL; + goto out_unlock; + } + + if (!file_priv->master) { + ret = -EINVAL; + goto out_unlock; + } + + file_priv->minor->master = drm_master_get(file_priv->master); + file_priv->is_master = 1; + if (dev->driver->master_set) { + ret = dev->driver->master_set(dev, file_priv, false); + if (unlikely(ret != 0)) { + file_priv->is_master = 0; + drm_master_put(&file_priv->minor->master); + } + } + +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; } -module_init(drm_core_init); -module_exit(drm_core_exit); +int drm_dropmaster_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret = -EINVAL; -/** - * Copy and IOCTL return string to user space + mutex_lock(&dev->master_mutex); + if (!file_priv->is_master) + goto 
out_unlock; + + if (!file_priv->minor->master) + goto out_unlock; + + ret = 0; + if (dev->driver->master_drop) + dev->driver->master_drop(dev, file_priv, false); + drm_master_put(&file_priv->minor->master); + file_priv->is_master = 0; + +out_unlock: + mutex_unlock(&dev->master_mutex); + return ret; +} + +/* + * DRM Minors + * A DRM device can provide several char-dev interfaces on the DRM-Major. Each + * of them is represented by a drm_minor object. Depending on the capabilities + * of the device-driver, different interfaces are registered. + * + * Minors can be accessed via dev->$minor_name. This pointer is either + * NULL or a valid drm_minor pointer and stays valid as long as the device is + * valid. This means, DRM minors have the same life-time as the underlying + * device. However, this doesn't mean that the minor is active. Minors are + * registered and unregistered dynamically according to device-state. */ -static int drm_copy_field(char *buf, size_t *buf_len, const char *value) + +static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, + unsigned int type) { - int len; + switch (type) { + case DRM_MINOR_LEGACY: + return &dev->primary; + case DRM_MINOR_RENDER: + return &dev->render; + case DRM_MINOR_CONTROL: + return &dev->control; + default: + return NULL; + } +} - /* don't overflow userbuf */ - len = strlen(value); - if (len > *buf_len) - len = *buf_len; +static int drm_minor_alloc(struct drm_device *dev, unsigned int type) +{ + struct drm_minor *minor; + unsigned long flags; + int r; + + minor = kzalloc(sizeof(*minor), GFP_KERNEL); + if (!minor) + return -ENOMEM; + + minor->type = type; + minor->dev = dev; + + idr_preload(GFP_KERNEL); + spin_lock_irqsave(&drm_minor_lock, flags); + r = idr_alloc(&drm_minors_idr, + NULL, + 64 * type, + 64 * (type + 1), + GFP_NOWAIT); + spin_unlock_irqrestore(&drm_minor_lock, flags); + idr_preload_end(); + + if (r < 0) + goto err_free; + + minor->index = r; + + minor->kdev = drm_sysfs_minor_alloc(minor); + if (IS_ERR(minor->kdev)) { + r = PTR_ERR(minor->kdev); + goto err_index; + } - /* let userspace know exact length of driver value (which could be - * larger than the userspace-supplied buffer) */ - *buf_len = strlen(value); + *drm_minor_get_slot(dev, type) = minor; + return 0; + +err_index: + spin_lock_irqsave(&drm_minor_lock, flags); + idr_remove(&drm_minors_idr, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); +err_free: + kfree(minor); + return r; +} + +static void drm_minor_free(struct drm_device *dev, unsigned int type) +{ + struct drm_minor **slot, *minor; + unsigned long flags; + + slot = drm_minor_get_slot(dev, type); + minor = *slot; + if (!minor) + return; + + drm_mode_group_destroy(&minor->mode_group); + put_device(minor->kdev); + + spin_lock_irqsave(&drm_minor_lock, flags); + idr_remove(&drm_minors_idr, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); + + kfree(minor); + *slot = NULL; +} - /* finally, try filling in the userbuf */ - if (len && buf) - if (copy_to_user(buf, value, len)) - return -EFAULT; +static int drm_minor_register(struct drm_device *dev, unsigned int type) +{ + struct drm_minor *minor; + unsigned long flags; + int ret; + + DRM_DEBUG("\n"); + + minor = *drm_minor_get_slot(dev, type); + if (!minor) + return 0; + + ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); + if (ret) { + DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); + return ret; + } + + ret = device_add(minor->kdev); + if (ret) + goto err_debugfs; + + /* replace NULL with @minor so 
lookups will succeed from now on */ + spin_lock_irqsave(&drm_minor_lock, flags); + idr_replace(&drm_minors_idr, minor, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); + + DRM_DEBUG("new minor registered %d\n", minor->index); return 0; + +err_debugfs: + drm_debugfs_cleanup(minor); + return ret; +} + +static void drm_minor_unregister(struct drm_device *dev, unsigned int type) +{ + struct drm_minor *minor; + unsigned long flags; + + minor = *drm_minor_get_slot(dev, type); + if (!minor || !device_is_registered(minor->kdev)) + return; + + /* replace @minor with NULL so lookups will fail from now on */ + spin_lock_irqsave(&drm_minor_lock, flags); + idr_replace(&drm_minors_idr, NULL, minor->index); + spin_unlock_irqrestore(&drm_minor_lock, flags); + + device_del(minor->kdev); + dev_set_drvdata(minor->kdev, NULL); /* safety belt */ + drm_debugfs_cleanup(minor); } /** - * Get version information + * drm_minor_acquire - Acquire a DRM minor + * @minor_id: Minor ID of the DRM-minor + * + * Looks up the given minor-ID and returns the respective DRM-minor object. The + * refence-count of the underlying device is increased so you must release this + * object with drm_minor_release(). * - * \param inode device inode. - * \param filp file pointer. - * \param cmd command. - * \param arg user argument, pointing to a drm_version structure. - * \return zero on success or negative number on failure. + * As long as you hold this minor, it is guaranteed that the object and the + * minor->dev pointer will stay valid! However, the device may get unplugged and + * unregistered while you hold the minor. * - * Fills in the version information in \p arg. + * Returns: + * Pointer to minor-object with increased device-refcount, or PTR_ERR on + * failure. */ -static int drm_version(struct drm_device *dev, void *data, - struct drm_file *file_priv) +struct drm_minor *drm_minor_acquire(unsigned int minor_id) { - struct drm_version *version = data; - int err; + struct drm_minor *minor; + unsigned long flags; + + spin_lock_irqsave(&drm_minor_lock, flags); + minor = idr_find(&drm_minors_idr, minor_id); + if (minor) + drm_dev_ref(minor->dev); + spin_unlock_irqrestore(&drm_minor_lock, flags); + + if (!minor) { + return ERR_PTR(-ENODEV); + } else if (drm_device_is_unplugged(minor->dev)) { + drm_dev_unref(minor->dev); + return ERR_PTR(-ENODEV); + } - version->version_major = dev->driver->major; - version->version_minor = dev->driver->minor; - version->version_patchlevel = dev->driver->patchlevel; - err = drm_copy_field(version->name, &version->name_len, - dev->driver->name); - if (!err) - err = drm_copy_field(version->date, &version->date_len, - dev->driver->date); - if (!err) - err = drm_copy_field(version->desc, &version->desc_len, - dev->driver->desc); + return minor; +} - return err; +/** + * drm_minor_release - Release DRM minor + * @minor: Pointer to DRM minor object + * + * Release a minor that was previously acquired via drm_minor_acquire(). + */ +void drm_minor_release(struct drm_minor *minor) +{ + drm_dev_unref(minor->dev); } /** - * drm_ioctl_permit - Check ioctl permissions against caller + * drm_put_dev - Unregister and release a DRM device + * @dev: DRM device + * + * Called at module unload time or when a PCI device is unplugged. * - * @flags: ioctl permission flags. - * @file_priv: Pointer to struct drm_file identifying the caller. + * Use of this function is discouraged. It will eventually go away completely. + * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead. 
* - * Checks whether the caller is allowed to run an ioctl with the - * indicated permissions. If so, returns zero. Otherwise returns an - * error code suitable for ioctl return. + * Cleans up all DRM device, calling drm_lastclose(). */ -static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) +void drm_put_dev(struct drm_device *dev) { - /* ROOT_ONLY is only for CAP_SYS_ADMIN */ - if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) - return -EACCES; - - /* AUTH is only for authenticated or render client */ - if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) && - !file_priv->authenticated)) - return -EACCES; - - /* MASTER is only for master or control clients */ - if (unlikely((flags & DRM_MASTER) && !file_priv->is_master && - !drm_is_control_client(file_priv))) - return -EACCES; - - /* Control clients must be explicitly allowed */ - if (unlikely(!(flags & DRM_CONTROL_ALLOW) && - drm_is_control_client(file_priv))) - return -EACCES; - - /* Render clients must be explicitly allowed */ - if (unlikely(!(flags & DRM_RENDER_ALLOW) && - drm_is_render_client(file_priv))) - return -EACCES; + DRM_DEBUG("\n"); - return 0; + if (!dev) { + DRM_ERROR("cleanup called no dev\n"); + return; + } + + drm_dev_unregister(dev); + drm_dev_unref(dev); +} +EXPORT_SYMBOL(drm_put_dev); + +void drm_unplug_dev(struct drm_device *dev) +{ + /* for a USB device */ + drm_minor_unregister(dev, DRM_MINOR_LEGACY); + drm_minor_unregister(dev, DRM_MINOR_RENDER); + drm_minor_unregister(dev, DRM_MINOR_CONTROL); + + mutex_lock(&drm_global_mutex); + + drm_device_set_unplugged(dev); + + if (dev->open_count == 0) { + drm_put_dev(dev); + } + mutex_unlock(&drm_global_mutex); +} +EXPORT_SYMBOL(drm_unplug_dev); + +/* + * DRM internal mount + * We want to be able to allocate our own "struct address_space" to control + * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow + * stand-alone address_space objects, so we need an underlying inode. As there + * is no way to allocate an independent inode easily, we need a fake internal + * VFS mount-point. + * + * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free() + * frees it again. You are allowed to use iget() and iput() to get references to + * the inode. But each drm_fs_inode_new() call must be paired with exactly one + * drm_fs_inode_free() call (which does not have to be the last iput()). + * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it + * between multiple inode-users. You could, technically, call + * iget() + drm_fs_inode_free() directly after alloc and sometime later do an + * iput(), but this way you'd end up with a new vfsmount for each inode. 
+ */ + +static int drm_fs_cnt; +static struct vfsmount *drm_fs_mnt; + +static const struct dentry_operations drm_fs_dops = { + .d_dname = simple_dname, +}; + +static const struct super_operations drm_fs_sops = { + .statfs = simple_statfs, +}; + +static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_pseudo(fs_type, + "drm:", + &drm_fs_sops, + &drm_fs_dops, + 0x010203ff); +} + +static struct file_system_type drm_fs_type = { + .name = "drm", + .owner = THIS_MODULE, + .mount = drm_fs_mount, + .kill_sb = kill_anon_super, +}; + +static struct inode *drm_fs_inode_new(void) +{ + struct inode *inode; + int r; + + r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt); + if (r < 0) { + DRM_ERROR("Cannot mount pseudo fs: %d\n", r); + return ERR_PTR(r); + } + + inode = alloc_anon_inode(drm_fs_mnt->mnt_sb); + if (IS_ERR(inode)) + simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); + + return inode; +} + +static void drm_fs_inode_free(struct inode *inode) +{ + if (inode) { + iput(inode); + simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); + } } /** - * Called whenever a process performs an ioctl on /dev/drm. + * drm_dev_alloc - Allocate new DRM device + * @driver: DRM driver to allocate device for + * @parent: Parent device object * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument. - * \return zero on success or negative number on failure. + * Allocate and initialize a new DRM device. No device registration is done. + * Call drm_dev_register() to advertice the device to user space and register it + * with other core subsystems. * - * Looks up the ioctl function in the ::ioctls table, checking for root - * previleges if so required, and dispatches to the respective function. + * The initial ref-count of the object is 1. Use drm_dev_ref() and + * drm_dev_unref() to take and drop further ref-counts. + * + * RETURNS: + * Pointer to new DRM device, or NULL if out of memory. 
*/ -long drm_ioctl(struct file *filp, - unsigned int cmd, unsigned long arg) +struct drm_device *drm_dev_alloc(struct drm_driver *driver, + struct device *parent) { - struct drm_file *file_priv = filp->private_data; struct drm_device *dev; - const struct drm_ioctl_desc *ioctl = NULL; - drm_ioctl_t *func; - unsigned int nr = DRM_IOCTL_NR(cmd); - int retcode = -EINVAL; - char stack_kdata[128]; - char *kdata = NULL; - unsigned int usize, asize; - - dev = file_priv->minor->dev; - - if (drm_device_is_unplugged(dev)) - return -ENODEV; - - if ((nr >= DRM_CORE_IOCTL_COUNT) && - ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) - goto err_i1; - if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && - (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { - u32 drv_size; - ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; - drv_size = _IOC_SIZE(ioctl->cmd_drv); - usize = asize = _IOC_SIZE(cmd); - if (drv_size > asize) - asize = drv_size; - cmd = ioctl->cmd_drv; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return NULL; + + kref_init(&dev->ref); + dev->dev = parent; + dev->driver = driver; + + INIT_LIST_HEAD(&dev->filelist); + INIT_LIST_HEAD(&dev->ctxlist); + INIT_LIST_HEAD(&dev->vmalist); + INIT_LIST_HEAD(&dev->maplist); + INIT_LIST_HEAD(&dev->vblank_event_list); + + spin_lock_init(&dev->buf_lock); + spin_lock_init(&dev->event_lock); + mutex_init(&dev->struct_mutex); + mutex_init(&dev->ctxlist_mutex); + mutex_init(&dev->master_mutex); + + dev->anon_inode = drm_fs_inode_new(); + if (IS_ERR(dev->anon_inode)) { + ret = PTR_ERR(dev->anon_inode); + DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); + goto err_free; } - else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { - u32 drv_size; - - ioctl = &drm_ioctls[nr]; - drv_size = _IOC_SIZE(ioctl->cmd); - usize = asize = _IOC_SIZE(cmd); - if (drv_size > asize) - asize = drv_size; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); + if (ret) + goto err_minors; + } - cmd = ioctl->cmd; - } else - goto err_i1; + if (drm_core_check_feature(dev, DRIVER_RENDER)) { + ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); + if (ret) + goto err_minors; + } - DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", - task_pid_nr(current), - (long)old_encode_dev(file_priv->minor->kdev->devt), - file_priv->authenticated, ioctl->name); + ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY); + if (ret) + goto err_minors; - /* Do not trust userspace, use our own definition */ - func = ioctl->func; + if (drm_ht_create(&dev->map_hash, 12)) + goto err_minors; - if (unlikely(!func)) { - DRM_DEBUG("no function\n"); - retcode = -EINVAL; - goto err_i1; + ret = drm_legacy_ctxbitmap_init(dev); + if (ret) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + goto err_ht; } - retcode = drm_ioctl_permit(ioctl->flags, file_priv); - if (unlikely(retcode)) - goto err_i1; - - if (cmd & (IOC_IN | IOC_OUT)) { - if (asize <= sizeof(stack_kdata)) { - kdata = stack_kdata; - } else { - kdata = kmalloc(asize, GFP_KERNEL); - if (!kdata) { - retcode = -ENOMEM; - goto err_i1; - } + if (driver->driver_features & DRIVER_GEM) { + ret = drm_gem_init(dev); + if (ret) { + DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); + goto err_ctxbitmap; } - if (asize > usize) - memset(kdata + usize, 0, asize - usize); } - if (cmd & IOC_IN) { - if (copy_from_user(kdata, (void __user *)arg, - usize) != 0) { - retcode = -EFAULT; - goto err_i1; - } - } else if (cmd & IOC_OUT) { - memset(kdata, 0, usize); - } + return dev; 
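	/*
	 * Error unwind: each label below releases what was set up before
	 * the failing step, in reverse order of allocation (context
	 * bitmap, map hash, minors, anonymous inode).
	 */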
+ +err_ctxbitmap: + drm_legacy_ctxbitmap_cleanup(dev); +err_ht: + drm_ht_remove(&dev->map_hash); +err_minors: + drm_minor_free(dev, DRM_MINOR_LEGACY); + drm_minor_free(dev, DRM_MINOR_RENDER); + drm_minor_free(dev, DRM_MINOR_CONTROL); + drm_fs_inode_free(dev->anon_inode); +err_free: + mutex_destroy(&dev->master_mutex); + kfree(dev); + return NULL; +} +EXPORT_SYMBOL(drm_dev_alloc); + +static void drm_dev_release(struct kref *ref) +{ + struct drm_device *dev = container_of(ref, struct drm_device, ref); + + if (dev->driver->driver_features & DRIVER_GEM) + drm_gem_destroy(dev); + + drm_legacy_ctxbitmap_cleanup(dev); + drm_ht_remove(&dev->map_hash); + drm_fs_inode_free(dev->anon_inode); + + drm_minor_free(dev, DRM_MINOR_LEGACY); + drm_minor_free(dev, DRM_MINOR_RENDER); + drm_minor_free(dev, DRM_MINOR_CONTROL); + + mutex_destroy(&dev->master_mutex); + kfree(dev->unique); + kfree(dev); +} - if (ioctl->flags & DRM_UNLOCKED) - retcode = func(dev, kdata, file_priv); - else { - mutex_lock(&drm_global_mutex); - retcode = func(dev, kdata, file_priv); - mutex_unlock(&drm_global_mutex); +/** + * drm_dev_ref - Take reference of a DRM device + * @dev: device to take reference of or NULL + * + * This increases the ref-count of @dev by one. You *must* already own a + * reference when calling this. Use drm_dev_unref() to drop this reference + * again. + * + * This function never fails. However, this function does not provide *any* + * guarantee whether the device is alive or running. It only provides a + * reference to the object and the memory associated with it. + */ +void drm_dev_ref(struct drm_device *dev) +{ + if (dev) + kref_get(&dev->ref); +} +EXPORT_SYMBOL(drm_dev_ref); + +/** + * drm_dev_unref - Drop reference of a DRM device + * @dev: device to drop reference of or NULL + * + * This decreases the ref-count of @dev by one. The device is destroyed if the + * ref-count drops to zero. + */ +void drm_dev_unref(struct drm_device *dev) +{ + if (dev) + kref_put(&dev->ref, drm_dev_release); +} +EXPORT_SYMBOL(drm_dev_unref); + +/** + * drm_dev_register - Register DRM device + * @dev: Device to register + * @flags: Flags passed to the driver's .load() function + * + * Register the DRM device @dev with the system, advertise device to user-space + * and start normal device operation. @dev must be allocated via drm_dev_alloc() + * previously. + * + * Never call this twice on any device! + * + * RETURNS: + * 0 on success, negative error code on failure. 
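
A minimal sketch of the allocate/register sequence described in the drm_dev_alloc() and drm_dev_register() comments above; the probe function, its arguments and the bare-bones error handling are illustrative assumptions, not part of this patch:

static int example_drm_probe(struct device *parent,
			     struct drm_driver *example_driver)
{
	struct drm_device *ddev;
	int ret;

	/* Allocate the DRM device; nothing is registered yet. */
	ddev = drm_dev_alloc(example_driver, parent);
	if (!ddev)
		return -ENOMEM;

	/* Hardware-specific setup would go here. */

	/* Advertise the device to userspace and start normal operation. */
	ret = drm_dev_register(ddev, 0);
	if (ret)
		drm_dev_unref(ddev);

	return ret;
}
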
+ */ +int drm_dev_register(struct drm_device *dev, unsigned long flags) +{ + int ret; + + mutex_lock(&drm_global_mutex); + + ret = drm_minor_register(dev, DRM_MINOR_CONTROL); + if (ret) + goto err_minors; + + ret = drm_minor_register(dev, DRM_MINOR_RENDER); + if (ret) + goto err_minors; + + ret = drm_minor_register(dev, DRM_MINOR_LEGACY); + if (ret) + goto err_minors; + + if (dev->driver->load) { + ret = dev->driver->load(dev, flags); + if (ret) + goto err_minors; } - if (cmd & IOC_OUT) { - if (copy_to_user((void __user *)arg, kdata, - usize) != 0) - retcode = -EFAULT; + /* setup grouping for legacy outputs */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = drm_mode_group_init_legacy_group(dev, + &dev->primary->mode_group); + if (ret) + goto err_unload; } - err_i1: - if (!ioctl) - DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", - task_pid_nr(current), - (long)old_encode_dev(file_priv->minor->kdev->devt), - file_priv->authenticated, cmd, nr); - - if (kdata != stack_kdata) - kfree(kdata); - if (retcode) - DRM_DEBUG("ret = %d\n", retcode); - return retcode; + ret = 0; + goto out_unlock; + +err_unload: + if (dev->driver->unload) + dev->driver->unload(dev); +err_minors: + drm_minor_unregister(dev, DRM_MINOR_LEGACY); + drm_minor_unregister(dev, DRM_MINOR_RENDER); + drm_minor_unregister(dev, DRM_MINOR_CONTROL); +out_unlock: + mutex_unlock(&drm_global_mutex); + return ret; } -EXPORT_SYMBOL(drm_ioctl); +EXPORT_SYMBOL(drm_dev_register); /** - * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags + * drm_dev_unregister - Unregister DRM device + * @dev: Device to unregister + * + * Unregister the DRM device from the system. This does the reverse of + * drm_dev_register() but does not deallocate the device. The caller must call + * drm_dev_unref() to drop their final reference. + */ +void drm_dev_unregister(struct drm_device *dev) +{ + struct drm_map_list *r_list, *list_temp; + + drm_lastclose(dev); + + if (dev->driver->unload) + dev->driver->unload(dev); + + if (dev->agp) + drm_pci_agp_destroy(dev); + + drm_vblank_cleanup(dev); + + list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) + drm_rmmap(dev, r_list->map); + + drm_minor_unregister(dev, DRM_MINOR_LEGACY); + drm_minor_unregister(dev, DRM_MINOR_RENDER); + drm_minor_unregister(dev, DRM_MINOR_CONTROL); +} +EXPORT_SYMBOL(drm_dev_unregister); + +/** + * drm_dev_set_unique - Set the unique name of a DRM device + * @dev: device of which to set the unique name + * @fmt: format string for unique name + * + * Sets the unique name of a DRM device using the specified format string and + * a variable list of arguments. Drivers can use this at driver probe time if + * the unique name of the devices they drive is static. * - * @nr: Ioctl number. - * @flags: Where to return the ioctl permission flags + * Return: 0 on success or a negative error code on failure. */ -bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) +int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...) { - if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) || - (nr < DRM_COMMAND_BASE)) { - *flags = drm_ioctls[nr].flags; - return true; + va_list ap; + + kfree(dev->unique); + + va_start(ap, fmt); + dev->unique = kvasprintf(GFP_KERNEL, fmt, ap); + va_end(ap); + + return dev->unique ? 0 : -ENOMEM; +} +EXPORT_SYMBOL(drm_dev_set_unique); + +/* + * DRM Core + * The DRM core module initializes all global DRM objects and makes them + * available to drivers. 
Once setup, drivers can probe their respective + * devices. + * Currently, core management includes: + * - The "DRM-Global" key/value database + * - Global ID management for connectors + * - DRM major number allocation + * - DRM minor management + * - DRM sysfs class + * - DRM debugfs root + * + * Furthermore, the DRM core provides dynamic char-dev lookups. For each + * interface registered on a DRM device, you can request minor numbers from DRM + * core. DRM core takes care of major-number management and char-dev + * registration. A stub ->open() callback forwards any open() requests to the + * registered minor. + */ + +static int drm_stub_open(struct inode *inode, struct file *filp) +{ + const struct file_operations *new_fops; + struct drm_minor *minor; + int err; + + DRM_DEBUG("\n"); + + mutex_lock(&drm_global_mutex); + minor = drm_minor_acquire(iminor(inode)); + if (IS_ERR(minor)) { + err = PTR_ERR(minor); + goto out_unlock; + } + + new_fops = fops_get(minor->dev->driver->fops); + if (!new_fops) { + err = -ENODEV; + goto out_release; } - return false; + replace_fops(filp, new_fops); + if (filp->f_op->open) + err = filp->f_op->open(inode, filp); + else + err = 0; + +out_release: + drm_minor_release(minor); +out_unlock: + mutex_unlock(&drm_global_mutex); + return err; } -EXPORT_SYMBOL(drm_ioctl_flags); + +static const struct file_operations drm_stub_fops = { + .owner = THIS_MODULE, + .open = drm_stub_open, + .llseek = noop_llseek, +}; + +static int __init drm_core_init(void) +{ + int ret = -ENOMEM; + + drm_global_init(); + drm_connector_ida_init(); + idr_init(&drm_minors_idr); + + if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) + goto err_p1; + + drm_class = drm_sysfs_create(THIS_MODULE, "drm"); + if (IS_ERR(drm_class)) { + printk(KERN_ERR "DRM: Error creating drm class.\n"); + ret = PTR_ERR(drm_class); + goto err_p2; + } + + drm_debugfs_root = debugfs_create_dir("dri", NULL); + if (!drm_debugfs_root) { + DRM_ERROR("Cannot create /sys/kernel/debug/dri\n"); + ret = -1; + goto err_p3; + } + + DRM_INFO("Initialized %s %d.%d.%d %s\n", + CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); + return 0; +err_p3: + drm_sysfs_destroy(); +err_p2: + unregister_chrdev(DRM_MAJOR, "drm"); + + idr_destroy(&drm_minors_idr); +err_p1: + return ret; +} + +static void __exit drm_core_exit(void) +{ + debugfs_remove(drm_debugfs_root); + drm_sysfs_destroy(); + + unregister_chrdev(DRM_MAJOR, "drm"); + + drm_connector_ida_destroy(); + idr_destroy(&drm_minors_idr); +} + +module_init(drm_core_init); +module_exit(drm_core_exit); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dfa9769b26b5..1dbf3bc4c6a3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3305,6 +3305,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder && connector->eld[0]) @@ -3775,8 +3776,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; - /* Populate picture aspect ratio from CEA mode list */ - if (frame->video_code > 0) + /* + * Populate picture aspect ratio from either + * user input (if specified) or from the CEA mode list. 
+ */ + if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 || + mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9) + frame->picture_aspect = mode->picture_aspect_ratio; + else if (frame->video_code > 0) frame->picture_aspect = drm_get_cea_aspect_ratio( frame->video_code); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index f27c883be391..cc0ae047ed3b 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -327,7 +327,7 @@ err_drm_gem_cma_free_object: return ret; } -static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { +static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { .fb_probe = drm_fbdev_cma_create, }; @@ -354,9 +354,10 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs; helper = &fbdev_cma->fb_helper; + drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs); + ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count); if (ret < 0) { dev_err(dev->dev, "Failed to initialize drm fb helper.\n"); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d5d8cea1a679..3144db9dc0f1 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -49,10 +49,11 @@ static LIST_HEAD(kernel_fb_helper_list); * helper functions used by many drivers to implement the kernel mode setting * interfaces. * - * Initialization is done as a three-step process with drm_fb_helper_init(), - * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config(). - * Drivers with fancier requirements than the default behaviour can override the - * second step with their own code. Teardown is done with drm_fb_helper_fini(). + * Initialization is done as a four-step process with drm_fb_helper_prepare(), + * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and + * drm_fb_helper_initial_config(). Drivers with fancier requirements than the + * default behaviour can override the third step with their own code. + * Teardown is done with drm_fb_helper_fini(). * * At runtime drivers should restore the fbdev console by calling * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They @@ -63,6 +64,19 @@ static LIST_HEAD(kernel_fb_helper_list); * * All other functions exported by the fb helper library can be used to * implement the fbdev driver interface by the driver. + * + * It is possible, though perhaps somewhat tricky, to implement race-free + * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare() + * helper must be called first to initialize the minimum required to make + * hotplug detection work. Drivers also need to make sure to properly set up + * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init() + * it is safe to enable interrupts and start processing hotplug events. At the + * same time, drivers should initialize all modeset objects such as CRTCs, + * encoders and connectors. To finish up the fbdev helper initialization, the + * drm_fb_helper_init() function is called. To probe for all attached displays + * and set up an initial configuration using the detected hardware, drivers + * should call drm_fb_helper_single_add_all_connectors() followed by + * drm_fb_helper_initial_config(). 
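+ *
+ * As a rough sketch (error handling omitted; the fbdev field, funcs table
+ * and the num_crtc/max_conn/bpp values are hypothetical driver choices),
+ * the resulting probe sequence looks like:
+ *
+ *   drm_fb_helper_prepare(dev, &priv->fbdev, &driver_fb_helper_funcs);
+ *   drm_kms_helper_poll_init(dev);
+ *   ... register CRTCs, encoders and connectors, enable interrupts ...
+ *   drm_fb_helper_init(dev, &priv->fbdev, num_crtc, max_conn);
+ *   drm_fb_helper_single_add_all_connectors(&priv->fbdev);
+ *   drm_fb_helper_initial_config(&priv->fbdev, bpp);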
*/ /** @@ -105,6 +119,58 @@ fail: } EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors); +int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector) +{ + struct drm_fb_helper_connector **temp; + struct drm_fb_helper_connector *fb_helper_connector; + + WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); + if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) { + temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1; + fb_helper->connector_info = temp; + } + + + fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL); + if (!fb_helper_connector) + return -ENOMEM; + + fb_helper_connector->connector = connector; + fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector; + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_add_one_connector); + +int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + struct drm_fb_helper_connector *fb_helper_connector; + int i, j; + + WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex)); + + for (i = 0; i < fb_helper->connector_count; i++) { + if (fb_helper->connector_info[i]->connector == connector) + break; + } + + if (i == fb_helper->connector_count) + return -EINVAL; + fb_helper_connector = fb_helper->connector_info[i]; + + for (j = i + 1; j < fb_helper->connector_count; j++) { + fb_helper->connector_info[j - 1] = fb_helper->connector_info[j]; + } + fb_helper->connector_count--; + kfree(fb_helper_connector); + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); + static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) { struct drm_fb_helper_connector *fb_helper_conn; @@ -199,9 +265,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info) struct drm_crtc_helper_funcs *funcs; int i; - if (list_empty(&kernel_fb_helper_list)) - return false; - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { for (i = 0; i < helper->crtc_count; i++) { struct drm_mode_set *mode_set = @@ -531,6 +594,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) } /** + * drm_fb_helper_prepare - setup a drm_fb_helper structure + * @dev: DRM device + * @helper: driver-allocated fbdev helper structure to set up + * @funcs: pointer to structure of functions associate with this helper + * + * Sets up the bare minimum to make the framebuffer helper usable. This is + * useful to implement race-free initialization of the polling helpers. + */ +void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs) +{ + INIT_LIST_HEAD(&helper->kernel_fb_list); + helper->funcs = funcs; + helper->dev = dev; +} +EXPORT_SYMBOL(drm_fb_helper_prepare); + +/** * drm_fb_helper_init - initialize a drm_fb_helper structure * @dev: drm device * @fb_helper: driver-allocated fbdev helper structure to initialize @@ -542,8 +623,7 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) * nor register the fbdev. This is only done in drm_fb_helper_initial_config() * to allow driver writes more control over the exact init sequence. * - * Drivers must set fb_helper->funcs before calling - * drm_fb_helper_initial_config(). + * Drivers must call drm_fb_helper_prepare() before calling this function. 
* * RETURNS: * Zero if everything went ok, nonzero otherwise. @@ -558,10 +638,6 @@ int drm_fb_helper_init(struct drm_device *dev, if (!max_conn_count) return -EINVAL; - fb_helper->dev = dev; - - INIT_LIST_HEAD(&fb_helper->kernel_fb_list); - fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL); if (!fb_helper->crtc_info) return -ENOMEM; @@ -572,6 +648,7 @@ int drm_fb_helper_init(struct drm_device *dev, kfree(fb_helper->crtc_info); return -ENOMEM; } + fb_helper->connector_info_alloc_count = dev->mode_config.num_connector; fb_helper->connector_count = 0; for (i = 0; i < crtc_count; i++) { @@ -1056,7 +1133,6 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, info->fix.ypanstep = 1; /* doing it in hw */ info->fix.ywrapstep = 0; info->fix.accel = FB_ACCEL_NONE; - info->fix.type_aux = 0; info->fix.line_length = pitch; return; @@ -1613,8 +1689,10 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); * either the output polling work or a work item launched from the driver's * hotplug interrupt). * - * Note that the driver must ensure that this is only called _after_ the fb has - * been fully set up, i.e. after the call to drm_fb_helper_initial_config. + * Note that drivers may call this even before calling + * drm_fb_helper_initial_config but only aftert drm_fb_helper_init. This allows + * for a race-free fbcon setup and will make sure that the fbdev emulation will + * not miss any hotplug events. * * RETURNS: * 0 on success and a non-zero error code otherwise. @@ -1624,11 +1702,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) struct drm_device *dev = fb_helper->dev; u32 max_width, max_height; - if (!fb_helper->fb) - return 0; - mutex_lock(&fb_helper->dev->mode_config.mutex); - if (!drm_fb_helper_is_bound(fb_helper)) { + if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) { fb_helper->delayed_hotplug = true; mutex_unlock(&fb_helper->dev->mode_config.mutex); return 0; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 021fe5d11df5..79d5221c6e41 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -38,6 +38,7 @@ #include <linux/poll.h> #include <linux/slab.h> #include <linux/module.h> +#include "drm_legacy.h" /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); @@ -112,55 +113,12 @@ err_undo: EXPORT_SYMBOL(drm_open); /** - * File \c open operation. - * - * \param inode device inode. - * \param filp file pointer. - * - * Puts the dev->fops corresponding to the device minor number into - * \p filp, call the \c open method, and restore the file operations. - */ -int drm_stub_open(struct inode *inode, struct file *filp) -{ - struct drm_device *dev; - struct drm_minor *minor; - int err = -ENODEV; - const struct file_operations *new_fops; - - DRM_DEBUG("\n"); - - mutex_lock(&drm_global_mutex); - minor = drm_minor_acquire(iminor(inode)); - if (IS_ERR(minor)) - goto out_unlock; - - dev = minor->dev; - new_fops = fops_get(dev->driver->fops); - if (!new_fops) - goto out_release; - - replace_fops(filp, new_fops); - if (filp->f_op->open) - err = filp->f_op->open(inode, filp); - -out_release: - drm_minor_release(minor); -out_unlock: - mutex_unlock(&drm_global_mutex); - return err; -} - -/** * Check whether DRI will run on this CPU. * * \return non-zero if the DRI will run on this CPU, or zero otherwise. 
*/ static int drm_cpu_valid(void) { -#if defined(__i386__) - if (boot_cpu_data.x86 == 3) - return 0; /* No cmpxchg on a 386 */ -#endif #if defined(__sparc__) && !defined(__sparc_v9__) return 0; /* No cmpxchg before v9 sparc. */ #endif @@ -203,8 +161,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) priv->minor = minor; /* for compatibility root is always authenticated */ - priv->always_authenticated = capable(CAP_SYS_ADMIN); - priv->authenticated = priv->always_authenticated; + priv->authenticated = capable(CAP_SYS_ADMIN); priv->lock_count = 0; INIT_LIST_HEAD(&priv->lhead); @@ -429,6 +386,10 @@ int drm_release(struct inode *inode, struct file *filp) DRM_DEBUG("open_count = %d\n", dev->open_count); + mutex_lock(&dev->struct_mutex); + list_del(&file_priv->lhead); + mutex_unlock(&dev->struct_mutex); + if (dev->driver->preclose) dev->driver->preclose(dev, file_priv); @@ -461,44 +422,18 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->driver_features & DRIVER_GEM) drm_gem_release(dev, file_priv); - mutex_lock(&dev->ctxlist_mutex); - if (!list_empty(&dev->ctxlist)) { - struct drm_ctx_list *pos, *n; - - list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->tag == file_priv && - pos->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, - pos->handle); - - drm_ctxbitmap_free(dev, pos->handle); - - list_del(&pos->head); - kfree(pos); - } - } - } - mutex_unlock(&dev->ctxlist_mutex); + drm_legacy_ctxbitmap_flush(dev, file_priv); mutex_lock(&dev->master_mutex); if (file_priv->is_master) { struct drm_master *master = file_priv->master; - struct drm_file *temp; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(temp, &dev->filelist, lhead) { - if ((temp->master == file_priv->master) && - (temp != file_priv)) - temp->authenticated = temp->always_authenticated; - } /** * Since the master is disappearing, so is the * possibility to lock. */ - + mutex_lock(&dev->struct_mutex); if (master->lock.hw_lock) { if (dev->sigdata.lock == master->lock.hw_lock) dev->sigdata.lock = NULL; @@ -522,10 +457,6 @@ int drm_release(struct inode *inode, struct file *filp) file_priv->is_master = 0; mutex_unlock(&dev->master_mutex); - mutex_lock(&dev->struct_mutex); - list_del(&file_priv->lhead); - mutex_unlock(&dev->struct_mutex); - if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index f7d71190aad5..6adee4c2afc0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset); * drm_gem_get_pages - helper to allocate backing pages for a GEM object * from shmem * @obj: obj in question - * @gfpmask: gfp mask of requested pages + * + * This reads the page-array of the shmem-backing storage of the given gem + * object. An array of pages is returned. If a page is not allocated or + * swapped-out, this will allocate/swap-in the required pages. Note that the + * whole object is covered by the page-array and pinned in memory. + * + * Use drm_gem_put_pages() to release the array and unpin all pages. + * + * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()). + * If you require other GFP-masks, you have to do those allocations yourself. + * + * Note that you are not allowed to change gfp-zones during runtime. That is, + * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as + * set during initialization. 
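+ * (Illustrative sketch: a device limited to 32-bit DMA would pick its zone
+ * once, e.g. mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32), and
+ * never change it afterwards.)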
If you have special zone constraints, set them + * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care + * to keep pages in the required zone during swap-in. */ -struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) +struct page **drm_gem_get_pages(struct drm_gem_object *obj) { - struct inode *inode; struct address_space *mapping; struct page *p, **pages; int i, npages; /* This is the shared memory object that backs the GEM resource */ - inode = file_inode(obj->filp); - mapping = inode->i_mapping; + mapping = file_inode(obj->filp)->i_mapping; /* We already BUG_ON() for non-page-aligned sizes in * drm_gem_object_init(), so we should never hit this unless @@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) if (pages == NULL) return ERR_PTR(-ENOMEM); - gfpmask |= mapping_gfp_mask(mapping); - for (i = 0; i < npages; i++) { - p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); + p = shmem_read_mapping_page(mapping, i); if (IS_ERR(p)) goto fail; pages[i] = p; @@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) * so shmem can relocate pages during swapin if required. */ - BUG_ON((gfpmask & __GFP_DMA32) && + BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) && (page_to_pfn(p) >= 0x00100000UL)); } diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 05c97c5350a1..e467e67af6e7 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -327,7 +327,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size, /* Create a CMA GEM buffer. */ cma_obj = __drm_gem_cma_create(dev, size); if (IS_ERR(cma_obj)) - return ERR_PTR(PTR_ERR(cma_obj)); + return ERR_CAST(cma_obj); cma_obj->paddr = sg_dma_address(sgt->sgl); cma_obj->sgt = sgt; diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index 7e4bae760e27..c3b80fd65d62 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -125,7 +125,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) parent = &entry->head; } if (parent) { - hlist_add_after_rcu(parent, &item->head); + hlist_add_behind_rcu(&item->head, parent); } else { hlist_add_head_rcu(&item->head, h_list); } diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 86feedd5e6f6..ecaf0fa2eec8 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -132,7 +132,7 @@ int drm_bufs_info(struct seq_file *m, void *data) i, dma->bufs[i].buf_size, dma->bufs[i].buf_count, - atomic_read(&dma->bufs[i].freelist.count), + 0, dma->bufs[i].seg_count, seg_pages, seg_pages * PAGE_SIZE / 1024); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 69c61f392e66..40be746b7e68 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -1,11 +1,3 @@ -/** - * \file drm_ioctl.c - * IOCTL processing for DRM - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - */ - /* * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com * @@ -13,6 +5,9 @@ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * + * Author Rickard E. 
(Rik) Faith <faith@valinux.com> + * Author Gareth Hughes <gareth@valinux.com> + * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation @@ -35,6 +30,7 @@ #include <drm/drmP.h> #include <drm/drm_core.h> +#include "drm_legacy.h" #include <linux/pci.h> #include <linux/export.h> @@ -42,6 +38,124 @@ #include <asm/mtrr.h> #endif +static int drm_version(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} + +/** Ioctl table */ +static const struct drm_ioctl_desc drm_ioctls[] = { + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, 
drm_freebufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + +#if __OS_HAS_AGP + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +#endif + + DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), + + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, 
drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), +}; + +#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) + /** * Get the bus id. * @@ -342,8 +456,6 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) file_priv->stereo_allowed = req->value; break; case DRM_CLIENT_CAP_UNIVERSAL_PLANES: - if (!drm_universal_planes) - return -EINVAL; if (req->value > 1) return -EINVAL; file_priv->universal_planes = req->value; @@ -417,3 +529,243 @@ int drm_noop(struct drm_device *dev, void *data, return 0; } EXPORT_SYMBOL(drm_noop); + +/** + * Copy and IOCTL return string to user space + */ +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) +{ + int len; + + /* don't overflow userbuf */ + len = strlen(value); + if (len > *buf_len) + len = *buf_len; + + /* let userspace know exact length of driver value (which could be + * larger than the userspace-supplied buffer) */ + *buf_len = strlen(value); + + /* finally, try filling in the userbuf */ + if (len && buf) + if (copy_to_user(buf, value, len)) + return -EFAULT; + return 0; +} + +/** + * Get version information + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_version structure. + * \return zero on success or negative number on failure. + * + * Fills in the version information in \p arg. + */ +static int drm_version(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_version *version = data; + int err; + + version->version_major = dev->driver->major; + version->version_minor = dev->driver->minor; + version->version_patchlevel = dev->driver->patchlevel; + err = drm_copy_field(version->name, &version->name_len, + dev->driver->name); + if (!err) + err = drm_copy_field(version->date, &version->date_len, + dev->driver->date); + if (!err) + err = drm_copy_field(version->desc, &version->desc_len, + dev->driver->desc); + + return err; +} + +/** + * drm_ioctl_permit - Check ioctl permissions against caller + * + * @flags: ioctl permission flags. + * @file_priv: Pointer to struct drm_file identifying the caller. + * + * Checks whether the caller is allowed to run an ioctl with the + * indicated permissions. If so, returns zero. Otherwise returns an + * error code suitable for ioctl return. 
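+ *
+ * As an illustrative example (DRM_IOCTL_FOO and drm_foo are hypothetical),
+ * a table entry declared as
+ *
+ *   DRM_IOCTL_DEF(DRM_IOCTL_FOO, drm_foo, DRM_AUTH|DRM_ROOT_ONLY)
+ *
+ * is refused with -EACCES on the primary node unless the caller both has
+ * CAP_SYS_ADMIN and is authenticated; render and control clients are
+ * additionally gated by DRM_RENDER_ALLOW and DRM_CONTROL_ALLOW.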
+ */ +static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) +{ + /* ROOT_ONLY is only for CAP_SYS_ADMIN */ + if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))) + return -EACCES; + + /* AUTH is only for authenticated or render client */ + if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) && + !file_priv->authenticated)) + return -EACCES; + + /* MASTER is only for master or control clients */ + if (unlikely((flags & DRM_MASTER) && !file_priv->is_master && + !drm_is_control_client(file_priv))) + return -EACCES; + + /* Control clients must be explicitly allowed */ + if (unlikely(!(flags & DRM_CONTROL_ALLOW) && + drm_is_control_client(file_priv))) + return -EACCES; + + /* Render clients must be explicitly allowed */ + if (unlikely(!(flags & DRM_RENDER_ALLOW) && + drm_is_render_client(file_priv))) + return -EACCES; + + return 0; +} + +/** + * Called whenever a process performs an ioctl on /dev/drm. + * + * \param inode device inode. + * \param file_priv DRM file private. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + * + * Looks up the ioctl function in the ::ioctls table, checking for root + * previleges if so required, and dispatches to the respective function. + */ +long drm_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev; + const struct drm_ioctl_desc *ioctl = NULL; + drm_ioctl_t *func; + unsigned int nr = DRM_IOCTL_NR(cmd); + int retcode = -EINVAL; + char stack_kdata[128]; + char *kdata = NULL; + unsigned int usize, asize; + + dev = file_priv->minor->dev; + + if (drm_device_is_unplugged(dev)) + return -ENODEV; + + if ((nr >= DRM_CORE_IOCTL_COUNT) && + ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) + goto err_i1; + if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && + (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { + u32 drv_size; + ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; + drv_size = _IOC_SIZE(ioctl->cmd_drv); + usize = asize = _IOC_SIZE(cmd); + if (drv_size > asize) + asize = drv_size; + cmd = ioctl->cmd_drv; + } + else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { + u32 drv_size; + + ioctl = &drm_ioctls[nr]; + + drv_size = _IOC_SIZE(ioctl->cmd); + usize = asize = _IOC_SIZE(cmd); + if (drv_size > asize) + asize = drv_size; + + cmd = ioctl->cmd; + } else + goto err_i1; + + DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->kdev->devt), + file_priv->authenticated, ioctl->name); + + /* Do not trust userspace, use our own definition */ + func = ioctl->func; + + if (unlikely(!func)) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + goto err_i1; + } + + retcode = drm_ioctl_permit(ioctl->flags, file_priv); + if (unlikely(retcode)) + goto err_i1; + + if (cmd & (IOC_IN | IOC_OUT)) { + if (asize <= sizeof(stack_kdata)) { + kdata = stack_kdata; + } else { + kdata = kmalloc(asize, GFP_KERNEL); + if (!kdata) { + retcode = -ENOMEM; + goto err_i1; + } + } + if (asize > usize) + memset(kdata + usize, 0, asize - usize); + } + + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, + usize) != 0) { + retcode = -EFAULT; + goto err_i1; + } + } else if (cmd & IOC_OUT) { + memset(kdata, 0, usize); + } + + if (ioctl->flags & DRM_UNLOCKED) + retcode = func(dev, kdata, file_priv); + else { + mutex_lock(&drm_global_mutex); + retcode = func(dev, kdata, file_priv); + 
mutex_unlock(&drm_global_mutex); + } + + if (cmd & IOC_OUT) { + if (copy_to_user((void __user *)arg, kdata, + usize) != 0) + retcode = -EFAULT; + } + + err_i1: + if (!ioctl) + DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->kdev->devt), + file_priv->authenticated, cmd, nr); + + if (kdata != stack_kdata) + kfree(kdata); + if (retcode) + DRM_DEBUG("ret = %d\n", retcode); + return retcode; +} +EXPORT_SYMBOL(drm_ioctl); + +/** + * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags + * + * @nr: Ioctl number. + * @flags: Where to return the ioctl permission flags + */ +bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) +{ + if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) || + (nr < DRM_COMMAND_BASE)) { + *flags = drm_ioctls[nr].flags; + return true; + } + + return false; +} +EXPORT_SYMBOL(drm_ioctl_flags); diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h new file mode 100644 index 000000000000..d34f20a79b7c --- /dev/null +++ b/drivers/gpu/drm/drm_legacy.h @@ -0,0 +1,51 @@ +#ifndef __DRM_LEGACY_H__ +#define __DRM_LEGACY_H__ + +/* + * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +struct drm_device; +struct drm_file; + +/* + * Generic DRM Contexts + */ + +#define DRM_KERNEL_CONTEXT 0 +#define DRM_RESERVED_CONTEXTS 1 + +int drm_legacy_ctxbitmap_init(struct drm_device *dev); +void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); +void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle); +void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file); + +int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f); + +int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f); +int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f); + +#endif /* __DRM_LEGACY_H__ */ diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index f6452682141b..e26b59e385ff 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -35,6 +35,7 @@ #include <linux/export.h> #include <drm/drmP.h> +#include "drm_legacy.h" static int drm_notifier(void *priv); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index e633df2f68d8..6aa6a9e95570 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -201,16 +201,15 @@ EXPORT_SYMBOL(mipi_dsi_detach); /** * mipi_dsi_dcs_write - send DCS write command * @dsi: DSI device - * @channel: virtual channel * @data: pointer to the command followed by parameters * @len: length of @data */ -int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel, - const void *data, size_t len) +ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data, + size_t len) { const struct mipi_dsi_host_ops *ops = dsi->host->ops; struct mipi_dsi_msg msg = { - .channel = channel, + .channel = dsi->channel, .tx_buf = data, .tx_len = len }; @@ -239,19 +238,18 @@ EXPORT_SYMBOL(mipi_dsi_dcs_write); /** * mipi_dsi_dcs_read - send DCS read request command * @dsi: DSI device - * @channel: virtual channel * @cmd: DCS read command * @data: pointer to read buffer * @len: length of @data * * Function returns number of read bytes or error code. */ -ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel, - u8 cmd, void *data, size_t len) +ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, + size_t len) { const struct mipi_dsi_host_ops *ops = dsi->host->ops; struct mipi_dsi_msg msg = { - .channel = channel, + .channel = dsi->channel, .type = MIPI_DSI_DCS_READ, .tx_buf = &cmd, .tx_len = 1, diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c new file mode 100644 index 000000000000..16150a00c237 --- /dev/null +++ b/drivers/gpu/drm/drm_of.c @@ -0,0 +1,67 @@ +#include <linux/export.h> +#include <linux/list.h> +#include <linux/of_graph.h> +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_of.h> + +/** + * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node + * @dev: DRM device + * @port: port OF node + * + * Given a port OF node, return the possible mask of the corresponding + * CRTC within a device's list of CRTCs. Returns zero if not found. 
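+ *
+ * The mask is positional: a @port matching the second CRTC on the
+ * crtc_list yields BIT(1). An illustrative use one level up, typically in
+ * an encoder's bind code, is:
+ *
+ *   encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port_node);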
+ */ +static uint32_t drm_crtc_port_mask(struct drm_device *dev, + struct device_node *port) +{ + unsigned int index = 0; + struct drm_crtc *tmp; + + list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { + if (tmp->port == port) + return 1 << index; + + index++; + } + + return 0; +} + +/** + * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port + * @dev: DRM device + * @port: encoder port to scan for endpoints + * + * Scan all endpoints attached to a port, locate their attached CRTCs, + * and generate the DRM mask of CRTCs which may be attached to this + * encoder. + * + * See Documentation/devicetree/bindings/graph.txt for the bindings. + */ +uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, + struct device_node *port) +{ + struct device_node *remote_port, *ep = NULL; + uint32_t possible_crtcs = 0; + + do { + ep = of_graph_get_next_endpoint(port, ep); + if (!ep) + break; + + remote_port = of_graph_get_remote_port(ep); + if (!remote_port) { + of_node_put(ep); + return 0; + } + + possible_crtcs |= drm_crtc_port_mask(dev, remote_port); + + of_node_put(remote_port); + } while (1); + + return possible_crtcs; +} +EXPORT_SYMBOL(drm_of_find_possible_crtcs); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 6d133149cc74..827ec1a3040b 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -335,9 +335,10 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, } /* possible_crtc's will be filled in later by crtc_init */ - ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs, - formats, num_formats, - DRM_PLANE_TYPE_PRIMARY); + ret = drm_universal_plane_init(dev, primary, 0, + &drm_primary_helper_funcs, + formats, num_formats, + DRM_PLANE_TYPE_PRIMARY); if (ret) { kfree(primary); primary = NULL; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d22676b89cbb..db7d250f7ac7 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -130,7 +130,14 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect count = drm_load_edid_firmware(connector); if (count == 0) #endif - count = (*connector_funcs->get_modes)(connector); + { + if (connector->override_edid) { + struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; + + count = drm_add_edid_modes(connector, edid); + } else + count = (*connector_funcs->get_modes)(connector); + } if (count == 0 && connector->status == connector_status_connected) count = drm_add_modes_noedid(connector, 1024, 768); diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index 7047ca025787..631f5afd451c 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point) DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1); } EXPORT_SYMBOL(drm_rect_debug_print); + +/** + * drm_rect_rotate - Rotate the rectangle + * @r: rectangle to be rotated + * @width: Width of the coordinate space + * @height: Height of the coordinate space + * @rotation: Transformation to be applied + * + * Apply @rotation to the coordinates of rectangle @r. + * + * @width and @height combined with @rotation define + * the location of the new origin. + * + * @width correcsponds to the horizontal and @height + * to the vertical axis of the untransformed coordinate + * space. 
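+ *
+ * A worked example (illustrative): with @width = 400 and @height = 300,
+ * rotating (x1, y1, x2, y2) = (0, 0, 100, 50) by BIT(DRM_ROTATE_90)
+ * yields (0, 300, 50, 400); feeding that result to drm_rect_rotate_inv()
+ * with the same arguments returns the original rectangle.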
+ */ +void drm_rect_rotate(struct drm_rect *r, + int width, int height, + unsigned int rotation) +{ + struct drm_rect tmp; + + if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) { + tmp = *r; + + if (rotation & BIT(DRM_REFLECT_X)) { + r->x1 = width - tmp.x2; + r->x2 = width - tmp.x1; + } + + if (rotation & BIT(DRM_REFLECT_Y)) { + r->y1 = height - tmp.y2; + r->y2 = height - tmp.y1; + } + } + + switch (rotation & 0xf) { + case BIT(DRM_ROTATE_0): + break; + case BIT(DRM_ROTATE_90): + tmp = *r; + r->x1 = tmp.y1; + r->x2 = tmp.y2; + r->y1 = width - tmp.x2; + r->y2 = width - tmp.x1; + break; + case BIT(DRM_ROTATE_180): + tmp = *r; + r->x1 = width - tmp.x2; + r->x2 = width - tmp.x1; + r->y1 = height - tmp.y2; + r->y2 = height - tmp.y1; + break; + case BIT(DRM_ROTATE_270): + tmp = *r; + r->x1 = height - tmp.y2; + r->x2 = height - tmp.y1; + r->y1 = tmp.x1; + r->y2 = tmp.x2; + break; + default: + break; + } +} +EXPORT_SYMBOL(drm_rect_rotate); + +/** + * drm_rect_rotate_inv - Inverse rotate the rectangle + * @r: rectangle to be rotated + * @width: Width of the coordinate space + * @height: Height of the coordinate space + * @rotation: Transformation whose inverse is to be applied + * + * Apply the inverse of @rotation to the coordinates + * of rectangle @r. + * + * @width and @height combined with @rotation define + * the location of the new origin. + * + * @width correcsponds to the horizontal and @height + * to the vertical axis of the original untransformed + * coordinate space, so that you never have to flip + * them when doing a rotatation and its inverse. + * That is, if you do: + * + * drm_rotate(&r, width, height, rotation); + * drm_rotate_inv(&r, width, height, rotation); + * + * you will always get back the original rectangle. + */ +void drm_rect_rotate_inv(struct drm_rect *r, + int width, int height, + unsigned int rotation) +{ + struct drm_rect tmp; + + switch (rotation & 0xf) { + case BIT(DRM_ROTATE_0): + break; + case BIT(DRM_ROTATE_90): + tmp = *r; + r->x1 = width - tmp.y2; + r->x2 = width - tmp.y1; + r->y1 = tmp.x1; + r->y2 = tmp.x2; + break; + case BIT(DRM_ROTATE_180): + tmp = *r; + r->x1 = width - tmp.x2; + r->x2 = width - tmp.x1; + r->y1 = height - tmp.y2; + r->y2 = height - tmp.y1; + break; + case BIT(DRM_ROTATE_270): + tmp = *r; + r->x1 = tmp.y1; + r->x2 = tmp.y2; + r->y1 = height - tmp.x2; + r->y2 = height - tmp.x1; + break; + default: + break; + } + + if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) { + tmp = *r; + + if (rotation & BIT(DRM_REFLECT_X)) { + r->x1 = width - tmp.x2; + r->x2 = width - tmp.x1; + } + + if (rotation & BIT(DRM_REFLECT_Y)) { + r->y1 = height - tmp.y2; + r->y2 = height - tmp.y1; + } + } +} +EXPORT_SYMBOL(drm_rect_rotate_inv); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c deleted file mode 100644 index 14d16464000a..000000000000 --- a/drivers/gpu/drm/drm_stub.c +++ /dev/null @@ -1,805 +0,0 @@ -/* - * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org - * - * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Author Rickard E. 
(Rik) Faith <faith@valinux.com> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#include <linux/fs.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/mount.h> -#include <linux/slab.h> -#include <drm/drmP.h> -#include <drm/drm_core.h> - -unsigned int drm_debug = 0; /* 1 to enable debug output */ -EXPORT_SYMBOL(drm_debug); - -unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */ -EXPORT_SYMBOL(drm_rnodes); - -/* 1 to allow user space to request universal planes (experimental) */ -unsigned int drm_universal_planes = 0; -EXPORT_SYMBOL(drm_universal_planes); - -unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ -EXPORT_SYMBOL(drm_vblank_offdelay); - -unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ -EXPORT_SYMBOL(drm_timestamp_precision); - -/* - * Default to use monotonic timestamps for wait-for-vblank and page-flip - * complete events. - */ -unsigned int drm_timestamp_monotonic = 1; - -MODULE_AUTHOR(CORE_AUTHOR); -MODULE_DESCRIPTION(CORE_DESC); -MODULE_LICENSE("GPL and additional rights"); -MODULE_PARM_DESC(debug, "Enable debug output"); -MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API"); -MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); -MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); -MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); - -module_param_named(debug, drm_debug, int, 0600); -module_param_named(rnodes, drm_rnodes, int, 0600); -module_param_named(universal_planes, drm_universal_planes, int, 0600); -module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); -module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); -module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); - -static DEFINE_SPINLOCK(drm_minor_lock); -struct idr drm_minors_idr; - -struct class *drm_class; -struct dentry *drm_debugfs_root; - -int drm_err(const char *func, const char *format, ...) -{ - struct va_format vaf; - va_list args; - int r; - - va_start(args, format); - - vaf.fmt = format; - vaf.va = &args; - - r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); - - va_end(args); - - return r; -} -EXPORT_SYMBOL(drm_err); - -void drm_ut_debug_printk(const char *function_name, const char *format, ...) 
-{ - struct va_format vaf; - va_list args; - - va_start(args, format); - vaf.fmt = format; - vaf.va = &args; - - printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf); - - va_end(args); -} -EXPORT_SYMBOL(drm_ut_debug_printk); - -struct drm_master *drm_master_create(struct drm_minor *minor) -{ - struct drm_master *master; - - master = kzalloc(sizeof(*master), GFP_KERNEL); - if (!master) - return NULL; - - kref_init(&master->refcount); - spin_lock_init(&master->lock.spinlock); - init_waitqueue_head(&master->lock.lock_queue); - if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) { - kfree(master); - return NULL; - } - INIT_LIST_HEAD(&master->magicfree); - master->minor = minor; - - return master; -} - -struct drm_master *drm_master_get(struct drm_master *master) -{ - kref_get(&master->refcount); - return master; -} -EXPORT_SYMBOL(drm_master_get); - -static void drm_master_destroy(struct kref *kref) -{ - struct drm_master *master = container_of(kref, struct drm_master, refcount); - struct drm_magic_entry *pt, *next; - struct drm_device *dev = master->minor->dev; - struct drm_map_list *r_list, *list_temp; - - mutex_lock(&dev->struct_mutex); - if (dev->driver->master_destroy) - dev->driver->master_destroy(dev, master); - - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { - if (r_list->master == master) { - drm_rmmap_locked(dev, r_list->map); - r_list = NULL; - } - } - - if (master->unique) { - kfree(master->unique); - master->unique = NULL; - master->unique_len = 0; - } - - list_for_each_entry_safe(pt, next, &master->magicfree, head) { - list_del(&pt->head); - drm_ht_remove_item(&master->magiclist, &pt->hash_item); - kfree(pt); - } - - drm_ht_remove(&master->magiclist); - - mutex_unlock(&dev->struct_mutex); - kfree(master); -} - -void drm_master_put(struct drm_master **master) -{ - kref_put(&(*master)->refcount, drm_master_destroy); - *master = NULL; -} -EXPORT_SYMBOL(drm_master_put); - -int drm_setmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret = 0; - - mutex_lock(&dev->master_mutex); - if (file_priv->is_master) - goto out_unlock; - - if (file_priv->minor->master) { - ret = -EINVAL; - goto out_unlock; - } - - if (!file_priv->master) { - ret = -EINVAL; - goto out_unlock; - } - - file_priv->minor->master = drm_master_get(file_priv->master); - file_priv->is_master = 1; - if (dev->driver->master_set) { - ret = dev->driver->master_set(dev, file_priv, false); - if (unlikely(ret != 0)) { - file_priv->is_master = 0; - drm_master_put(&file_priv->minor->master); - } - } - -out_unlock: - mutex_unlock(&dev->master_mutex); - return ret; -} - -int drm_dropmaster_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - int ret = -EINVAL; - - mutex_lock(&dev->master_mutex); - if (!file_priv->is_master) - goto out_unlock; - - if (!file_priv->minor->master) - goto out_unlock; - - ret = 0; - if (dev->driver->master_drop) - dev->driver->master_drop(dev, file_priv, false); - drm_master_put(&file_priv->minor->master); - file_priv->is_master = 0; - -out_unlock: - mutex_unlock(&dev->master_mutex); - return ret; -} - -/* - * DRM Minors - * A DRM device can provide several char-dev interfaces on the DRM-Major. Each - * of them is represented by a drm_minor object. Depending on the capabilities - * of the device-driver, different interfaces are registered. - * - * Minors can be accessed via dev->$minor_name. This pointer is either - * NULL or a valid drm_minor pointer and stays valid as long as the device is - * valid. 
This means, DRM minors have the same life-time as the underlying - * device. However, this doesn't mean that the minor is active. Minors are - * registered and unregistered dynamically according to device-state. - */ - -static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, - unsigned int type) -{ - switch (type) { - case DRM_MINOR_LEGACY: - return &dev->primary; - case DRM_MINOR_RENDER: - return &dev->render; - case DRM_MINOR_CONTROL: - return &dev->control; - default: - return NULL; - } -} - -static int drm_minor_alloc(struct drm_device *dev, unsigned int type) -{ - struct drm_minor *minor; - - minor = kzalloc(sizeof(*minor), GFP_KERNEL); - if (!minor) - return -ENOMEM; - - minor->type = type; - minor->dev = dev; - - *drm_minor_get_slot(dev, type) = minor; - return 0; -} - -static void drm_minor_free(struct drm_device *dev, unsigned int type) -{ - struct drm_minor **slot; - - slot = drm_minor_get_slot(dev, type); - if (*slot) { - drm_mode_group_destroy(&(*slot)->mode_group); - kfree(*slot); - *slot = NULL; - } -} - -static int drm_minor_register(struct drm_device *dev, unsigned int type) -{ - struct drm_minor *new_minor; - unsigned long flags; - int ret; - int minor_id; - - DRM_DEBUG("\n"); - - new_minor = *drm_minor_get_slot(dev, type); - if (!new_minor) - return 0; - - idr_preload(GFP_KERNEL); - spin_lock_irqsave(&drm_minor_lock, flags); - minor_id = idr_alloc(&drm_minors_idr, - NULL, - 64 * type, - 64 * (type + 1), - GFP_NOWAIT); - spin_unlock_irqrestore(&drm_minor_lock, flags); - idr_preload_end(); - - if (minor_id < 0) - return minor_id; - - new_minor->index = minor_id; - - ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); - if (ret) { - DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); - goto err_id; - } - - ret = drm_sysfs_device_add(new_minor); - if (ret) { - DRM_ERROR("DRM: Error sysfs_device_add.\n"); - goto err_debugfs; - } - - /* replace NULL with @minor so lookups will succeed from now on */ - spin_lock_irqsave(&drm_minor_lock, flags); - idr_replace(&drm_minors_idr, new_minor, new_minor->index); - spin_unlock_irqrestore(&drm_minor_lock, flags); - - DRM_DEBUG("new minor assigned %d\n", minor_id); - return 0; - -err_debugfs: - drm_debugfs_cleanup(new_minor); -err_id: - spin_lock_irqsave(&drm_minor_lock, flags); - idr_remove(&drm_minors_idr, minor_id); - spin_unlock_irqrestore(&drm_minor_lock, flags); - new_minor->index = 0; - return ret; -} - -static void drm_minor_unregister(struct drm_device *dev, unsigned int type) -{ - struct drm_minor *minor; - unsigned long flags; - - minor = *drm_minor_get_slot(dev, type); - if (!minor || !minor->kdev) - return; - - spin_lock_irqsave(&drm_minor_lock, flags); - idr_remove(&drm_minors_idr, minor->index); - spin_unlock_irqrestore(&drm_minor_lock, flags); - minor->index = 0; - - drm_debugfs_cleanup(minor); - drm_sysfs_device_remove(minor); -} - -/** - * drm_minor_acquire - Acquire a DRM minor - * @minor_id: Minor ID of the DRM-minor - * - * Looks up the given minor-ID and returns the respective DRM-minor object. The - * refence-count of the underlying device is increased so you must release this - * object with drm_minor_release(). - * - * As long as you hold this minor, it is guaranteed that the object and the - * minor->dev pointer will stay valid! However, the device may get unplugged and - * unregistered while you hold the minor. - * - * Returns: - * Pointer to minor-object with increased device-refcount, or PTR_ERR on - * failure. 
- */ -struct drm_minor *drm_minor_acquire(unsigned int minor_id) -{ - struct drm_minor *minor; - unsigned long flags; - - spin_lock_irqsave(&drm_minor_lock, flags); - minor = idr_find(&drm_minors_idr, minor_id); - if (minor) - drm_dev_ref(minor->dev); - spin_unlock_irqrestore(&drm_minor_lock, flags); - - if (!minor) { - return ERR_PTR(-ENODEV); - } else if (drm_device_is_unplugged(minor->dev)) { - drm_dev_unref(minor->dev); - return ERR_PTR(-ENODEV); - } - - return minor; -} - -/** - * drm_minor_release - Release DRM minor - * @minor: Pointer to DRM minor object - * - * Release a minor that was previously acquired via drm_minor_acquire(). - */ -void drm_minor_release(struct drm_minor *minor) -{ - drm_dev_unref(minor->dev); -} - -/** - * drm_put_dev - Unregister and release a DRM device - * @dev: DRM device - * - * Called at module unload time or when a PCI device is unplugged. - * - * Use of this function is discouraged. It will eventually go away completely. - * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead. - * - * Cleans up all DRM device, calling drm_lastclose(). - */ -void drm_put_dev(struct drm_device *dev) -{ - DRM_DEBUG("\n"); - - if (!dev) { - DRM_ERROR("cleanup called no dev\n"); - return; - } - - drm_dev_unregister(dev); - drm_dev_unref(dev); -} -EXPORT_SYMBOL(drm_put_dev); - -void drm_unplug_dev(struct drm_device *dev) -{ - /* for a USB device */ - drm_minor_unregister(dev, DRM_MINOR_LEGACY); - drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); - - mutex_lock(&drm_global_mutex); - - drm_device_set_unplugged(dev); - - if (dev->open_count == 0) { - drm_put_dev(dev); - } - mutex_unlock(&drm_global_mutex); -} -EXPORT_SYMBOL(drm_unplug_dev); - -/* - * DRM internal mount - * We want to be able to allocate our own "struct address_space" to control - * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow - * stand-alone address_space objects, so we need an underlying inode. As there - * is no way to allocate an independent inode easily, we need a fake internal - * VFS mount-point. - * - * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free() - * frees it again. You are allowed to use iget() and iput() to get references to - * the inode. But each drm_fs_inode_new() call must be paired with exactly one - * drm_fs_inode_free() call (which does not have to be the last iput()). - * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it - * between multiple inode-users. You could, technically, call - * iget() + drm_fs_inode_free() directly after alloc and sometime later do an - * iput(), but this way you'd end up with a new vfsmount for each inode. 
- */ - -static int drm_fs_cnt; -static struct vfsmount *drm_fs_mnt; - -static const struct dentry_operations drm_fs_dops = { - .d_dname = simple_dname, -}; - -static const struct super_operations drm_fs_sops = { - .statfs = simple_statfs, -}; - -static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) -{ - return mount_pseudo(fs_type, - "drm:", - &drm_fs_sops, - &drm_fs_dops, - 0x010203ff); -} - -static struct file_system_type drm_fs_type = { - .name = "drm", - .owner = THIS_MODULE, - .mount = drm_fs_mount, - .kill_sb = kill_anon_super, -}; - -static struct inode *drm_fs_inode_new(void) -{ - struct inode *inode; - int r; - - r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt); - if (r < 0) { - DRM_ERROR("Cannot mount pseudo fs: %d\n", r); - return ERR_PTR(r); - } - - inode = alloc_anon_inode(drm_fs_mnt->mnt_sb); - if (IS_ERR(inode)) - simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); - - return inode; -} - -static void drm_fs_inode_free(struct inode *inode) -{ - if (inode) { - iput(inode); - simple_release_fs(&drm_fs_mnt, &drm_fs_cnt); - } -} - -/** - * drm_dev_alloc - Allocate new DRM device - * @driver: DRM driver to allocate device for - * @parent: Parent device object - * - * Allocate and initialize a new DRM device. No device registration is done. - * Call drm_dev_register() to advertice the device to user space and register it - * with other core subsystems. - * - * The initial ref-count of the object is 1. Use drm_dev_ref() and - * drm_dev_unref() to take and drop further ref-counts. - * - * RETURNS: - * Pointer to new DRM device, or NULL if out of memory. - */ -struct drm_device *drm_dev_alloc(struct drm_driver *driver, - struct device *parent) -{ - struct drm_device *dev; - int ret; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return NULL; - - kref_init(&dev->ref); - dev->dev = parent; - dev->driver = driver; - - INIT_LIST_HEAD(&dev->filelist); - INIT_LIST_HEAD(&dev->ctxlist); - INIT_LIST_HEAD(&dev->vmalist); - INIT_LIST_HEAD(&dev->maplist); - INIT_LIST_HEAD(&dev->vblank_event_list); - - spin_lock_init(&dev->buf_lock); - spin_lock_init(&dev->event_lock); - mutex_init(&dev->struct_mutex); - mutex_init(&dev->ctxlist_mutex); - mutex_init(&dev->master_mutex); - - dev->anon_inode = drm_fs_inode_new(); - if (IS_ERR(dev->anon_inode)) { - ret = PTR_ERR(dev->anon_inode); - DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); - goto err_free; - } - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); - if (ret) - goto err_minors; - } - - if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) { - ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); - if (ret) - goto err_minors; - } - - ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY); - if (ret) - goto err_minors; - - if (drm_ht_create(&dev->map_hash, 12)) - goto err_minors; - - ret = drm_ctxbitmap_init(dev); - if (ret) { - DRM_ERROR("Cannot allocate memory for context bitmap.\n"); - goto err_ht; - } - - if (driver->driver_features & DRIVER_GEM) { - ret = drm_gem_init(dev); - if (ret) { - DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); - goto err_ctxbitmap; - } - } - - return dev; - -err_ctxbitmap: - drm_ctxbitmap_cleanup(dev); -err_ht: - drm_ht_remove(&dev->map_hash); -err_minors: - drm_minor_free(dev, DRM_MINOR_LEGACY); - drm_minor_free(dev, DRM_MINOR_RENDER); - drm_minor_free(dev, DRM_MINOR_CONTROL); - drm_fs_inode_free(dev->anon_inode); -err_free: - mutex_destroy(&dev->master_mutex); - kfree(dev); 
- return NULL; -} -EXPORT_SYMBOL(drm_dev_alloc); - -static void drm_dev_release(struct kref *ref) -{ - struct drm_device *dev = container_of(ref, struct drm_device, ref); - - if (dev->driver->driver_features & DRIVER_GEM) - drm_gem_destroy(dev); - - drm_ctxbitmap_cleanup(dev); - drm_ht_remove(&dev->map_hash); - drm_fs_inode_free(dev->anon_inode); - - drm_minor_free(dev, DRM_MINOR_LEGACY); - drm_minor_free(dev, DRM_MINOR_RENDER); - drm_minor_free(dev, DRM_MINOR_CONTROL); - - mutex_destroy(&dev->master_mutex); - kfree(dev->unique); - kfree(dev); -} - -/** - * drm_dev_ref - Take reference of a DRM device - * @dev: device to take reference of or NULL - * - * This increases the ref-count of @dev by one. You *must* already own a - * reference when calling this. Use drm_dev_unref() to drop this reference - * again. - * - * This function never fails. However, this function does not provide *any* - * guarantee whether the device is alive or running. It only provides a - * reference to the object and the memory associated with it. - */ -void drm_dev_ref(struct drm_device *dev) -{ - if (dev) - kref_get(&dev->ref); -} -EXPORT_SYMBOL(drm_dev_ref); - -/** - * drm_dev_unref - Drop reference of a DRM device - * @dev: device to drop reference of or NULL - * - * This decreases the ref-count of @dev by one. The device is destroyed if the - * ref-count drops to zero. - */ -void drm_dev_unref(struct drm_device *dev) -{ - if (dev) - kref_put(&dev->ref, drm_dev_release); -} -EXPORT_SYMBOL(drm_dev_unref); - -/** - * drm_dev_register - Register DRM device - * @dev: Device to register - * @flags: Flags passed to the driver's .load() function - * - * Register the DRM device @dev with the system, advertise device to user-space - * and start normal device operation. @dev must be allocated via drm_dev_alloc() - * previously. - * - * Never call this twice on any device! - * - * RETURNS: - * 0 on success, negative error code on failure. - */ -int drm_dev_register(struct drm_device *dev, unsigned long flags) -{ - int ret; - - mutex_lock(&drm_global_mutex); - - ret = drm_minor_register(dev, DRM_MINOR_CONTROL); - if (ret) - goto err_minors; - - ret = drm_minor_register(dev, DRM_MINOR_RENDER); - if (ret) - goto err_minors; - - ret = drm_minor_register(dev, DRM_MINOR_LEGACY); - if (ret) - goto err_minors; - - if (dev->driver->load) { - ret = dev->driver->load(dev, flags); - if (ret) - goto err_minors; - } - - /* setup grouping for legacy outputs */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = drm_mode_group_init_legacy_group(dev, - &dev->primary->mode_group); - if (ret) - goto err_unload; - } - - ret = 0; - goto out_unlock; - -err_unload: - if (dev->driver->unload) - dev->driver->unload(dev); -err_minors: - drm_minor_unregister(dev, DRM_MINOR_LEGACY); - drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); -out_unlock: - mutex_unlock(&drm_global_mutex); - return ret; -} -EXPORT_SYMBOL(drm_dev_register); - -/** - * drm_dev_unregister - Unregister DRM device - * @dev: Device to unregister - * - * Unregister the DRM device from the system. This does the reverse of - * drm_dev_register() but does not deallocate the device. The caller must call - * drm_dev_unref() to drop their final reference. 
- */ -void drm_dev_unregister(struct drm_device *dev) -{ - struct drm_map_list *r_list, *list_temp; - - drm_lastclose(dev); - - if (dev->driver->unload) - dev->driver->unload(dev); - - if (dev->agp) - drm_pci_agp_destroy(dev); - - drm_vblank_cleanup(dev); - - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) - drm_rmmap(dev, r_list->map); - - drm_minor_unregister(dev, DRM_MINOR_LEGACY); - drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); -} -EXPORT_SYMBOL(drm_dev_unregister); - -/** - * drm_dev_set_unique - Set the unique name of a DRM device - * @dev: device of which to set the unique name - * @fmt: format string for unique name - * - * Sets the unique name of a DRM device using the specified format string and - * a variable list of arguments. Drivers can use this at driver probe time if - * the unique name of the devices they drive is static. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...) -{ - va_list ap; - - kfree(dev->unique); - - va_start(ap, fmt); - dev->unique = kvasprintf(GFP_KERNEL, fmt, ap); - va_end(ap); - - return dev->unique ? 0 : -ENOMEM; -} -EXPORT_SYMBOL(drm_dev_set_unique); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 369b26278e76..ab1a5f6dde8a 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -438,7 +438,6 @@ err_out_files: out: return ret; } -EXPORT_SYMBOL(drm_sysfs_connector_add); /** * drm_sysfs_connector_remove - remove an connector device from sysfs @@ -468,7 +467,6 @@ void drm_sysfs_connector_remove(struct drm_connector *connector) device_unregister(connector->kdev); connector->kdev = NULL; } -EXPORT_SYMBOL(drm_sysfs_connector_remove); /** * drm_sysfs_hotplug_event - generate a DRM uevent @@ -495,71 +493,55 @@ static void drm_sysfs_release(struct device *dev) } /** - * drm_sysfs_device_add - adds a class device to sysfs for a character driver - * @dev: DRM device to be added - * @head: DRM head in question + * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor + * @minor: minor to allocate sysfs device for * - * Add a DRM device to the DRM's device model class. We use @dev's PCI device - * as the parent for the Linux device, and make sure it has a file containing - * the driver we're using (for userspace compatibility). + * This allocates a new sysfs device for @minor and returns it. The device is + * not registered nor linked. The caller has to use device_add() and + * device_del() to register and unregister it. + * + * Note that dev_get_drvdata() on the new device will return the minor. + * However, the device does not hold a ref-count to the minor nor to the + * underlying drm_device. This is unproblematic as long as you access the + * private data only in sysfs callbacks. device_del() disables those + * synchronously, so they cannot be called after you cleanup a minor. 
*/ -int drm_sysfs_device_add(struct drm_minor *minor) +struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) { - char *minor_str; + const char *minor_str; + struct device *kdev; int r; if (minor->type == DRM_MINOR_CONTROL) minor_str = "controlD%d"; - else if (minor->type == DRM_MINOR_RENDER) - minor_str = "renderD%d"; - else - minor_str = "card%d"; - - minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL); - if (!minor->kdev) { - r = -ENOMEM; - goto error; - } - - device_initialize(minor->kdev); - minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index); - minor->kdev->class = drm_class; - minor->kdev->type = &drm_sysfs_device_minor; - minor->kdev->parent = minor->dev->dev; - minor->kdev->release = drm_sysfs_release; - dev_set_drvdata(minor->kdev, minor); - - r = dev_set_name(minor->kdev, minor_str, minor->index); + else if (minor->type == DRM_MINOR_RENDER) + minor_str = "renderD%d"; + else + minor_str = "card%d"; + + kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); + if (!kdev) + return ERR_PTR(-ENOMEM); + + device_initialize(kdev); + kdev->devt = MKDEV(DRM_MAJOR, minor->index); + kdev->class = drm_class; + kdev->type = &drm_sysfs_device_minor; + kdev->parent = minor->dev->dev; + kdev->release = drm_sysfs_release; + dev_set_drvdata(kdev, minor); + + r = dev_set_name(kdev, minor_str, minor->index); if (r < 0) - goto error; - - r = device_add(minor->kdev); - if (r < 0) - goto error; - - return 0; + goto err_free; -error: - DRM_ERROR("device create failed %d\n", r); - put_device(minor->kdev); - return r; -} + return kdev; -/** - * drm_sysfs_device_remove - remove DRM device - * @dev: DRM device to remove - * - * This call unregisters and cleans up a class device that was created with a - * call to drm_sysfs_device_add() - */ -void drm_sysfs_device_remove(struct drm_minor *minor) -{ - if (minor->kdev) - device_unregister(minor->kdev); - minor->kdev = NULL; +err_free: + put_device(kdev); + return ERR_PTR(r); } - /** * drm_class_device_register - Register a struct device in the drm class. * diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 178d2a9672a8..7f9f6f9e9b7e 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -28,6 +28,7 @@ config DRM_EXYNOS_FIMD bool "Exynos DRM FIMD" depends on DRM_EXYNOS && !FB_S3C select FB_MODE_HELPERS + select MFD_SYSCON help Choose this option if you want to use Exynos FIMD for DRM. @@ -52,6 +53,7 @@ config DRM_EXYNOS_DP bool "EXYNOS DRM DP driver support" depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) default DRM_EXYNOS + select DRM_PANEL help This enables support for DP device. 
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index a8ffc8c1477b..4f3c7eb2d37d 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -16,7 +16,6 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/interrupt.h> -#include <linux/delay.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/gpio.h> @@ -28,6 +27,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_panel.h> #include <drm/bridge/ptn3460.h> #include "exynos_drm_drv.h" @@ -41,7 +41,7 @@ struct bridge_init { struct device_node *node; }; -static int exynos_dp_init_dp(struct exynos_dp_device *dp) +static void exynos_dp_init_dp(struct exynos_dp_device *dp) { exynos_dp_reset(dp); @@ -58,8 +58,6 @@ static int exynos_dp_init_dp(struct exynos_dp_device *dp) exynos_dp_init_hpd(dp); exynos_dp_init_aux(dp); - - return 0; } static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) @@ -875,10 +873,24 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg) static void exynos_dp_hotplug(struct work_struct *work) { struct exynos_dp_device *dp; - int ret; dp = container_of(work, struct exynos_dp_device, hotplug_work); + if (dp->drm_dev) + drm_helper_hpd_irq_event(dp->drm_dev); +} + +static void exynos_dp_commit(struct exynos_drm_display *display) +{ + struct exynos_dp_device *dp = display->ctx; + int ret; + + /* Keep the panel disabled while we configure video */ + if (dp->panel) { + if (drm_panel_disable(dp->panel)) + DRM_ERROR("failed to disable the panel\n"); + } + ret = exynos_dp_detect_hpd(dp); if (ret) { /* Cable has been disconnected, we're done */ @@ -909,6 +921,12 @@ static void exynos_dp_hotplug(struct work_struct *work) ret = exynos_dp_config_video(dp); if (ret) dev_err(dp->dev, "unable to config video\n"); + + /* Safe to enable the panel now */ + if (dp->panel) { + if (drm_panel_enable(dp->panel)) + DRM_ERROR("failed to enable the panel\n"); + } } static enum drm_connector_status exynos_dp_detect( @@ -933,15 +951,18 @@ static int exynos_dp_get_modes(struct drm_connector *connector) struct exynos_dp_device *dp = ctx_from_connector(connector); struct drm_display_mode *mode; + if (dp->panel) + return drm_panel_get_modes(dp->panel); + mode = drm_mode_create(connector->dev); if (!mode) { DRM_ERROR("failed to create a new display mode.\n"); return 0; } - drm_display_mode_from_videomode(&dp->panel.vm, mode); - mode->width_mm = dp->panel.width_mm; - mode->height_mm = dp->panel.height_mm; + drm_display_mode_from_videomode(&dp->priv.vm, mode); + mode->width_mm = dp->priv.width_mm; + mode->height_mm = dp->priv.height_mm; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; @@ -1018,10 +1039,13 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); - return 0; + if (dp->panel) + ret = drm_panel_attach(dp->panel, &dp->connector); + + return ret; } static void exynos_dp_phy_init(struct exynos_dp_device *dp) @@ -1050,26 +1074,50 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp) } } -static void exynos_dp_poweron(struct exynos_dp_device *dp) +static void exynos_dp_poweron(struct exynos_drm_display *display) { + struct exynos_dp_device *dp = display->ctx; + if (dp->dpms_mode == 
DRM_MODE_DPMS_ON) return; + if (dp->panel) { + if (drm_panel_prepare(dp->panel)) { + DRM_ERROR("failed to setup the panel\n"); + return; + } + } + clk_prepare_enable(dp->clock); exynos_dp_phy_init(dp); exynos_dp_init_dp(dp); enable_irq(dp->irq); + exynos_dp_commit(display); } -static void exynos_dp_poweroff(struct exynos_dp_device *dp) +static void exynos_dp_poweroff(struct exynos_drm_display *display) { + struct exynos_dp_device *dp = display->ctx; + if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; + if (dp->panel) { + if (drm_panel_disable(dp->panel)) { + DRM_ERROR("failed to disable the panel\n"); + return; + } + } + disable_irq(dp->irq); flush_work(&dp->hotplug_work); exynos_dp_phy_exit(dp); clk_disable_unprepare(dp->clock); + + if (dp->panel) { + if (drm_panel_unprepare(dp->panel)) + DRM_ERROR("failed to turnoff the panel\n"); + } } static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) @@ -1078,12 +1126,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - exynos_dp_poweron(dp); + exynos_dp_poweron(display); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - exynos_dp_poweroff(dp); + exynos_dp_poweroff(display); break; default: break; @@ -1094,6 +1142,7 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) static struct exynos_drm_display_ops exynos_dp_display_ops = { .create_connector = exynos_dp_create_connector, .dpms = exynos_dp_dpms, + .commit = exynos_dp_commit, }; static struct exynos_drm_display exynos_dp_display = { @@ -1201,7 +1250,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) { int ret; - ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm, + ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm, OF_USE_NATIVE_MODE); if (ret) { DRM_ERROR("failed: of_get_videomode() : %d\n", ret); @@ -1215,16 +1264,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm_dev = data; struct resource *res; - struct exynos_dp_device *dp; + struct exynos_dp_device *dp = exynos_dp_display.ctx; unsigned int irq_flags; - int ret = 0; - dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), - GFP_KERNEL); - if (!dp) - return -ENOMEM; - dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; @@ -1236,9 +1279,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - ret = exynos_dp_dt_parse_panel(dp); - if (ret) - return ret; + if (!dp->panel) { + ret = exynos_dp_dt_parse_panel(dp); + if (ret) + return ret; + } dp->clock = devm_clk_get(&pdev->dev, "dp"); if (IS_ERR(dp->clock)) { @@ -1298,7 +1343,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) disable_irq(dp->irq); dp->drm_dev = drm_dev; - exynos_dp_display.ctx = dp; platform_set_drvdata(pdev, &exynos_dp_display); @@ -1325,6 +1369,9 @@ static const struct component_ops exynos_dp_ops = { static int exynos_dp_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; + struct device_node *panel_node; + struct exynos_dp_device *dp; int ret; ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR, @@ -1332,6 +1379,21 @@ static int exynos_dp_probe(struct platform_device *pdev) if (ret) return ret; + dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), + GFP_KERNEL); + if (!dp) + return -ENOMEM; + + panel_node = of_parse_phandle(dev->of_node, 
"panel", 0); + if (panel_node) { + dp->panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!dp->panel) + return -EPROBE_DEFER; + } + + exynos_dp_display.ctx = dp; + ret = component_add(&pdev->dev, &exynos_dp_ops); if (ret) exynos_drm_component_del(&pdev->dev, @@ -1376,6 +1438,7 @@ static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, }; +MODULE_DEVICE_TABLE(of, exynos_dp_match); struct platform_driver dp_driver = { .probe = exynos_dp_probe, @@ -1390,4 +1453,4 @@ struct platform_driver dp_driver = { MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); MODULE_DESCRIPTION("Samsung SoC DP Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h index 02cc4f9ab903..a1aee6931bd7 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -149,6 +149,7 @@ struct exynos_dp_device { struct drm_device *drm_dev; struct drm_connector connector; struct drm_encoder *encoder; + struct drm_panel *panel; struct clk *clock; unsigned int irq; void __iomem *reg_base; @@ -162,7 +163,7 @@ struct exynos_dp_device { int dpms_mode; int hpd_gpio; - struct exynos_drm_panel_info panel; + struct exynos_drm_panel_info priv; }; /* exynos_dp_reg.c */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 9a16dbe121d1..ba9b3d5ed672 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -117,20 +117,7 @@ static struct drm_encoder *exynos_drm_best_encoder( struct drm_device *dev = connector->dev; struct exynos_drm_connector *exynos_connector = to_exynos_connector(connector); - struct drm_mode_object *obj; - struct drm_encoder *encoder; - - obj = drm_mode_object_find(dev, exynos_connector->encoder_id, - DRM_MODE_OBJECT_ENCODER); - if (!obj) { - DRM_DEBUG_KMS("Unknown ENCODER ID %d\n", - exynos_connector->encoder_id); - return NULL; - } - - encoder = obj_to_encoder(obj); - - return encoder; + return drm_encoder_find(dev, exynos_connector->encoder_id); } static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { @@ -185,7 +172,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector) struct exynos_drm_connector *exynos_connector = to_exynos_connector(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(exynos_connector); } @@ -230,7 +217,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, drm_connector_init(dev, connector, &exynos_connector_funcs, type); drm_connector_helper_add(connector, &exynos_connector_helper_funcs); - err = drm_sysfs_connector_add(connector); + err = drm_connector_register(connector); if (err) goto err_connector; @@ -250,7 +237,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, return connector; err_sysfs: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); err_connector: drm_connector_cleanup(connector); kfree(exynos_connector); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 95c9435d0266..b68e58f78cd1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -69,8 +69,10 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) if (mode > DRM_MODE_DPMS_ON) { /* wait for the 
completion of page flip. */ - wait_event(exynos_crtc->pending_flip_queue, - atomic_read(&exynos_crtc->pending_flip) == 0); + if (!wait_event_timeout(exynos_crtc->pending_flip_queue, + !atomic_read(&exynos_crtc->pending_flip), + HZ/20)) + atomic_set(&exynos_crtc->pending_flip, 0); drm_vblank_off(crtc->dev, exynos_crtc->pipe); } @@ -259,6 +261,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, spin_lock_irq(&dev->event_lock); drm_vblank_put(dev, exynos_crtc->pipe); list_del(&event->base.link); + atomic_set(&exynos_crtc->pending_flip, 0); spin_unlock_irq(&dev->event_lock); goto out; @@ -508,3 +511,11 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, return -EPERM; } + +void exynos_drm_crtc_te_handler(struct drm_crtc *crtc) +{ + struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager; + + if (manager->ops->te_handler) + manager->ops->te_handler(manager); +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 9f74b10a8a01..690dcddab725 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -36,4 +36,11 @@ void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos); int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, unsigned int out_type); +/* + * This function calls the crtc device(manager)'s te_handler() callback + * to trigger to transfer video image at the tearing effect synchronization + * signal. + */ +void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); + #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 9e530f205ad2..fa08f05e3e34 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -48,7 +48,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force) static void exynos_dpi_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -117,7 +117,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; @@ -125,14 +125,18 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display, static void exynos_dpi_poweron(struct exynos_dpi *ctx) { - if (ctx->panel) + if (ctx->panel) { + drm_panel_prepare(ctx->panel); drm_panel_enable(ctx->panel); + } } static void exynos_dpi_poweroff(struct exynos_dpi *ctx) { - if (ctx->panel) + if (ctx->panel) { drm_panel_disable(ctx->panel); + drm_panel_unprepare(ctx->panel); + } } static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index ab7d182063c3..0d74e9b99c4e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -39,8 +39,6 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -#define VBLANK_OFF_DELAY 50000 - static struct platform_device *exynos_drm_pdev; static DEFINE_MUTEX(drm_component_lock); @@ -103,8 +101,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) /* setup possible_clones. 
*/ exynos_drm_encoder_setup(dev); - drm_vblank_offdelay = VBLANK_OFF_DELAY; - platform_set_drvdata(dev->platformdev, dev); /* Try to bind all sub drivers. */ @@ -362,7 +358,7 @@ static int exynos_drm_sys_suspend(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); pm_message_t message; - if (pm_runtime_suspended(dev)) + if (pm_runtime_suspended(dev) || !drm_dev) return 0; message.event = PM_EVENT_SUSPEND; @@ -373,7 +369,7 @@ static int exynos_drm_sys_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - if (pm_runtime_suspended(dev)) + if (pm_runtime_suspended(dev) || !drm_dev) return 0; return exynos_drm_resume(drm_dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 06cde4506278..69a6fa397d75 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -40,8 +40,6 @@ struct drm_device; struct exynos_drm_overlay; struct drm_connector; -extern unsigned int drm_vblank_offdelay; - /* This enumerates device type. */ enum exynos_drm_device_type { EXYNOS_DEVICE_TYPE_NONE, @@ -188,6 +186,8 @@ struct exynos_drm_display { * @win_commit: apply hardware specific overlay data to registers. * @win_enable: enable hardware specific overlay. * @win_disable: disable hardware specific overlay. + * @te_handler: trigger to transfer video image at the tearing effect + * synchronization signal if there is a page flip request. */ struct exynos_drm_manager; struct exynos_drm_manager_ops { @@ -206,6 +206,7 @@ struct exynos_drm_manager_ops { void (*win_commit)(struct exynos_drm_manager *mgr, int zpos); void (*win_enable)(struct exynos_drm_manager *mgr, int zpos); void (*win_disable)(struct exynos_drm_manager *mgr, int zpos); + void (*te_handler)(struct exynos_drm_manager *mgr); }; /* @@ -236,14 +237,9 @@ struct exynos_drm_g2d_private { struct list_head userptr_list; }; -struct exynos_drm_ipp_private { - struct device *dev; - struct list_head event_list; -}; - struct drm_exynos_file_private { struct exynos_drm_g2d_private *g2d_priv; - struct exynos_drm_ipp_private *ipp_priv; + struct device *ipp_dev; struct file *anon_filp; }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 6302aa64f6c1..442aa2d00132 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -16,7 +16,10 @@ #include <drm/drm_panel.h> #include <linux/clk.h> +#include <linux/gpio/consumer.h> #include <linux/irq.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> #include <linux/phy/phy.h> #include <linux/regulator/consumer.h> #include <linux/component.h> @@ -24,6 +27,7 @@ #include <video/mipi_display.h> #include <video/videomode.h> +#include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" /* returns true iff both arguments logically differs */ @@ -54,9 +58,12 @@ /* FIFO memory AC characteristic register */ #define DSIM_PLLCTRL_REG 0x4c /* PLL control register */ -#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */ #define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */ #define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */ +#define DSIM_PHYCTRL_REG 0x5c +#define DSIM_PHYTIMING_REG 0x64 +#define DSIM_PHYTIMING1_REG 0x68 +#define DSIM_PHYTIMING2_REG 0x6c /* DSIM_STATUS */ #define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0) @@ -200,6 +207,24 @@ #define DSIM_PLL_M(x) ((x) << 4) #define DSIM_PLL_S(x) ((x) << 1) +/* DSIM_PHYCTRL */ +#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 
0) + +/* DSIM_PHYTIMING */ +#define DSIM_PHYTIMING_LPX(x) ((x) << 8) +#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0) + +/* DSIM_PHYTIMING1 */ +#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24) +#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16) +#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8) +#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0) + +/* DSIM_PHYTIMING2 */ +#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16) +#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8) +#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0) + #define DSI_MAX_BUS_WIDTH 4 #define DSI_NUM_VIRTUAL_CHANNELS 4 #define DSI_TX_FIFO_SIZE 2048 @@ -233,6 +258,12 @@ struct exynos_dsi_transfer { #define DSIM_STATE_INITIALIZED BIT(1) #define DSIM_STATE_CMD_LPM BIT(2) +struct exynos_dsi_driver_data { + unsigned int plltmr_reg; + + unsigned int has_freqband:1; +}; + struct exynos_dsi { struct mipi_dsi_host dsi_host; struct drm_connector connector; @@ -247,6 +278,7 @@ struct exynos_dsi { struct clk *bus_clk; struct regulator_bulk_data supplies[2]; int irq; + int te_gpio; u32 pll_clk_rate; u32 burst_clk_rate; @@ -262,11 +294,39 @@ struct exynos_dsi { spinlock_t transfer_lock; /* protects transfer_list */ struct list_head transfer_list; + + struct exynos_dsi_driver_data *driver_data; }; #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) +static struct exynos_dsi_driver_data exynos4_dsi_driver_data = { + .plltmr_reg = 0x50, + .has_freqband = 1, +}; + +static struct exynos_dsi_driver_data exynos5_dsi_driver_data = { + .plltmr_reg = 0x58, +}; + +static struct of_device_id exynos_dsi_of_match[] = { + { .compatible = "samsung,exynos4210-mipi-dsi", + .data = &exynos4_dsi_driver_data }, + { .compatible = "samsung,exynos5410-mipi-dsi", + .data = &exynos5_dsi_driver_data }, + { } +}; + +static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data( + struct platform_device *pdev) +{ + const struct of_device_id *of_id = + of_match_device(exynos_dsi_of_match, &pdev->dev); + + return (struct exynos_dsi_driver_data *)of_id->data; +} + static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi) { if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300))) @@ -340,14 +400,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi, static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi, unsigned long freq) { - static const unsigned long freq_bands[] = { - 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ, - 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ, - 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ, - 770 * MHZ, 870 * MHZ, 950 * MHZ, - }; + struct exynos_dsi_driver_data *driver_data = dsi->driver_data; unsigned long fin, fout; - int timeout, band; + int timeout; u8 p, s; u16 m; u32 reg; @@ -368,18 +423,30 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi, "failed to find PLL PMS for requested frequency\n"); return -EFAULT; } + dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s); - for (band = 0; band < ARRAY_SIZE(freq_bands); ++band) - if (fout < freq_bands[band]) - break; + writel(500, dsi->reg_base + driver_data->plltmr_reg); + + reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s); - dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout, - p, m, s, band); + if (driver_data->has_freqband) { + static const unsigned long freq_bands[] = { + 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ, + 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ, + 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * 
MHZ, + 770 * MHZ, 870 * MHZ, 950 * MHZ, + }; + int band; - writel(500, dsi->reg_base + DSIM_PLLTMR_REG); + for (band = 0; band < ARRAY_SIZE(freq_bands); ++band) + if (fout < freq_bands[band]) + break; + + dev_dbg(dsi->dev, "band %d\n", band); + + reg |= DSIM_FREQ_BAND(band); + } - reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN - | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s); writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG); timeout = 1000; @@ -433,6 +500,59 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi) return 0; } +static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) +{ + struct exynos_dsi_driver_data *driver_data = dsi->driver_data; + u32 reg; + + if (driver_data->has_freqband) + return; + + /* B D-PHY: D-PHY Master & Slave Analog Block control */ + reg = DSIM_PHYCTRL_ULPS_EXIT(0x0af); + writel(reg, dsi->reg_base + DSIM_PHYCTRL_REG); + + /* + * T LPX: Transmitted length of any Low-Power state period + * T HS-EXIT: Time that the transmitter drives LP-11 following a HS + * burst + */ + reg = DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b); + writel(reg, dsi->reg_base + DSIM_PHYTIMING_REG); + + /* + * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00 + * Line state immediately before the HS-0 Line state starting the + * HS transmission + * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to + * transmitting the Clock. + * T CLK_POST: Time that the transmitter continues to send HS clock + * after the last associated Data Lane has transitioned to LP Mode + * Interval is defined as the period from the end of T HS-TRAIL to + * the beginning of T CLK-TRAIL + * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after + * the last payload clock bit of a HS transmission burst + */ + reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) | + DSIM_PHYTIMING1_CLK_ZERO(0x27) | + DSIM_PHYTIMING1_CLK_POST(0x0d) | + DSIM_PHYTIMING1_CLK_TRAIL(0x08); + writel(reg, dsi->reg_base + DSIM_PHYTIMING1_REG); + + /* + * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00 + * Line state immediately before the HS-0 Line state starting the + * HS transmission + * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to + * transmitting the Sync sequence. + * T HS-TRAIL: Time that the transmitter drives the flipped differential + * state after last payload data bit of a HS transmission burst + */ + reg = DSIM_PHYTIMING2_HS_PREPARE(0x09) | DSIM_PHYTIMING2_HS_ZERO(0x0d) | + DSIM_PHYTIMING2_HS_TRAIL(0x0b); + writel(reg, dsi->reg_base + DSIM_PHYTIMING2_REG); +} + static void exynos_dsi_disable_clock(struct exynos_dsi *dsi) { u32 reg; @@ -468,13 +588,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi) /* DSI configuration */ reg = 0; + /* + * The first bit of mode_flags specifies display configuration. + * If this bit is set[= MIPI_DSI_MODE_VIDEO], dsi will support video + * mode, otherwise it will support command mode. + */ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { reg |= DSIM_VIDEO_MODE; + /* + * The user manual describes that following bits are ignored in + * command mode. 
+ */ if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH)) reg |= DSIM_MFLUSH_VS; - if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) - reg |= DSIM_EOT_DISABLE; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) reg |= DSIM_SYNC_INFORM; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) @@ -491,6 +618,9 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi) reg |= DSIM_HSA_MODE; } + if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)) + reg |= DSIM_EOT_DISABLE; + switch (dsi->format) { case MIPI_DSI_FMT_RGB888: reg |= DSIM_MAIN_PIX_FORMAT_RGB888; @@ -944,17 +1074,90 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id) +{ + struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id; + struct drm_encoder *encoder = dsi->encoder; + + if (dsi->state & DSIM_STATE_ENABLED) + exynos_drm_crtc_te_handler(encoder->crtc); + + return IRQ_HANDLED; +} + +static void exynos_dsi_enable_irq(struct exynos_dsi *dsi) +{ + enable_irq(dsi->irq); + + if (gpio_is_valid(dsi->te_gpio)) + enable_irq(gpio_to_irq(dsi->te_gpio)); +} + +static void exynos_dsi_disable_irq(struct exynos_dsi *dsi) +{ + if (gpio_is_valid(dsi->te_gpio)) + disable_irq(gpio_to_irq(dsi->te_gpio)); + + disable_irq(dsi->irq); +} + static int exynos_dsi_init(struct exynos_dsi *dsi) { - exynos_dsi_enable_clock(dsi); exynos_dsi_reset(dsi); - enable_irq(dsi->irq); + exynos_dsi_enable_irq(dsi); + exynos_dsi_enable_clock(dsi); exynos_dsi_wait_for_reset(dsi); + exynos_dsi_set_phy_ctrl(dsi); exynos_dsi_init_link(dsi); return 0; } +static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi) +{ + int ret; + + dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); + if (!gpio_is_valid(dsi->te_gpio)) { + dev_err(dsi->dev, "no te-gpios specified\n"); + ret = dsi->te_gpio; + goto out; + } + + ret = gpio_request_one(dsi->te_gpio, GPIOF_IN, "te_gpio"); + if (ret) { + dev_err(dsi->dev, "gpio request failed with %d\n", ret); + goto out; + } + + /* + * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because panel + * calls drm_panel_init() first then calls mipi_dsi_attach() in probe(). + * It means that te_gpio is invalid when exynos_dsi_enable_irq() is + * called by drm_panel_init() before panel is attached. + */ + ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio), + exynos_dsi_te_irq_handler, NULL, + IRQF_TRIGGER_RISING, "TE", dsi); + if (ret) { + dev_err(dsi->dev, "request interrupt failed with %d\n", ret); + gpio_free(dsi->te_gpio); + goto out; + } + +out: + return ret; +} + +static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi) +{ + if (gpio_is_valid(dsi->te_gpio)) { + free_irq(gpio_to_irq(dsi->te_gpio), dsi); + gpio_free(dsi->te_gpio); + dsi->te_gpio = -ENOENT; + } +} + static int exynos_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { @@ -968,6 +1171,19 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host, if (dsi->connector.dev) drm_helper_hpd_irq_event(dsi->connector.dev); + /* + * This is a temporary solution and should be made by more generic way. + * + * If attached panel device is for command mode one, dsi should register + * TE interrupt handler. 
+ */ + if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) { + int ret = exynos_dsi_register_te_irq(dsi); + + if (ret) + return ret; + } + return 0; } @@ -976,6 +1192,8 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host, { struct exynos_dsi *dsi = host_to_dsi(host); + exynos_dsi_unregister_te_irq(dsi); + dsi->panel_node = NULL; if (dsi->connector.dev) @@ -1089,7 +1307,7 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi) exynos_dsi_disable_clock(dsi); - disable_irq(dsi->irq); + exynos_dsi_disable_irq(dsi); } dsi->state &= ~DSIM_STATE_CMD_LPM; @@ -1115,7 +1333,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi) if (ret < 0) return ret; - ret = drm_panel_enable(dsi->panel); + ret = drm_panel_prepare(dsi->panel); if (ret < 0) { exynos_dsi_poweroff(dsi); return ret; @@ -1124,6 +1342,14 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi) exynos_dsi_set_display_mode(dsi); exynos_dsi_set_display_enable(dsi, true); + ret = drm_panel_enable(dsi->panel); + if (ret < 0) { + exynos_dsi_set_display_enable(dsi, false); + drm_panel_unprepare(dsi->panel); + exynos_dsi_poweroff(dsi); + return ret; + } + dsi->state |= DSIM_STATE_ENABLED; return 0; @@ -1134,8 +1360,9 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi) if (!(dsi->state & DSIM_STATE_ENABLED)) return; - exynos_dsi_set_display_enable(dsi, false); drm_panel_disable(dsi->panel); + exynos_dsi_set_display_enable(dsi, false); + drm_panel_unprepare(dsi->panel); exynos_dsi_poweroff(dsi); dsi->state &= ~DSIM_STATE_ENABLED; @@ -1246,7 +1473,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; @@ -1278,6 +1505,7 @@ static struct exynos_drm_display exynos_dsi_display = { .type = EXYNOS_DISPLAY_TYPE_LCD, .ops = &exynos_dsi_display_ops, }; +MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); /* of_* functions will be removed after merge of of_graph patches */ static struct device_node * @@ -1435,6 +1663,9 @@ static int exynos_dsi_probe(struct platform_device *pdev) goto err_del_component; } + /* To be checked as invalid one */ + dsi->te_gpio = -ENOENT; + init_completion(&dsi->completed); spin_lock_init(&dsi->transfer_lock); INIT_LIST_HEAD(&dsi->transfer_list); @@ -1443,6 +1674,7 @@ static int exynos_dsi_probe(struct platform_device *pdev) dsi->dsi_host.dev = &pdev->dev; dsi->dev = &pdev->dev; + dsi->driver_data = exynos_dsi_get_driver_data(pdev); ret = exynos_dsi_parse_dt(dsi); if (ret) @@ -1525,11 +1757,6 @@ static int exynos_dsi_remove(struct platform_device *pdev) return 0; } -static struct of_device_id exynos_dsi_of_match[] = { - { .compatible = "samsung,exynos4210-mipi-dsi" }, - { } -}; - struct platform_driver dsi_driver = { .probe = exynos_dsi_probe, .remove = exynos_dsi_remove, diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index d771b467cf0c..32e63f60e1d1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -225,7 +225,7 @@ out: return ret; } -static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { +static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { .fb_probe = exynos_drm_fbdev_create, }; @@ -266,7 +266,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev) return -ENOMEM; private->fb_helper = helper = &fbdev->drm_fb_helper; - helper->funcs = 
&exynos_drm_fb_helper_funcs; + + drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs); num_crtc = dev->mode_config.num_crtc; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 831dde9034c6..ec7cc9ea50df 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1887,6 +1887,7 @@ static const struct of_device_id fimc_of_match[] = { { .compatible = "samsung,exynos4212-fimc" }, { }, }; +MODULE_DEVICE_TABLE(of, fimc_of_match); struct platform_driver fimc_driver = { .probe = fimc_probe, diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 33161ad38201..5d09e33fef87 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -20,6 +20,8 @@ #include <linux/of_device.h> #include <linux/pm_runtime.h> #include <linux/component.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> #include <video/of_display_timing.h> #include <video/of_videomode.h> @@ -61,6 +63,24 @@ /* color key value register for hardware window 1 ~ 4. */ #define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8)) +/* I80 / RGB trigger control register */ +#define TRIGCON 0x1A4 +#define TRGMODE_I80_RGB_ENABLE_I80 (1 << 0) +#define SWTRGCMD_I80_RGB_ENABLE (1 << 1) + +/* display mode change control register except exynos4 */ +#define VIDOUT_CON 0x000 +#define VIDOUT_CON_F_I80_LDI0 (0x2 << 8) + +/* I80 interface control for main LDI register */ +#define I80IFCONFAx(x) (0x1B0 + (x) * 4) +#define I80IFCONFBx(x) (0x1B8 + (x) * 4) +#define LCD_CS_SETUP(x) ((x) << 16) +#define LCD_WR_SETUP(x) ((x) << 12) +#define LCD_WR_ACTIVE(x) ((x) << 8) +#define LCD_WR_HOLD(x) ((x) << 4) +#define I80IFEN_ENABLE (1 << 0) + /* FIMD has totally five hardware windows. 
*/ #define WINDOWS_NR 5 @@ -68,10 +88,14 @@ struct fimd_driver_data { unsigned int timing_base; + unsigned int lcdblk_offset; + unsigned int lcdblk_vt_shift; + unsigned int lcdblk_bypass_shift; unsigned int has_shadowcon:1; unsigned int has_clksel:1; unsigned int has_limited_fmt:1; + unsigned int has_vidoutcon:1; }; static struct fimd_driver_data s3c64xx_fimd_driver_data = { @@ -82,12 +106,19 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = { static struct fimd_driver_data exynos4_fimd_driver_data = { .timing_base = 0x0, + .lcdblk_offset = 0x210, + .lcdblk_vt_shift = 10, + .lcdblk_bypass_shift = 1, .has_shadowcon = 1, }; static struct fimd_driver_data exynos5_fimd_driver_data = { .timing_base = 0x20000, + .lcdblk_offset = 0x214, + .lcdblk_vt_shift = 24, + .lcdblk_bypass_shift = 15, .has_shadowcon = 1, + .has_vidoutcon = 1, }; struct fimd_win_data { @@ -112,15 +143,22 @@ struct fimd_context { struct clk *bus_clk; struct clk *lcd_clk; void __iomem *regs; + struct regmap *sysreg; struct drm_display_mode mode; struct fimd_win_data win_data[WINDOWS_NR]; unsigned int default_win; unsigned long irq_flags; + u32 vidcon0; u32 vidcon1; + u32 vidout_con; + u32 i80ifcon; + bool i80_if; bool suspended; int pipe; wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; + atomic_t win_updated; + atomic_t triggering; struct exynos_drm_panel_info panel; struct fimd_driver_data *driver_data; @@ -136,6 +174,7 @@ static const struct of_device_id fimd_driver_dt_match[] = { .data = &exynos5_fimd_driver_data }, {}, }; +MODULE_DEVICE_TABLE(of, fimd_driver_dt_match); static inline struct fimd_driver_data *drm_fimd_get_driver_data( struct platform_device *pdev) @@ -243,6 +282,14 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh; u32 clkdiv; + if (ctx->i80_if) { + /* + * The frame done interrupt should be occurred prior to the + * next TE signal. + */ + ideal_clk *= 2; + } + /* Find the clock divider value that gets us closest to ideal_clk */ clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk); @@ -271,11 +318,10 @@ static void fimd_commit(struct exynos_drm_manager *mgr) { struct fimd_context *ctx = mgr->ctx; struct drm_display_mode *mode = &ctx->mode; - struct fimd_driver_data *driver_data; - u32 val, clkdiv, vidcon1; - int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd; + struct fimd_driver_data *driver_data = ctx->driver_data; + void *timing_base = ctx->regs + driver_data->timing_base; + u32 val, clkdiv; - driver_data = ctx->driver_data; if (ctx->suspended) return; @@ -283,33 +329,65 @@ static void fimd_commit(struct exynos_drm_manager *mgr) if (mode->htotal == 0 || mode->vtotal == 0) return; - /* setup polarity values */ - vidcon1 = ctx->vidcon1; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) - vidcon1 |= VIDCON1_INV_VSYNC; - if (mode->flags & DRM_MODE_FLAG_NHSYNC) - vidcon1 |= VIDCON1_INV_HSYNC; - writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); - - /* setup vertical timing values. */ - vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; - vbpd = mode->crtc_vtotal - mode->crtc_vsync_end; - vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay; - - val = VIDTCON0_VBPD(vbpd - 1) | - VIDTCON0_VFPD(vfpd - 1) | - VIDTCON0_VSPW(vsync_len - 1); - writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); - - /* setup horizontal timing values. 
*/ - hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; - hbpd = mode->crtc_htotal - mode->crtc_hsync_end; - hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay; - - val = VIDTCON1_HBPD(hbpd - 1) | - VIDTCON1_HFPD(hfpd - 1) | - VIDTCON1_HSPW(hsync_len - 1); - writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); + if (ctx->i80_if) { + val = ctx->i80ifcon | I80IFEN_ENABLE; + writel(val, timing_base + I80IFCONFAx(0)); + + /* disable auto frame rate */ + writel(0, timing_base + I80IFCONFBx(0)); + + /* set video type selection to I80 interface */ + if (ctx->sysreg && regmap_update_bits(ctx->sysreg, + driver_data->lcdblk_offset, + 0x3 << driver_data->lcdblk_vt_shift, + 0x1 << driver_data->lcdblk_vt_shift)) { + DRM_ERROR("Failed to update sysreg for I80 i/f.\n"); + return; + } + } else { + int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd; + u32 vidcon1; + + /* setup polarity values */ + vidcon1 = ctx->vidcon1; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + vidcon1 |= VIDCON1_INV_VSYNC; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + vidcon1 |= VIDCON1_INV_HSYNC; + writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); + + /* setup vertical timing values. */ + vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start; + vbpd = mode->crtc_vtotal - mode->crtc_vsync_end; + vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay; + + val = VIDTCON0_VBPD(vbpd - 1) | + VIDTCON0_VFPD(vfpd - 1) | + VIDTCON0_VSPW(vsync_len - 1); + writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); + + /* setup horizontal timing values. */ + hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start; + hbpd = mode->crtc_htotal - mode->crtc_hsync_end; + hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay; + + val = VIDTCON1_HBPD(hbpd - 1) | + VIDTCON1_HFPD(hfpd - 1) | + VIDTCON1_HSPW(hsync_len - 1); + writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); + } + + if (driver_data->has_vidoutcon) + writel(ctx->vidout_con, timing_base + VIDOUT_CON); + + /* set bypass selection */ + if (ctx->sysreg && regmap_update_bits(ctx->sysreg, + driver_data->lcdblk_offset, + 0x1 << driver_data->lcdblk_bypass_shift, + 0x1 << driver_data->lcdblk_bypass_shift)) { + DRM_ERROR("Failed to update sysreg for bypass setting.\n"); + return; + } /* setup horizontal and vertical display size. 
*/ val = VIDTCON2_LINEVAL(mode->vdisplay - 1) | @@ -322,7 +400,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr) * fields of register with prefix '_F' would be updated * at vsync(same as dma start) */ - val = VIDCON0_ENVID | VIDCON0_ENVID_F; + val = ctx->vidcon0; + val |= VIDCON0_ENVID | VIDCON0_ENVID_F; if (ctx->driver_data->has_clksel) val |= VIDCON0_CLKSEL_LCD; @@ -660,6 +739,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos) } win_data->enabled = true; + + if (ctx->i80_if) + atomic_set(&ctx->win_updated, 1); } static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos) @@ -838,6 +920,58 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode) } } +static void fimd_trigger(struct device *dev) +{ + struct exynos_drm_manager *mgr = get_fimd_manager(dev); + struct fimd_context *ctx = mgr->ctx; + struct fimd_driver_data *driver_data = ctx->driver_data; + void *timing_base = ctx->regs + driver_data->timing_base; + u32 reg; + + atomic_set(&ctx->triggering, 1); + + reg = readl(ctx->regs + VIDINTCON0); + reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE | + VIDINTCON0_INT_SYSMAINCON); + writel(reg, ctx->regs + VIDINTCON0); + + reg = readl(timing_base + TRIGCON); + reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE); + writel(reg, timing_base + TRIGCON); +} + +static void fimd_te_handler(struct exynos_drm_manager *mgr) +{ + struct fimd_context *ctx = mgr->ctx; + + /* Checks the crtc is detached already from encoder */ + if (ctx->pipe < 0 || !ctx->drm_dev) + return; + + /* + * Skips to trigger if in triggering state, because multiple triggering + * requests can cause panel reset. + */ + if (atomic_read(&ctx->triggering)) + return; + + /* + * If there is a page flip request, triggers and handles the page flip + * event so that current fb can be updated into panel GRAM. + */ + if (atomic_add_unless(&ctx->win_updated, -1, 0)) + fimd_trigger(ctx->dev); + + /* Wakes up vsync event queue */ + if (atomic_read(&ctx->wait_vsync_event)) { + atomic_set(&ctx->wait_vsync_event, 0); + wake_up(&ctx->wait_vsync_queue); + + if (!atomic_read(&ctx->triggering)) + drm_handle_vblank(ctx->drm_dev, ctx->pipe); + } +} + static struct exynos_drm_manager_ops fimd_manager_ops = { .dpms = fimd_dpms, .mode_fixup = fimd_mode_fixup, @@ -849,6 +983,7 @@ static struct exynos_drm_manager_ops fimd_manager_ops = { .win_mode_set = fimd_win_mode_set, .win_commit = fimd_win_commit, .win_disable = fimd_win_disable, + .te_handler = fimd_te_handler, }; static struct exynos_drm_manager fimd_manager = { @@ -859,26 +994,40 @@ static struct exynos_drm_manager fimd_manager = { static irqreturn_t fimd_irq_handler(int irq, void *dev_id) { struct fimd_context *ctx = (struct fimd_context *)dev_id; - u32 val; + u32 val, clear_bit; val = readl(ctx->regs + VIDINTCON1); - if (val & VIDINTCON1_INT_FRAME) - /* VSYNC interrupt */ - writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); + clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME; + if (val & clear_bit) + writel(clear_bit, ctx->regs + VIDINTCON1); /* check the crtc is detached already from encoder */ if (ctx->pipe < 0 || !ctx->drm_dev) goto out; - drm_handle_vblank(ctx->drm_dev, ctx->pipe); - exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + if (ctx->i80_if) { + /* unset I80 frame done interrupt */ + val = readl(ctx->regs + VIDINTCON0); + val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON); + writel(val, ctx->regs + VIDINTCON0); - /* set wait vsync event to zero and wake up queue. 
*/ - if (atomic_read(&ctx->wait_vsync_event)) { - atomic_set(&ctx->wait_vsync_event, 0); - wake_up(&ctx->wait_vsync_queue); + /* exit triggering mode */ + atomic_set(&ctx->triggering, 0); + + drm_handle_vblank(ctx->drm_dev, ctx->pipe); + exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + } else { + drm_handle_vblank(ctx->drm_dev, ctx->pipe); + exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); + + /* set wait vsync event to zero and wake up queue. */ + if (atomic_read(&ctx->wait_vsync_event)) { + atomic_set(&ctx->wait_vsync_event, 0); + wake_up(&ctx->wait_vsync_queue); + } } + out: return IRQ_HANDLED; } @@ -923,6 +1072,7 @@ static int fimd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimd_context *ctx; + struct device_node *i80_if_timings; struct resource *res; int ret = -EINVAL; @@ -944,12 +1094,51 @@ static int fimd_probe(struct platform_device *pdev) ctx->dev = dev; ctx->suspended = true; + ctx->driver_data = drm_fimd_get_driver_data(pdev); if (of_property_read_bool(dev->of_node, "samsung,invert-vden")) ctx->vidcon1 |= VIDCON1_INV_VDEN; if (of_property_read_bool(dev->of_node, "samsung,invert-vclk")) ctx->vidcon1 |= VIDCON1_INV_VCLK; + i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings"); + if (i80_if_timings) { + u32 val; + + ctx->i80_if = true; + + if (ctx->driver_data->has_vidoutcon) + ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0; + else + ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0; + /* + * The user manual describes that this "DSI_EN" bit is required + * to enable I80 24-bit data interface. + */ + ctx->vidcon0 |= VIDCON0_DSI_EN; + + if (of_property_read_u32(i80_if_timings, "cs-setup", &val)) + val = 0; + ctx->i80ifcon = LCD_CS_SETUP(val); + if (of_property_read_u32(i80_if_timings, "wr-setup", &val)) + val = 0; + ctx->i80ifcon |= LCD_WR_SETUP(val); + if (of_property_read_u32(i80_if_timings, "wr-active", &val)) + val = 1; + ctx->i80ifcon |= LCD_WR_ACTIVE(val); + if (of_property_read_u32(i80_if_timings, "wr-hold", &val)) + val = 0; + ctx->i80ifcon |= LCD_WR_HOLD(val); + } + of_node_put(i80_if_timings); + + ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,sysreg"); + if (IS_ERR(ctx->sysreg)) { + dev_warn(dev, "failed to get system register.\n"); + ctx->sysreg = NULL; + } + ctx->bus_clk = devm_clk_get(dev, "fimd"); if (IS_ERR(ctx->bus_clk)) { dev_err(dev, "failed to get bus clock\n"); @@ -972,7 +1161,8 @@ static int fimd_probe(struct platform_device *pdev) goto err_del_component; } - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync"); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + ctx->i80_if ? 
"lcd_sys" : "vsync"); if (!res) { dev_err(dev, "irq request failed.\n"); ret = -ENXIO; @@ -986,7 +1176,6 @@ static int fimd_probe(struct platform_device *pdev) goto err_del_component; } - ctx->driver_data = drm_fimd_get_driver_data(pdev); init_waitqueue_head(&ctx->wait_vsync_queue); atomic_set(&ctx->wait_vsync_event, 0); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 800158714473..df7a77d3eff8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1042,8 +1042,23 @@ err: int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) { + struct drm_exynos_file_private *file_priv = file->driver_priv; + struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; + struct device *dev; + struct g2d_data *g2d; struct drm_exynos_g2d_get_ver *ver = data; + if (!g2d_priv) + return -ENODEV; + + dev = g2d_priv->dev; + if (!dev) + return -ENODEV; + + g2d = dev_get_drvdata(dev); + if (!g2d) + return -EFAULT; + ver->major = G2D_HW_MAJOR_VER; ver->minor = G2D_HW_MINOR_VER; @@ -1056,7 +1071,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; - struct device *dev = g2d_priv->dev; + struct device *dev; struct g2d_data *g2d; struct drm_exynos_g2d_set_cmdlist *req = data; struct drm_exynos_g2d_cmd *cmd; @@ -1067,6 +1082,10 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, int size; int ret; + if (!g2d_priv) + return -ENODEV; + + dev = g2d_priv->dev; if (!dev) return -ENODEV; @@ -1223,13 +1242,17 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, { struct drm_exynos_file_private *file_priv = file->driver_priv; struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; - struct device *dev = g2d_priv->dev; + struct device *dev; struct g2d_data *g2d; struct drm_exynos_g2d_exec *req = data; struct g2d_runqueue_node *runqueue_node; struct list_head *run_cmdlist; struct list_head *event_list; + if (!g2d_priv) + return -ENODEV; + + dev = g2d_priv->dev; if (!dev) return -ENODEV; @@ -1544,8 +1567,10 @@ static const struct dev_pm_ops g2d_pm_ops = { static const struct of_device_id exynos_g2d_match[] = { { .compatible = "samsung,exynos5250-g2d" }, + { .compatible = "samsung,exynos4212-g2d" }, {}, }; +MODULE_DEVICE_TABLE(of, exynos_g2d_match); struct platform_driver g2d_driver = { .probe = g2d_probe, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 163a054922cb..15db80138382 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -301,7 +301,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, unsigned int gem_handle, struct drm_file *filp) { - struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_gem_object *obj; obj = drm_gem_object_lookup(dev, filp, gem_handle); @@ -310,8 +309,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, return; } - exynos_gem_obj = to_exynos_gem_obj(obj); - drm_gem_object_unreference_unlocked(obj); /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index a1888e128f1d..c411399070d6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -129,9 +129,6 @@ void exynos_platform_device_ipp_unregister(void) int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv 
*ippdrv) { - if (!ippdrv) - return -EINVAL; - mutex_lock(&exynos_drm_ippdrv_lock); list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list); mutex_unlock(&exynos_drm_ippdrv_lock); @@ -141,9 +138,6 @@ int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) { - if (!ippdrv) - return -EINVAL; - mutex_lock(&exynos_drm_ippdrv_lock); list_del(&ippdrv->drv_list); mutex_unlock(&exynos_drm_ippdrv_lock); @@ -151,20 +145,15 @@ int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv) return 0; } -static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj, - u32 *idp) +static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj) { int ret; - /* do the allocation under our mutexlock */ mutex_lock(lock); ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL); mutex_unlock(lock); - if (ret < 0) - return ret; - *idp = ret; - return 0; + return ret; } static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id) @@ -178,35 +167,25 @@ static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id) { void *obj; - DRM_DEBUG_KMS("id[%d]\n", id); - mutex_lock(lock); - - /* find object using handle */ obj = idr_find(id_idr, id); - if (!obj) { - DRM_ERROR("failed to find object.\n"); - mutex_unlock(lock); - return ERR_PTR(-ENODEV); - } - mutex_unlock(lock); return obj; } -static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv, - enum drm_exynos_ipp_cmd cmd) +static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv, + struct drm_exynos_ipp_property *property) { - /* - * check dedicated flag and WB, OUTPUT operation with - * power on state. - */ - if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) && - !pm_runtime_suspended(ippdrv->dev))) - return true; + if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) && + !pm_runtime_suspended(ippdrv->dev))) + return -EBUSY; - return false; + if (ippdrv->check_property && + ippdrv->check_property(ippdrv->dev, property)) + return -EINVAL; + + return 0; } static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, @@ -214,62 +193,30 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx, { struct exynos_drm_ippdrv *ippdrv; u32 ipp_id = property->ipp_id; - - DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id); + int ret; if (ipp_id) { - /* find ipp driver using idr */ - ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, - ipp_id); - if (IS_ERR(ippdrv)) { - DRM_ERROR("not found ipp%d driver.\n", ipp_id); - return ippdrv; + ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id); + if (!ippdrv) { + DRM_DEBUG("ipp%d driver not found\n", ipp_id); + return ERR_PTR(-ENODEV); } - /* - * WB, OUTPUT opertion not supported multi-operation. - * so, make dedicated state at set property ioctl. - * when ipp driver finished operations, clear dedicated flags. - */ - if (ipp_check_dedicated(ippdrv, property->cmd)) { - DRM_ERROR("already used choose device.\n"); - return ERR_PTR(-EBUSY); - } - - /* - * This is necessary to find correct device in ipp drivers. - * ipp drivers have different abilities, - * so need to check property. - */ - if (ippdrv->check_property && - ippdrv->check_property(ippdrv->dev, property)) { - DRM_ERROR("not support property.\n"); - return ERR_PTR(-EINVAL); + ret = ipp_check_driver(ippdrv, property); + if (ret < 0) { + DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret); + return ERR_PTR(ret); } return ippdrv; } else { - /* - * This case is search all ipp driver for finding. 
- * user application don't set ipp_id in this case, - * so ipp subsystem search correct driver in driver list. - */ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - if (ipp_check_dedicated(ippdrv, property->cmd)) { - DRM_DEBUG_KMS("used device.\n"); - continue; - } - - if (ippdrv->check_property && - ippdrv->check_property(ippdrv->dev, property)) { - DRM_DEBUG_KMS("not support property.\n"); - continue; - } - - return ippdrv; + ret = ipp_check_driver(ippdrv, property); + if (ret == 0) + return ippdrv; } - DRM_ERROR("not support ipp driver operations.\n"); + DRM_DEBUG("cannot find driver suitable for given property.\n"); } return ERR_PTR(-ENODEV); @@ -308,8 +255,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_prop_list *prop_list = data; struct exynos_drm_ippdrv *ippdrv; @@ -346,10 +292,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data, */ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, prop_list->ipp_id); - if (IS_ERR(ippdrv)) { + if (!ippdrv) { DRM_ERROR("not found ipp%d driver.\n", prop_list->ipp_id); - return PTR_ERR(ippdrv); + return -ENODEV; } *prop_list = ippdrv->prop_list; @@ -432,7 +378,7 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void) if (!event_work) return ERR_PTR(-ENOMEM); - INIT_WORK((struct work_struct *)event_work, ipp_sched_event); + INIT_WORK(&event_work->work, ipp_sched_event); return event_work; } @@ -441,8 +387,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_property *property = data; struct exynos_drm_ippdrv *ippdrv; @@ -489,19 +434,18 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, if (!c_node) return -ENOMEM; - /* create property id */ - ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node, - &property->prop_id); - if (ret) { + ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node); + if (ret < 0) { DRM_ERROR("failed to create id.\n"); goto err_clear; } + property->prop_id = ret; DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", property->prop_id, property->cmd, (int)ippdrv); /* stored property information and ippdrv in private data */ - c_node->priv = priv; + c_node->dev = dev; c_node->property = *property; c_node->state = IPP_STATE_IDLE; @@ -534,7 +478,6 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, INIT_LIST_HEAD(&c_node->mem_list[i]); INIT_LIST_HEAD(&c_node->event_list); - list_splice_init(&priv->event_list, &c_node->event_list); mutex_lock(&ippdrv->cmd_lock); list_add_tail(&c_node->list, &ippdrv->cmd_list); mutex_unlock(&ippdrv->cmd_lock); @@ -577,42 +520,18 @@ static void ipp_clean_cmd_node(struct ipp_context *ctx, kfree(c_node); } -static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) +static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node) { - struct drm_exynos_ipp_property *property = &c_node->property; - struct drm_exynos_ipp_mem_node 
*m_node; - struct list_head *head; - int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, }; - - for_each_ipp_ops(i) { - /* source/destination memory list */ - head = &c_node->mem_list[i]; - - /* find memory node entry */ - list_for_each_entry(m_node, head, list) { - DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n", - i ? "dst" : "src", count[i], (int)m_node); - count[i]++; - } + switch (c_node->property.cmd) { + case IPP_CMD_WB: + return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); + case IPP_CMD_OUTPUT: + return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]); + case IPP_CMD_M2M: + default: + return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) && + !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]); } - - DRM_DEBUG_KMS("min[%d]max[%d]\n", - min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]), - max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST])); - - /* - * M2M operations should be need paired memory address. - * so, need to check minimum count about src, dst. - * other case not use paired memory, so use maximum count - */ - if (ipp_is_m2m_cmd(property->cmd)) - ret = min(count[EXYNOS_DRM_OPS_SRC], - count[EXYNOS_DRM_OPS_DST]); - else - ret = max(count[EXYNOS_DRM_OPS_SRC], - count[EXYNOS_DRM_OPS_DST]); - - return ret; } static struct drm_exynos_ipp_mem_node @@ -683,16 +602,14 @@ static struct drm_exynos_ipp_mem_node struct drm_exynos_ipp_queue_buf *qbuf) { struct drm_exynos_ipp_mem_node *m_node; - struct drm_exynos_ipp_buf_info buf_info; - void *addr; + struct drm_exynos_ipp_buf_info *buf_info; int i; m_node = kzalloc(sizeof(*m_node), GFP_KERNEL); if (!m_node) return ERR_PTR(-ENOMEM); - /* clear base address for error handling */ - memset(&buf_info, 0x0, sizeof(buf_info)); + buf_info = &m_node->buf_info; /* operations, buffer id */ m_node->ops_id = qbuf->ops_id; @@ -707,6 +624,8 @@ static struct drm_exynos_ipp_mem_node /* get dma address by handle */ if (qbuf->handle[i]) { + dma_addr_t *addr; + addr = exynos_drm_gem_get_dma_addr(drm_dev, qbuf->handle[i], file); if (IS_ERR(addr)) { @@ -714,15 +633,14 @@ static struct drm_exynos_ipp_mem_node goto err_clear; } - buf_info.handles[i] = qbuf->handle[i]; - buf_info.base[i] = *(dma_addr_t *) addr; - DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n", - i, buf_info.base[i], (int)buf_info.handles[i]); + buf_info->handles[i] = qbuf->handle[i]; + buf_info->base[i] = *addr; + DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, + buf_info->base[i], buf_info->handles[i]); } } m_node->filp = file; - m_node->buf_info = buf_info; mutex_lock(&c_node->mem_lock); list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]); mutex_unlock(&c_node->mem_lock); @@ -930,8 +848,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_queue_buf *qbuf = data; struct drm_exynos_ipp_cmd_node *c_node; @@ -955,9 +872,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data, /* find command node */ c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, qbuf->prop_id); - if (IS_ERR(c_node)) { + if (!c_node) { DRM_ERROR("failed to get command node.\n"); - return PTR_ERR(c_node); + return -ENODEV; } /* buffer control */ @@ -1062,9 +979,8 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data, struct drm_file *file) { struct 
drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; struct exynos_drm_ippdrv *ippdrv = NULL; - struct device *dev = priv->dev; + struct device *dev = file_priv->ipp_dev; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data; struct drm_exynos_ipp_cmd_work *cmd_work; @@ -1091,9 +1007,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data, c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, cmd_ctrl->prop_id); - if (IS_ERR(c_node)) { + if (!c_node) { DRM_ERROR("invalid command node list.\n"); - return PTR_ERR(c_node); + return -ENODEV; } if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl, @@ -1198,7 +1114,6 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv, /* reset h/w block */ if (ippdrv->reset && ippdrv->reset(ippdrv->dev)) { - DRM_ERROR("failed to reset.\n"); return -EINVAL; } @@ -1216,30 +1131,24 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv, /* set format */ if (ops->set_fmt) { ret = ops->set_fmt(ippdrv->dev, config->fmt); - if (ret) { - DRM_ERROR("not support format.\n"); + if (ret) return ret; - } } /* set transform for rotation, flip */ if (ops->set_transf) { ret = ops->set_transf(ippdrv->dev, config->degree, config->flip, &swap); - if (ret) { - DRM_ERROR("not support tranf.\n"); - return -EINVAL; - } + if (ret) + return ret; } /* set size */ if (ops->set_size) { ret = ops->set_size(ippdrv->dev, swap, &config->pos, &config->sz); - if (ret) { - DRM_ERROR("not support size.\n"); + if (ret) return ret; - } } } @@ -1283,11 +1192,6 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv, m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - if (!m_node) { - DRM_ERROR("failed to get node.\n"); - ret = -EFAULT; - goto err_unlock; - } DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); @@ -1545,11 +1449,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - if (!m_node) { - DRM_ERROR("empty memory node.\n"); - ret = -ENOMEM; - goto err_mem_unlock; - } tbuf_id[i] = m_node->buf_id; DRM_DEBUG_KMS("%s buf_id[%d]\n", @@ -1586,11 +1485,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv, m_node = list_first_entry(head, struct drm_exynos_ipp_mem_node, list); - if (!m_node) { - DRM_ERROR("empty memory node.\n"); - ret = -ENOMEM; - goto err_mem_unlock; - } tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id; @@ -1704,21 +1598,17 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) /* get ipp driver entry */ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { - u32 ipp_id; - ippdrv->drm_dev = drm_dev; - ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv, - &ipp_id); - if (ret || ipp_id == 0) { + ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv); + if (ret < 0) { DRM_ERROR("failed to create id.\n"); goto err; } + ippdrv->prop_list.ipp_id = ret; DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", - count++, (int)ippdrv, ipp_id); - - ippdrv->prop_list.ipp_id = ipp_id; + count++, (int)ippdrv, ret); /* store parent device for node */ ippdrv->parent_dev = dev; @@ -1776,17 +1666,10 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv; - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - priv->dev = dev; - 
file_priv->ipp_priv = priv; - INIT_LIST_HEAD(&priv->event_list); + file_priv->ipp_dev = dev; - DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv); + DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); return 0; } @@ -1795,13 +1678,12 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, struct drm_file *file) { struct drm_exynos_file_private *file_priv = file->driver_priv; - struct exynos_drm_ipp_private *priv = file_priv->ipp_priv; struct exynos_drm_ippdrv *ippdrv = NULL; struct ipp_context *ctx = get_ipp_context(dev); struct drm_exynos_ipp_cmd_node *c_node, *tc_node; int count = 0; - DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv); + DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev); list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { mutex_lock(&ippdrv->cmd_lock); @@ -1810,7 +1692,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); - if (c_node->priv == priv) { + if (c_node->dev == file_priv->ipp_dev) { /* * userland goto unnormal state. process killed. * and close the file. @@ -1832,7 +1714,6 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, mutex_unlock(&ippdrv->cmd_lock); } - kfree(priv); return; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h index 7aaeaae757c2..6f48d62aeb30 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h @@ -48,7 +48,7 @@ struct drm_exynos_ipp_cmd_work { /* * A structure of command node. * - * @priv: IPP private information. + * @dev: IPP device. * @list: list head to command queue information. * @event_list: list head of event. * @mem_list: list head to source,destination memory queue information. @@ -64,7 +64,7 @@ struct drm_exynos_ipp_cmd_work { * @state: state of command node. 
*/ struct drm_exynos_ipp_cmd_node { - struct exynos_drm_ipp_private *priv; + struct device *dev; struct list_head list; struct list_head event_list; struct list_head mem_list[EXYNOS_DRM_OPS_MAX]; diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index f01fbb6dc1f0..55af6b41c1df 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -691,6 +691,7 @@ static const struct of_device_id exynos_rotator_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, exynos_rotator_match); static int rotator_probe(struct platform_device *pdev) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 2fb8705d6461..9528d81d8004 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -562,7 +562,7 @@ static int vidi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &vidi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index aa259b0a873a..562966db2aa1 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -84,6 +84,7 @@ struct hdmi_resources { struct clk *sclk_hdmiphy; struct clk *mout_hdmi; struct regulator_bulk_data *regul_bulk; + struct regulator *reg_hdmi_en; int regul_count; }; @@ -592,6 +593,13 @@ static struct hdmi_driver_data exynos4212_hdmi_driver_data = { .is_apb_phy = 0, }; +static struct hdmi_driver_data exynos4210_hdmi_driver_data = { + .type = HDMI_TYPE13, + .phy_confs = hdmiphy_v13_configs, + .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs), + .is_apb_phy = 0, +}; + static struct hdmi_driver_data exynos5_hdmi_driver_data = { .type = HDMI_TYPE14, .phy_confs = hdmiphy_v13_configs, @@ -1129,7 +1137,7 @@ static int hdmi_create_connector(struct exynos_drm_display *display, } drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; @@ -1241,14 +1249,13 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) static void hdmi_audio_init(struct hdmi_context *hdata) { - u32 sample_rate, bits_per_sample, frame_size_code; + u32 sample_rate, bits_per_sample; u32 data_num, bit_ch, sample_frq; u32 val; u8 acr[7]; sample_rate = 44100; bits_per_sample = 16; - frame_size_code = 0; switch (bits_per_sample) { case 20: @@ -2168,7 +2175,6 @@ static int hdmi_resources_init(struct hdmi_context *hdata) struct device *dev = hdata->dev; struct hdmi_resources *res = &hdata->res; static char *supply[] = { - "hdmi-en", "vdd", "vdd_osc", "vdd_pll", @@ -2228,6 +2234,20 @@ static int hdmi_resources_init(struct hdmi_context *hdata) } res->regul_count = ARRAY_SIZE(supply); + res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en"); + if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) { + DRM_ERROR("failed to get hdmi-en regulator\n"); + return PTR_ERR(res->reg_hdmi_en); + } + if (!IS_ERR(res->reg_hdmi_en)) { + ret = regulator_enable(res->reg_hdmi_en); + if (ret) { + DRM_ERROR("failed to enable hdmi-en regulator\n"); + return ret; + } + } else + res->reg_hdmi_en = NULL; + return ret; fail: DRM_ERROR("HDMI resource init - failed\n"); @@ -2263,6 +2283,9 @@ static struct of_device_id 
hdmi_match_types[] = { .compatible = "samsung,exynos5-hdmi", .data = &exynos5_hdmi_driver_data, }, { + .compatible = "samsung,exynos4210-hdmi", + .data = &exynos4210_hdmi_driver_data, + }, { .compatible = "samsung,exynos4212-hdmi", .data = &exynos4212_hdmi_driver_data, }, { @@ -2272,6 +2295,7 @@ static struct of_device_id hdmi_match_types[] = { /* end node */ } }; +MODULE_DEVICE_TABLE (of, hdmi_match_types); static int hdmi_bind(struct device *dev, struct device *master, void *data) { @@ -2494,7 +2518,11 @@ static int hdmi_remove(struct platform_device *pdev) cancel_delayed_work_sync(&hdata->hotplug_work); - put_device(&hdata->hdmiphy_port->dev); + if (hdata->res.reg_hdmi_en) + regulator_disable(hdata->res.reg_hdmi_en); + + if (hdata->hdmiphy_port) + put_device(&hdata->hdmiphy_port->dev); put_device(&hdata->ddc_adpt->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 7529946d0a74..e8b4ec84b312 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -76,7 +76,7 @@ struct mixer_resources { struct clk *vp; struct clk *sclk_mixer; struct clk *sclk_hdmi; - struct clk *sclk_dac; + struct clk *mout_mixer; }; enum mixer_version_id { @@ -93,6 +93,7 @@ struct mixer_context { bool interlace; bool powered; bool vp_enabled; + bool has_sclk; u32 int_en; struct mutex mixer_mutex; @@ -106,6 +107,7 @@ struct mixer_context { struct mixer_drv_data { enum mixer_version_id version; bool is_vp_enabled; + bool has_sclk; }; static const u8 filter_y_horiz_tap8[] = { @@ -363,6 +365,11 @@ static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable) vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE); + + /* control blending of graphic layer 0 */ + mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val, + MXR_GRP_CFG_BLEND_PRE_MUL | + MXR_GRP_CFG_PIXEL_BLEND_EN); } break; } @@ -809,19 +816,23 @@ static int vp_resources_init(struct mixer_context *mixer_ctx) dev_err(dev, "failed to get clock 'vp'\n"); return -ENODEV; } - mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); - if (IS_ERR(mixer_res->sclk_mixer)) { - dev_err(dev, "failed to get clock 'sclk_mixer'\n"); - return -ENODEV; - } - mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac"); - if (IS_ERR(mixer_res->sclk_dac)) { - dev_err(dev, "failed to get clock 'sclk_dac'\n"); - return -ENODEV; - } - if (mixer_res->sclk_hdmi) - clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); + if (mixer_ctx->has_sclk) { + mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); + if (IS_ERR(mixer_res->sclk_mixer)) { + dev_err(dev, "failed to get clock 'sclk_mixer'\n"); + return -ENODEV; + } + mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer"); + if (IS_ERR(mixer_res->mout_mixer)) { + dev_err(dev, "failed to get clock 'mout_mixer'\n"); + return -ENODEV; + } + + if (mixer_res->sclk_hdmi && mixer_res->mout_mixer) + clk_set_parent(mixer_res->mout_mixer, + mixer_res->sclk_hdmi); + } res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1); if (res == NULL) { @@ -1082,7 +1093,8 @@ static void mixer_poweron(struct exynos_drm_manager *mgr) clk_prepare_enable(res->mixer); if (ctx->vp_enabled) { clk_prepare_enable(res->vp); - clk_prepare_enable(res->sclk_mixer); + if (ctx->has_sclk) + clk_prepare_enable(res->sclk_mixer); } mutex_lock(&ctx->mixer_mutex); @@ -1121,7 +1133,8 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr) clk_disable_unprepare(res->mixer); if 
(ctx->vp_enabled) { clk_disable_unprepare(res->vp); - clk_disable_unprepare(res->sclk_mixer); + if (ctx->has_sclk) + clk_disable_unprepare(res->sclk_mixer); } pm_runtime_put_sync(ctx->dev); @@ -1189,9 +1202,15 @@ static struct mixer_drv_data exynos5250_mxr_drv_data = { .is_vp_enabled = 0, }; +static struct mixer_drv_data exynos4212_mxr_drv_data = { + .version = MXR_VER_0_0_0_16, + .is_vp_enabled = 1, +}; + static struct mixer_drv_data exynos4210_mxr_drv_data = { .version = MXR_VER_0_0_0_16, .is_vp_enabled = 1, + .has_sclk = 1, }; static struct platform_device_id mixer_driver_types[] = { @@ -1208,6 +1227,12 @@ static struct platform_device_id mixer_driver_types[] = { static struct of_device_id mixer_match_types[] = { { + .compatible = "samsung,exynos4210-mixer", + .data = &exynos4210_mxr_drv_data, + }, { + .compatible = "samsung,exynos4212-mixer", + .data = &exynos4212_mxr_drv_data, + }, { .compatible = "samsung,exynos5-mixer", .data = &exynos5250_mxr_drv_data, }, { @@ -1220,6 +1245,7 @@ static struct of_device_id mixer_match_types[] = { /* end node */ } }; +MODULE_DEVICE_TABLE(of, mixer_match_types); static int mixer_bind(struct device *dev, struct device *manager, void *data) { @@ -1251,6 +1277,7 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) ctx->pdev = pdev; ctx->dev = dev; ctx->vp_enabled = drv->is_vp_enabled; + ctx->has_sclk = drv->has_sclk; ctx->mxr_ver = drv->version; init_waitqueue_head(&ctx->wait_vsync_queue); atomic_set(&ctx->wait_vsync_event, 0); diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index c18268cd516e..248c33a35ebf 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -192,7 +192,7 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector) struct gma_encoder *gma_encoder = gma_attached_encoder(connector); psb_intel_i2c_destroy(gma_encoder->ddc_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -304,7 +304,7 @@ void cdv_intel_crt_init(struct drm_device *dev, drm_connector_helper_add(connector, &cdv_intel_crt_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_ddc: diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 9ff30c2efadb..a4cc0e60a1be 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -1713,7 +1713,7 @@ cdv_intel_dp_destroy(struct drm_connector *connector) } } i2c_del_adapter(&intel_dp->adapter); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -1847,7 +1847,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); /* Set up the DDC bus. 
*/ switch (output_reg) { diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index b99084b3f706..4268bf210034 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -248,7 +248,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector) if (gma_encoder->i2c_bus) psb_intel_i2c_destroy(gma_encoder->i2c_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -356,7 +356,7 @@ void cdv_hdmi_init(struct drm_device *dev, hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter); hdmi_priv->dev = dev; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_ddc: diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 8ecc920fc26d..0b770396548c 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -446,7 +446,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector) if (gma_encoder->i2c_bus) psb_intel_i2c_destroy(gma_encoder->i2c_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -774,7 +774,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, out: mutex_unlock(&dev->mode_config.mutex); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_find: diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index e7fcc148f333..d0dd3bea8aa5 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -561,7 +561,7 @@ static int psbfb_probe(struct drm_fb_helper *helper, return psbfb_create(psb_fbdev, sizes); } -static struct drm_fb_helper_funcs psb_fb_helper_funcs = { +static const struct drm_fb_helper_funcs psb_fb_helper_funcs = { .gamma_set = psbfb_gamma_set, .gamma_get = psbfb_gamma_get, .fb_probe = psbfb_probe, @@ -600,7 +600,8 @@ int psb_fbdev_init(struct drm_device *dev) } dev_priv->fbdev = fbdev; - fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs; + + drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs); drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs, INTELFB_CONN_LIMIT); diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 592d205a0089..ce015db59dc6 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt) WARN_ON(gt->pages); - pages = drm_gem_get_pages(>->gem, 0); + pages = drm_gem_get_pages(>->gem); if (IS_ERR(pages)) return PTR_ERR(pages); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 6e91b20ce2e5..abf2248da61e 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -318,7 +318,7 @@ static void mdfld_dsi_connector_destroy(struct drm_connector *connector) if (!dsi_connector) return; - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); sender = dsi_connector->pkg_sender; mdfld_dsi_pkg_sender_destroy(sender); @@ -597,7 +597,7 @@ void mdfld_dsi_output_init(struct drm_device *dev, dsi_config->encoder = encoder; encoder->base.type = (pipe == 0) ? 
INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; /*TODO: add code to destroy outputs on error*/ diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c index cf018ddcc5a6..54f73f50571a 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -665,7 +665,7 @@ void oaktrail_hdmi_init(struct drm_device *dev, connector->display_info.subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); dev_info(dev->dev, "HDMI initialised.\n"); return; @@ -674,7 +674,7 @@ failed_connector: kfree(gma_encoder); } -static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = { +static const struct pci_device_id hdmi_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) }, { 0 } }; diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 9b099468a5db..0d39da6e8b7a 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -404,7 +404,7 @@ void oaktrail_lvds_init(struct drm_device *dev, out: mutex_unlock(&dev->mode_config.mutex); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_find: diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 6e8fe9ec02b5..eec993f93b1a 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -54,7 +54,7 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); * PowerVR SGX545 - Cedartrail - Intel GMA 3650, Intel Atom D2550, D2700, * N2800 */ -static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { +static const struct pci_device_id pciidlist[] = { { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops }, #if defined(CONFIG_DRM_GMA600) diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index d7778d0472c1..88aad95bde09 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -563,7 +563,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector) if (lvds_priv->ddc_bus) psb_intel_i2c_destroy(lvds_priv->ddc_bus); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -829,7 +829,7 @@ void psb_intel_lvds_init(struct drm_device *dev, */ out: mutex_unlock(&dev->mode_config.mutex); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; failed_find: diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index deeb0829b129..0be96fdb5e28 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1682,7 +1682,7 @@ static void psb_intel_sdvo_destroy(struct drm_connector *connector) psb_intel_sdvo_connector->tv_format); psb_intel_sdvo_destroy_enhance_property(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -2071,7 +2071,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector, connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; gma_connector_attach_encoder(&connector->base, &encoder->base); - 
drm_sysfs_connector_add(&connector->base.base); + drm_connector_register(&connector->base.base); } static void diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index ac357b02bd35..d4762799351d 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -15,8 +15,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ - - +#include <linux/component.h> #include <linux/hdmi.h> #include <linux/module.h> #include <linux/irq.h> @@ -730,12 +729,9 @@ tda998x_configure_audio(struct tda998x_priv *priv, /* DRM encoder functions */ -static void -tda998x_encoder_set_config(struct drm_encoder *encoder, void *params) +static void tda998x_encoder_set_config(struct tda998x_priv *priv, + const struct tda998x_encoder_params *p) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); - struct tda998x_encoder_params *p = params; - priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) | (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) | VIP_CNTRL_0_SWAP_B(p->swap_b) | @@ -752,11 +748,8 @@ tda998x_encoder_set_config(struct drm_encoder *encoder, void *params) priv->params = *p; } -static void -tda998x_encoder_dpms(struct drm_encoder *encoder, int mode) +static void tda998x_encoder_dpms(struct tda998x_priv *priv, int mode) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); - /* we only care about on or off: */ if (mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; @@ -806,9 +799,8 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder, return true; } -static int -tda998x_encoder_mode_valid(struct drm_encoder *encoder, - struct drm_display_mode *mode) +static int tda998x_encoder_mode_valid(struct tda998x_priv *priv, + struct drm_display_mode *mode) { if (mode->clock > 150000) return MODE_CLOCK_HIGH; @@ -820,11 +812,10 @@ tda998x_encoder_mode_valid(struct drm_encoder *encoder, } static void -tda998x_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +tda998x_encoder_mode_set(struct tda998x_priv *priv, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); uint16_t ref_pix, ref_line, n_pix, n_line; uint16_t hs_pix_s, hs_pix_e; uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e; @@ -1012,20 +1003,16 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, } static enum drm_connector_status -tda998x_encoder_detect(struct drm_encoder *encoder, - struct drm_connector *connector) +tda998x_encoder_detect(struct tda998x_priv *priv) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV); return (val & CEC_RXSHPDLEV_HPD) ? 
connector_status_connected : connector_status_disconnected; } -static int -read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk) +static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); uint8_t offset, segptr; int ret, i; @@ -1079,10 +1066,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk) return 0; } -static uint8_t * -do_get_edid(struct drm_encoder *encoder) +static uint8_t *do_get_edid(struct tda998x_priv *priv) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); int j, valid_extensions = 0; uint8_t *block, *new; bool print_bad_edid = drm_debug & DRM_UT_KMS; @@ -1094,7 +1079,7 @@ do_get_edid(struct drm_encoder *encoder) reg_clear(priv, REG_TX4, TX4_PD_RAM); /* base block fetch */ - if (read_edid_block(encoder, block, 0)) + if (read_edid_block(priv, block, 0)) goto fail; if (!drm_edid_block_valid(block, 0, print_bad_edid)) @@ -1111,7 +1096,7 @@ do_get_edid(struct drm_encoder *encoder) for (j = 1; j <= block[0x7e]; j++) { uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH; - if (read_edid_block(encoder, ext_block, j)) + if (read_edid_block(priv, ext_block, j)) goto fail; if (!drm_edid_block_valid(ext_block, j, print_bad_edid)) @@ -1144,11 +1129,10 @@ fail: } static int -tda998x_encoder_get_modes(struct drm_encoder *encoder, - struct drm_connector *connector) +tda998x_encoder_get_modes(struct tda998x_priv *priv, + struct drm_connector *connector) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); - struct edid *edid = (struct edid *)do_get_edid(encoder); + struct edid *edid = (struct edid *)do_get_edid(priv); int n = 0; if (edid) { @@ -1161,18 +1145,14 @@ tda998x_encoder_get_modes(struct drm_encoder *encoder, return n; } -static int -tda998x_encoder_create_resources(struct drm_encoder *encoder, - struct drm_connector *connector) +static void tda998x_encoder_set_polling(struct tda998x_priv *priv, + struct drm_connector *connector) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); - if (priv->hdmi->irq) connector->polled = DRM_CONNECTOR_POLL_HPD; else connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - return 0; } static int @@ -1185,66 +1165,97 @@ tda998x_encoder_set_property(struct drm_encoder *encoder, return 0; } -static void -tda998x_encoder_destroy(struct drm_encoder *encoder) +static void tda998x_destroy(struct tda998x_priv *priv) { - struct tda998x_priv *priv = to_tda998x_priv(encoder); - /* disable all IRQs and free the IRQ handler */ cec_write(priv, REG_CEC_RXSHPDINTENA, 0); reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); if (priv->hdmi->irq) free_irq(priv->hdmi->irq, priv); - if (priv->cec) - i2c_unregister_device(priv->cec); + i2c_unregister_device(priv->cec); +} + +/* Slave encoder support */ + +static void +tda998x_encoder_slave_set_config(struct drm_encoder *encoder, void *params) +{ + tda998x_encoder_set_config(to_tda998x_priv(encoder), params); +} + +static void tda998x_encoder_slave_destroy(struct drm_encoder *encoder) +{ + struct tda998x_priv *priv = to_tda998x_priv(encoder); + + tda998x_destroy(priv); drm_i2c_encoder_destroy(encoder); kfree(priv); } -static struct drm_encoder_slave_funcs tda998x_encoder_funcs = { - .set_config = tda998x_encoder_set_config, - .destroy = tda998x_encoder_destroy, - .dpms = tda998x_encoder_dpms, - .save = tda998x_encoder_save, - .restore = tda998x_encoder_restore, - .mode_fixup = tda998x_encoder_mode_fixup, - .mode_valid = tda998x_encoder_mode_valid, - 
.mode_set = tda998x_encoder_mode_set, - .detect = tda998x_encoder_detect, - .get_modes = tda998x_encoder_get_modes, - .create_resources = tda998x_encoder_create_resources, - .set_property = tda998x_encoder_set_property, -}; +static void tda998x_encoder_slave_dpms(struct drm_encoder *encoder, int mode) +{ + tda998x_encoder_dpms(to_tda998x_priv(encoder), mode); +} -/* I2C driver functions */ +static int tda998x_encoder_slave_mode_valid(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + return tda998x_encoder_mode_valid(to_tda998x_priv(encoder), mode); +} -static int -tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id) +static void +tda998x_encoder_slave_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - return 0; + tda998x_encoder_mode_set(to_tda998x_priv(encoder), mode, adjusted_mode); +} + +static enum drm_connector_status +tda998x_encoder_slave_detect(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + return tda998x_encoder_detect(to_tda998x_priv(encoder)); +} + +static int tda998x_encoder_slave_get_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + return tda998x_encoder_get_modes(to_tda998x_priv(encoder), connector); } static int -tda998x_remove(struct i2c_client *client) +tda998x_encoder_slave_create_resources(struct drm_encoder *encoder, + struct drm_connector *connector) { + tda998x_encoder_set_polling(to_tda998x_priv(encoder), connector); return 0; } -static int -tda998x_encoder_init(struct i2c_client *client, - struct drm_device *dev, - struct drm_encoder_slave *encoder_slave) +static struct drm_encoder_slave_funcs tda998x_encoder_slave_funcs = { + .set_config = tda998x_encoder_slave_set_config, + .destroy = tda998x_encoder_slave_destroy, + .dpms = tda998x_encoder_slave_dpms, + .save = tda998x_encoder_save, + .restore = tda998x_encoder_restore, + .mode_fixup = tda998x_encoder_mode_fixup, + .mode_valid = tda998x_encoder_slave_mode_valid, + .mode_set = tda998x_encoder_slave_mode_set, + .detect = tda998x_encoder_slave_detect, + .get_modes = tda998x_encoder_slave_get_modes, + .create_resources = tda998x_encoder_slave_create_resources, + .set_property = tda998x_encoder_set_property, +}; + +/* I2C driver functions */ + +static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) { - struct tda998x_priv *priv; struct device_node *np = client->dev.of_node; u32 video; int rev_lo, rev_hi, ret; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); @@ -1252,17 +1263,11 @@ tda998x_encoder_init(struct i2c_client *client, priv->current_page = 0xff; priv->hdmi = client; priv->cec = i2c_new_dummy(client->adapter, 0x34); - if (!priv->cec) { - kfree(priv); + if (!priv->cec) return -ENODEV; - } - priv->encoder = &encoder_slave->base; priv->dpms = DRM_MODE_DPMS_OFF; - encoder_slave->slave_priv = priv; - encoder_slave->slave_funcs = &tda998x_encoder_funcs; - /* wake up the device: */ cec_write(priv, REG_CEC_ENAMODS, CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); @@ -1365,12 +1370,231 @@ fail: */ if (priv->cec) i2c_unregister_device(priv->cec); - kfree(priv); - encoder_slave->slave_priv = NULL; - encoder_slave->slave_funcs = NULL; return -ENXIO; } +static int tda998x_encoder_init(struct i2c_client *client, + struct 
drm_device *dev, + struct drm_encoder_slave *encoder_slave) +{ + struct tda998x_priv *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->encoder = &encoder_slave->base; + + ret = tda998x_create(client, priv); + if (ret) { + kfree(priv); + return ret; + } + + encoder_slave->slave_priv = priv; + encoder_slave->slave_funcs = &tda998x_encoder_slave_funcs; + + return 0; +} + +struct tda998x_priv2 { + struct tda998x_priv base; + struct drm_encoder encoder; + struct drm_connector connector; +}; + +#define conn_to_tda998x_priv2(x) \ + container_of(x, struct tda998x_priv2, connector); + +#define enc_to_tda998x_priv2(x) \ + container_of(x, struct tda998x_priv2, encoder); + +static void tda998x_encoder2_dpms(struct drm_encoder *encoder, int mode) +{ + struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder); + + tda998x_encoder_dpms(&priv->base, mode); +} + +static void tda998x_encoder_prepare(struct drm_encoder *encoder) +{ + tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_OFF); +} + +static void tda998x_encoder_commit(struct drm_encoder *encoder) +{ + tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static void tda998x_encoder2_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder); + + tda998x_encoder_mode_set(&priv->base, mode, adjusted_mode); +} + +static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = { + .dpms = tda998x_encoder2_dpms, + .save = tda998x_encoder_save, + .restore = tda998x_encoder_restore, + .mode_fixup = tda998x_encoder_mode_fixup, + .prepare = tda998x_encoder_prepare, + .commit = tda998x_encoder_commit, + .mode_set = tda998x_encoder2_mode_set, +}; + +static void tda998x_encoder_destroy(struct drm_encoder *encoder) +{ + struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder); + + tda998x_destroy(&priv->base); + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs tda998x_encoder_funcs = { + .destroy = tda998x_encoder_destroy, +}; + +static int tda998x_connector_get_modes(struct drm_connector *connector) +{ + struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector); + + return tda998x_encoder_get_modes(&priv->base, connector); +} + +static int tda998x_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector); + + return tda998x_encoder_mode_valid(&priv->base, mode); +} + +static struct drm_encoder * +tda998x_connector_best_encoder(struct drm_connector *connector) +{ + struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector); + + return &priv->encoder; +} + +static +const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = { + .get_modes = tda998x_connector_get_modes, + .mode_valid = tda998x_connector_mode_valid, + .best_encoder = tda998x_connector_best_encoder, +}; + +static enum drm_connector_status +tda998x_connector_detect(struct drm_connector *connector, bool force) +{ + struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector); + + return tda998x_encoder_detect(&priv->base); +} + +static void tda998x_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs tda998x_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = tda998x_connector_detect, 
+ .destroy = tda998x_connector_destroy, +}; + +static int tda998x_bind(struct device *dev, struct device *master, void *data) +{ + struct tda998x_encoder_params *params = dev->platform_data; + struct i2c_client *client = to_i2c_client(dev); + struct drm_device *drm = data; + struct tda998x_priv2 *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev_set_drvdata(dev, priv); + + priv->base.encoder = &priv->encoder; + priv->connector.interlace_allowed = 1; + priv->encoder.possible_crtcs = 1 << 0; + + ret = tda998x_create(client, &priv->base); + if (ret) + return ret; + + if (!dev->of_node && params) + tda998x_encoder_set_config(&priv->base, params); + + tda998x_encoder_set_polling(&priv->base, &priv->connector); + + drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs); + ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + if (ret) + goto err_encoder; + + drm_connector_helper_add(&priv->connector, + &tda998x_connector_helper_funcs); + ret = drm_connector_init(drm, &priv->connector, + &tda998x_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + if (ret) + goto err_connector; + + ret = drm_connector_register(&priv->connector); + if (ret) + goto err_sysfs; + + priv->connector.encoder = &priv->encoder; + drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder); + + return 0; + +err_sysfs: + drm_connector_cleanup(&priv->connector); +err_connector: + drm_encoder_cleanup(&priv->encoder); +err_encoder: + tda998x_destroy(&priv->base); + return ret; +} + +static void tda998x_unbind(struct device *dev, struct device *master, + void *data) +{ + struct tda998x_priv2 *priv = dev_get_drvdata(dev); + + drm_connector_cleanup(&priv->connector); + drm_encoder_cleanup(&priv->encoder); + tda998x_destroy(&priv->base); +} + +static const struct component_ops tda998x_ops = { + .bind = tda998x_bind, + .unbind = tda998x_unbind, +}; + +static int +tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + return component_add(&client->dev, &tda998x_ops); +} + +static int tda998x_remove(struct i2c_client *client) +{ + component_del(&client->dev, &tda998x_ops); + return 0; +} + #ifdef CONFIG_OF static const struct of_device_id tda998x_dt_ids[] = { { .compatible = "nxp,tda998x", }, diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index e88bac1d781f..bae897de9468 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -393,15 +393,14 @@ static int i810_dma_initialize(struct drm_device *dev, /* Program Hardware Status Page */ dev_priv->hw_status_page = - pci_alloc_consistent(dev->pdev, PAGE_SIZE, - &dev_priv->dma_status_page); + pci_zalloc_consistent(dev->pdev, PAGE_SIZE, + &dev_priv->dma_status_page); if (!dev_priv->hw_status_page) { dev->dev_private = (void *)dev_priv; i810_dma_cleanup(dev); DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); I810_WRITE(0x02080, dev_priv->dma_status_page); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 437e1824d0bf..4e39ab34eb1c 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT option changes the default for that module option. If in doubt, say "N". 
- -config DRM_I915_UMS - bool "Enable userspace modesetting on Intel hardware (DEPRECATED)" - depends on DRM_I915 && BROKEN - default n - help - Choose this option if you still need userspace modesetting. - - Userspace modesetting is deprecated for quite some time now, so - enable this only if you have ancient versions of the DDX drivers. - - If in doubt, say "N". diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index cad1683d8bb5..91bd167e1cb7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \ intel_crt.o \ intel_ddi.o \ intel_dp.o \ + intel_dp_mst.o \ intel_dsi_cmd.o \ intel_dsi.o \ intel_dsi_pll.o \ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 9d7954366bd2..4b7ed5289217 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = { GEN7_SO_WRITE_OFFSET(1), GEN7_SO_WRITE_OFFSET(2), GEN7_SO_WRITE_OFFSET(3), + GEN7_L3SQCREG1, + GEN7_L3CNTLREG2, + GEN7_L3CNTLREG3, }; static const u32 gen7_blt_regs[] = { @@ -706,11 +709,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring) BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count)); BUG_ON(!validate_regs_sorted(ring)); - ret = init_hash_table(ring, cmd_tables, cmd_table_count); - if (ret) { - DRM_ERROR("CMD: cmd_parser_init failed!\n"); - fini_hash_table(ring); - return ret; + if (hash_empty(ring->cmd_hash)) { + ret = init_hash_table(ring, cmd_tables, cmd_table_count); + if (ret) { + DRM_ERROR("CMD: cmd_parser_init failed!\n"); + fini_hash_table(ring); + return ret; + } } ring->needs_cmd_parser = true; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b8c689202c40..9e737b771c40 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -170,11 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) } if (obj->ring != NULL) seq_printf(m, " (%s)", obj->ring->name); + if (obj->frontbuffer_bits) + seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } static void describe_ctx(struct seq_file *m, struct intel_context *ctx) { - seq_putc(m, ctx->is_initialized ? 'I' : 'i'); + seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); seq_putc(m, ' '); } @@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; unsigned long flags; struct intel_crtc *crtc; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; for_each_intel_crtc(dev, crtc) { const char pipe = pipe_name(crtc->pipe); @@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) spin_unlock_irqrestore(&dev->event_lock, flags); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -985,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, i915_next_seqno_get, i915_next_seqno_set, "0x%llx\n"); -static int i915_rstdby_delays(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u16 crstanddelay; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - crstanddelay = I915_READ16(CRSTANDVID); - - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); - - return 0; -} - static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -1029,7 +1015,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) MEMSTAT_VID_SHIFT); seq_printf(m, "Current P-state: %d\n", (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); - } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { + } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || + IS_BROADWELL(dev)) { u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); @@ -1048,7 +1035,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) reqf = I915_READ(GEN6_RPNSWREQ); reqf &= ~GEN6_TURBO_DISABLE; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) reqf >>= 24; else reqf >>= 25; @@ -1065,7 +1052,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; else cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; @@ -1121,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max overclocked frequency: %dMHz\n", dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER); } else if (IS_VALLEYVIEW(dev)) { - u32 freq_sts, val; + u32 freq_sts; mutex_lock(&dev_priv->rps.hw_lock); freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); - val = valleyview_rps_max_freq(dev_priv); seq_printf(m, "max GPU freq: %d MHz\n", - vlv_gpu_freq(dev_priv, val)); + vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq)); - val = valleyview_rps_min_freq(dev_priv); seq_printf(m, "min GPU freq: %d MHz\n", - vlv_gpu_freq(dev_priv, val)); + vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq)); + + seq_printf(m, "efficient (RPe) frequency: %d MHz\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); seq_printf(m, "current GPU freq: %d MHz\n", vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 
0xff)); @@ -1148,61 +1136,6 @@ out: return ret; } -static int i915_delayfreq_table(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 delayfreq; - int ret, i; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - for (i = 0; i < 16; i++) { - delayfreq = I915_READ(PXVFREQ_BASE + i * 4); - seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, - (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); - } - - intel_runtime_pm_put(dev_priv); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -static inline int MAP_TO_MV(int map) -{ - return 1250 - (map * 25); -} - -static int i915_inttoext_table(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 inttoext; - int ret, i; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - for (i = 1; i <= 32; i++) { - inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); - seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); - } - - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - return 0; -} - static int ironlake_drpc_info(struct seq_file *m) { struct drm_info_node *node = m->private; @@ -1513,10 +1446,17 @@ static int i915_ips_status(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE) - seq_puts(m, "enabled\n"); - else - seq_puts(m, "disabled\n"); + seq_printf(m, "Enabled by kernel parameter: %s\n", + yesno(i915.enable_ips)); + + if (INTEL_INFO(dev)->gen >= 8) { + seq_puts(m, "Currently: unknown\n"); + } else { + if (I915_READ(IPS_CTL) & IPS_ENABLE) + seq_puts(m, "Currently: enabled\n"); + else + seq_puts(m, "Currently: disabled\n"); + } intel_runtime_pm_put(dev_priv); @@ -1620,26 +1560,6 @@ out: return ret; } -static int i915_gfxec(struct seq_file *m, void *unused) -{ - struct drm_info_node *node = m->private; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - intel_runtime_pm_get(dev_priv); - - seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); - intel_runtime_pm_put(dev_priv); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - static int i915_opregion(struct seq_file *m, void *unused) { struct drm_info_node *node = m->private; @@ -1677,9 +1597,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) #ifdef CONFIG_DRM_I915_FBDEV struct drm_i915_private *dev_priv = dev->dev_private; - int ret = mutex_lock_interruptible(&dev->mode_config.mutex); - if (ret) - return ret; ifbdev = dev_priv->fbdev; fb = to_intel_framebuffer(ifbdev->helper.fb); @@ -1692,7 +1609,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) atomic_read(&fb->base.refcount.refcount)); describe_obj(m, fb->obj); seq_putc(m, '\n'); - mutex_unlock(&dev->mode_config.mutex); #endif mutex_lock(&dev->mode_config.fb_lock); @@ -1723,7 +1639,7 @@ static int i915_context_status(struct seq_file *m, void *unused) struct intel_context *ctx; int ret, i; - ret = mutex_lock_interruptible(&dev->mode_config.mutex); + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -1740,7 +1656,7 @@ 
static int i915_context_status(struct seq_file *m, void *unused) } list_for_each_entry(ctx, &dev_priv->context_list, link) { - if (ctx->obj == NULL) + if (ctx->legacy_hw_ctx.rcs_state == NULL) continue; seq_puts(m, "HW context "); @@ -1749,11 +1665,11 @@ static int i915_context_status(struct seq_file *m, void *unused) if (ring->default_context == ctx) seq_printf(m, "(default context %s) ", ring->name); - describe_obj(m, ctx->obj); + describe_obj(m, ctx->legacy_hw_ctx.rcs_state); seq_putc(m, '\n'); } - mutex_unlock(&dev->mode_config.mutex); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -1863,7 +1779,7 @@ static int per_file_ctx(int id, void *ptr, void *data) if (i915_gem_context_is_default(ctx)) seq_puts(m, " default context:\n"); else - seq_printf(m, " context %d:\n", ctx->id); + seq_printf(m, " context %d:\n", ctx->user_handle); ppgtt->debug_dump(ppgtt, m); return 0; @@ -1976,17 +1892,25 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); + mutex_lock(&dev_priv->psr.lock); seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); + seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); + seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); + seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", + dev_priv->psr.busy_frontbuffer_bits); + seq_printf(m, "Re-enable work scheduled: %s\n", + yesno(work_busy(&dev_priv->psr.work.work))); enabled = HAS_PSR(dev) && I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; - seq_printf(m, "Enabled: %s\n", yesno(enabled)); + seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); if (HAS_PSR(dev)) psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & EDP_PSR_PERF_CNT_MASK; seq_printf(m, "Performance_Counter: %u\n", psrperf); + mutex_unlock(&dev_priv->psr.lock); intel_runtime_pm_put(dev_priv); return 0; @@ -2072,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused) seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); seq_printf(m, "IRQs disabled: %s\n", - yesno(dev_priv->pm.irqs_disabled)); + yesno(!intel_irqs_enabled(dev_priv))); return 0; } @@ -2126,6 +2050,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain) return "VGA"; case POWER_DOMAIN_AUDIO: return "AUDIO"; + case POWER_DOMAIN_PLLS: + return "PLLS"; case POWER_DOMAIN_INIT: return "INIT"; default: @@ -2223,9 +2149,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) struct drm_crtc *crtc = &intel_crtc->base; struct intel_encoder *intel_encoder; - seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", - crtc->primary->fb->base.id, crtc->x, crtc->y, - crtc->primary->fb->width, crtc->primary->fb->height); + if (crtc->primary->fb) + seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", + crtc->primary->fb->base.id, crtc->x, crtc->y, + crtc->primary->fb->width, crtc->primary->fb->height); + else + seq_puts(m, "\tprimary plane disabled\n"); for_each_encoder_on_crtc(dev, crtc, intel_encoder) intel_encoder_info(m, intel_crtc, intel_encoder); } @@ -2287,13 +2216,15 @@ static void intel_connector_info(struct seq_file *m, seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); } - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) - intel_dp_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_HDMI) - intel_hdmi_info(m, intel_connector); - else if (intel_encoder->type == INTEL_OUTPUT_LVDS) - intel_lvds_info(m, 
intel_connector); + if (intel_encoder) { + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || + intel_encoder->type == INTEL_OUTPUT_EDP) + intel_dp_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_HDMI) + intel_hdmi_info(m, intel_connector); + else if (intel_encoder->type == INTEL_OUTPUT_LVDS) + intel_lvds_info(m, intel_connector); + } seq_printf(m, "\tmodes:\n"); list_for_each_entry(mode, &connector->modes, head) @@ -2347,17 +2278,17 @@ static int i915_display_info(struct seq_file *m, void *unused) bool active; int x, y; - seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", + seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", crtc->base.base.id, pipe_name(crtc->pipe), - yesno(crtc->active)); + yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h); if (crtc->active) { intel_crtc_info(m, crtc); active = cursor_position(dev, crtc->pipe, &x, &y); - seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n", + seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", yesno(crtc->cursor_base), - x, y, crtc->cursor_addr, - yesno(active)); + x, y, crtc->cursor_width, crtc->cursor_height, + crtc->cursor_addr, yesno(active)); } seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", @@ -2377,12 +2308,132 @@ static int i915_display_info(struct seq_file *m, void *unused) return 0; } +static int i915_semaphore_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring; + int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + int i, j, ret; + + if (!i915_semaphore_is_enabled(dev)) { + seq_puts(m, "Semaphores are disabled\n"); + return 0; + } + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + intel_runtime_pm_get(dev_priv); + + if (IS_BROADWELL(dev)) { + struct page *page; + uint64_t *seqno; + + page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); + + seqno = (uint64_t *)kmap_atomic(page); + for_each_ring(ring, dev_priv, i) { + uint64_t offset; + + seq_printf(m, "%s\n", ring->name); + + seq_puts(m, " Last signal:"); + for (j = 0; j < num_rings; j++) { + offset = i * I915_NUM_RINGS + j; + seq_printf(m, "0x%08llx (0x%02llx) ", + seqno[offset], offset * 8); + } + seq_putc(m, '\n'); + + seq_puts(m, " Last wait: "); + for (j = 0; j < num_rings; j++) { + offset = i + (j * I915_NUM_RINGS); + seq_printf(m, "0x%08llx (0x%02llx) ", + seqno[offset], offset * 8); + } + seq_putc(m, '\n'); + + } + kunmap_atomic(seqno); + } else { + seq_puts(m, " Last signal:"); + for_each_ring(ring, dev_priv, i) + for (j = 0; j < num_rings; j++) + seq_printf(m, "0x%08x\n", + I915_READ(ring->semaphore.mbox.signal[j])); + seq_putc(m, '\n'); + } + + seq_puts(m, "\nSync seqno:\n"); + for_each_ring(ring, dev_priv, i) { + for (j = 0; j < num_rings; j++) { + seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); + } + seq_putc(m, '\n'); + } + seq_putc(m, '\n'); + + intel_runtime_pm_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + return 0; +} + +static int i915_shared_dplls_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + drm_modeset_lock_all(dev); + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + struct intel_shared_dpll 
*pll = &dev_priv->shared_dplls[i]; + + seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); + seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount, + pll->active, yesno(pll->on)); + seq_printf(m, " tracked hardware state:\n"); + seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll); + seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md); + seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0); + seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1); + seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll); + } + drm_modeset_unlock_all(dev); + + return 0; +} + struct pipe_crc_info { const char *name; struct drm_device *dev; enum pipe pipe; }; +static int i915_dp_mst_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_encoder *encoder; + struct intel_encoder *intel_encoder; + struct intel_digital_port *intel_dig_port; + drm_modeset_lock_all(dev); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + intel_encoder = to_intel_encoder(encoder); + if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) + continue; + intel_dig_port = enc_to_dig_port(encoder); + if (!intel_dig_port->dp.can_mst) + continue; + + drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); + } + drm_modeset_unlock_all(dev); + return 0; +} + static int i915_pipe_crc_open(struct inode *inode, struct file *filep) { struct pipe_crc_info *info = inode->i_private; @@ -2849,7 +2900,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, return 0; } -static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, +static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); + + drm_modeset_lock_all(dev); + /* + * If we use the eDP transcoder we need to make sure that we don't + * bypass the pfit, since otherwise the pipe CRC source won't work. Only + * relevant on hsw with pipe A when using the always-on power well + * routing. + */ + if (crtc->config.cpu_transcoder == TRANSCODER_EDP && + !crtc->config.pch_pfit.enabled) { + crtc->config.pch_pfit.force_thru = true; + + intel_display_power_get(dev_priv, + POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); + + dev_priv->display.crtc_disable(&crtc->base); + dev_priv->display.crtc_enable(&crtc->base); + } + drm_modeset_unlock_all(dev); +} + +static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); + + drm_modeset_lock_all(dev); + /* + * If we use the eDP transcoder we need to make sure that we don't + * bypass the pfit, since otherwise the pipe CRC source won't work. Only + * relevant on hsw with pipe A when using the always-on power well + * routing. 
+ */ + if (crtc->config.pch_pfit.force_thru) { + crtc->config.pch_pfit.force_thru = false; + + dev_priv->display.crtc_disable(&crtc->base); + dev_priv->display.crtc_enable(&crtc->base); + + intel_display_power_put(dev_priv, + POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); + } + drm_modeset_unlock_all(dev); +} + +static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, + enum pipe pipe, + enum intel_pipe_crc_source *source, uint32_t *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) @@ -2863,6 +2967,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; break; case INTEL_PIPE_CRC_SOURCE_PF: + if (IS_HASWELL(dev) && pipe == PIPE_A) + hsw_trans_edp_pipe_A_crc_wa(dev); + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; break; case INTEL_PIPE_CRC_SOURCE_NONE: @@ -2895,11 +3002,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, else if (INTEL_INFO(dev)->gen < 5) ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); else if (IS_VALLEYVIEW(dev)) - ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val); + ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); else if (IS_GEN5(dev) || IS_GEN6(dev)) ret = ilk_pipe_crc_ctl_reg(&source, &val); else - ret = ivb_pipe_crc_ctl_reg(&source, &val); + ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); if (ret != 0) return ret; @@ -2929,11 +3036,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, /* real source -> none transition */ if (source == INTEL_PIPE_CRC_SOURCE_NONE) { struct intel_pipe_crc_entry *entries; + struct intel_crtc *crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", pipe_name(pipe)); - intel_wait_for_vblank(dev, pipe); + drm_modeset_lock(&crtc->base.mutex, NULL); + if (crtc->active) + intel_wait_for_vblank(dev, pipe); + drm_modeset_unlock(&crtc->base.mutex); spin_lock_irq(&pipe_crc->lock); entries = pipe_crc->entries; @@ -2946,6 +3058,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, g4x_undo_pipe_scramble_reset(dev, pipe); else if (IS_VALLEYVIEW(dev)) vlv_undo_pipe_scramble_reset(dev, pipe); + else if (IS_HASWELL(dev) && pipe == PIPE_A) + hsw_undo_trans_edp_pipe_A_crc_wa(dev); } return 0; @@ -3177,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - if (!HAS_PCH_SPLIT(dev)) + if (HAS_GMCH_DISPLAY(dev)) return -ENODEV; return single_open(file, pri_wm_latency_show, dev); @@ -3187,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - if (!HAS_PCH_SPLIT(dev)) + if (HAS_GMCH_DISPLAY(dev)) return -ENODEV; return single_open(file, spr_wm_latency_show, dev); @@ -3197,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - if (!HAS_PCH_SPLIT(dev)) + if (HAS_GMCH_DISPLAY(dev)) return -ENODEV; return single_open(file, cur_wm_latency_show, dev); @@ -3506,7 +3620,7 @@ i915_max_freq_get(void *data, u64 *val) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3532,7 +3646,7 @@ i915_max_freq_set(void *data, u64 val) u32 rp_state_cap, hw_max, hw_min; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; 
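Aside: the i915_max_freq_get/set and i915_min_freq_get/set hooks touched above are the two halves of debugfs attribute files generated with DEFINE_SIMPLE_ATTRIBUTE, the same helper used for i915_next_seqno_fops earlier in i915_debugfs.c. A minimal sketch of that pattern is shown below; the example_* names and the module wrapper are illustrative only and are not part of this patch.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static u64 example_val;	/* stand-in for the rps state the driver would report */

static int example_get(void *data, u64 *val)
{
	*val = example_val;
	return 0;
}

static int example_set(void *data, u64 val)
{
	if (val > 0xffffff)	/* reject nonsense input */
		return -EINVAL;
	example_val = val;
	return 0;
}

/* Generates example_fops; the value is printed and parsed as "0x%llx\n". */
DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%llx\n");

static struct dentry *example_file;

static int __init example_init(void)
{
	example_file = debugfs_create_file("example_attr", 0644,
					   NULL, NULL, &example_fops);
	return example_file ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	debugfs_remove(example_file);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");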
flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3549,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val) if (IS_VALLEYVIEW(dev)) { val = vlv_freq_opcode(dev_priv, val); - hw_max = valleyview_rps_max_freq(dev_priv); - hw_min = valleyview_rps_min_freq(dev_priv); + hw_max = dev_priv->rps.max_freq; + hw_min = dev_priv->rps.min_freq; } else { do_div(val, GT_FREQUENCY_MULTIPLIER); @@ -3587,7 +3701,7 @@ i915_min_freq_get(void *data, u64 *val) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3613,7 +3727,7 @@ i915_min_freq_set(void *data, u64 val) u32 rp_state_cap, hw_max, hw_min; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) + if (INTEL_INFO(dev)->gen < 6) return -ENODEV; flush_delayed_work(&dev_priv->rps.delayed_resume_work); @@ -3630,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val) if (IS_VALLEYVIEW(dev)) { val = vlv_freq_opcode(dev_priv, val); - hw_max = valleyview_rps_max_freq(dev_priv); - hw_min = valleyview_rps_min_freq(dev_priv); + hw_max = dev_priv->rps.max_freq; + hw_min = dev_priv->rps.min_freq; } else { do_div(val, GT_FREQUENCY_MULTIPLIER); @@ -3799,14 +3913,10 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, - {"i915_rstdby_delays", i915_rstdby_delays, 0}, {"i915_frequency_info", i915_frequency_info, 0}, - {"i915_delayfreq_table", i915_delayfreq_table, 0}, - {"i915_inttoext_table", i915_inttoext_table, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_emon_status", i915_emon_status, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, - {"i915_gfxec", i915_gfxec, 0}, {"i915_fbc_status", i915_fbc_status, 0}, {"i915_ips_status", i915_ips_status, 0}, {"i915_sr_status", i915_sr_status, 0}, @@ -3823,6 +3933,9 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_pc8_status", i915_pc8_status, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_display_info", i915_display_info, 0}, + {"i915_semaphore_status", i915_semaphore_status, 0}, + {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, + {"i915_dp_mst_info", i915_dp_mst_info, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d44344140627..9933c26017ed 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev) I915_WRITE(HWS_PGA, 0x1ffff000); } -void i915_kernel_lost_context(struct drm_device * dev) +void i915_kernel_lost_context(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; @@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev) master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } -static int i915_dma_cleanup(struct drm_device * dev) +static int i915_dma_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; @@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev) return 0; } -static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) +static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private 
*master_priv = dev->primary->master->driver_priv; @@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) return 0; } -static int i915_dma_resume(struct drm_device * dev) +static int i915_dma_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring = LP_RING(dev_priv); @@ -359,7 +359,7 @@ static int validate_cmd(int cmd) return 0; } -static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) +static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords) { struct drm_i915_private *dev_priv = dev->dev_private; int i, ret; @@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) for (i = 0; i < dwords;) { int sz = validate_cmd(buffer[i]); + if (sz == 0 || i + sz > dwords) return -EINVAL; i += sz; @@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev) } } -static int i915_dispatch_cmdbuffer(struct drm_device * dev, +static int i915_dispatch_cmdbuffer(struct drm_device *dev, drm_i915_cmdbuffer_t *cmd, struct drm_clip_rect *cliprects, void *cmdbuf) @@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, return 0; } -static int i915_dispatch_batchbuffer(struct drm_device * dev, - drm_i915_batchbuffer_t * batch, +static int i915_dispatch_batchbuffer(struct drm_device *dev, + drm_i915_batchbuffer_t *batch, struct drm_clip_rect *cliprects) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, return 0; } -static int i915_dispatch_flip(struct drm_device * dev) +static int i915_dispatch_flip(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = @@ -755,7 +756,7 @@ fail_batch_free: return ret; } -static int i915_emit_irq(struct drm_device * dev) +static int i915_emit_irq(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; @@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev) return dev_priv->dri1.counter; } -static int i915_wait_irq(struct drm_device * dev, int irq_nr) +static int i915_wait_irq(struct drm_device *dev, int irq_nr) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; @@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ { struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { pr_info("switched on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -1334,6 +1336,13 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_power_domains_init_hw(dev_priv); + /* + * We enable some interrupt sources in our postinstall hooks, so mark + * interrupts as enabled _before_ actually enabling them to avoid + * special cases in our ordering checks. + */ + dev_priv->pm._irqs_disabled = false; + ret = drm_irq_install(dev, dev->pdev->irq); if (ret) goto cleanup_gem_stolen; @@ -1375,9 +1384,6 @@ static int i915_load_modeset_init(struct drm_device *dev) */ intel_fbdev_initial_config(dev); - /* Only enable hotplug handling once the fbdev is fully set up. 
*/ - dev_priv->enable_hotplug_processing = true; - drm_kms_helper_poll_init(dev); return 0; @@ -1425,15 +1431,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) } #if IS_ENABLED(CONFIG_FB) -static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) { struct apertures_struct *ap; struct pci_dev *pdev = dev_priv->dev->pdev; bool primary; + int ret; ap = alloc_apertures(1); if (!ap) - return; + return -ENOMEM; ap->ranges[0].base = dev_priv->gtt.mappable_base; ap->ranges[0].size = dev_priv->gtt.mappable_end; @@ -1441,13 +1448,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; - remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); kfree(ap); + + return ret; } #else -static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) { + return 0; } #endif @@ -1492,10 +1502,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) #define SEP_EMPTY #define PRINT_FLAG(name) info->name ? #name "," : "" #define SEP_COMMA , - DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" + DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), info->gen, dev_priv->dev->pdev->device, + dev_priv->dev->pdev->revision, DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); #undef PRINT_S #undef SEP_EMPTY @@ -1594,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (dev_priv == NULL) return -ENOMEM; - dev->dev_private = (void *)dev_priv; + dev->dev_private = dev_priv; dev_priv->dev = dev; /* copy initial configuration to dev_priv->info */ @@ -1606,6 +1617,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); spin_lock_init(&dev_priv->mm.object_stat_lock); + spin_lock_init(&dev_priv->mmio_flip_lock); mutex_init(&dev_priv->dpio_lock); mutex_init(&dev_priv->modeset_restore_lock); @@ -1664,7 +1676,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_gtt; } - i915_kick_out_firmware_fb(dev_priv); + ret = i915_kick_out_firmware_fb(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + goto out_gtt; + } } pci_set_master(dev->pdev); @@ -1717,6 +1733,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_mtrrfree; } + dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->dp_wq == NULL) { + DRM_ERROR("Failed to create our dp workqueue.\n"); + ret = -ENOMEM; + goto out_freewq; + } + intel_irq_init(dev); intel_uncore_sanitize(dev); @@ -1792,6 +1815,8 @@ out_gem_unload: intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); pm_qos_remove_request(&dev_priv->pm_qos); + destroy_workqueue(dev_priv->dp_wq); +out_freewq: destroy_workqueue(dev_priv->wq); out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); @@ -1892,6 +1917,7 @@ int i915_driver_unload(struct drm_device *dev) intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); + destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->wq); pm_qos_remove_request(&dev_priv->pm_qos); @@ -1933,7 +1959,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) * and DMA structures, 
since the kernel won't be using them, and clea * up any GEM state. */ -void i915_driver_lastclose(struct drm_device * dev) +void i915_driver_lastclose(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1954,11 +1980,11 @@ void i915_driver_lastclose(struct drm_device * dev) i915_dma_cleanup(dev); } -void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) +void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) { mutex_lock(&dev->struct_mutex); - i915_gem_context_close(dev, file_priv); - i915_gem_release(dev, file_priv); + i915_gem_context_close(dev, file); + i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); } @@ -2031,7 +2057,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); * manage the gtt, we need to claim that all intel devices are agp. For * otherwise the drm core refuses to initialize the agp support code. */ -int i915_driver_device_is_agp(struct drm_device * dev) +int i915_driver_device_is_agp(struct drm_device *dev) { return 1; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 651e65e051c0..e27cdbe9d524 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -28,6 +28,7 @@ */ #include <linux/device.h> +#include <linux/acpi.h> #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -46,8 +47,6 @@ static struct drm_driver driver; PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ - .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \ - .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } #define GEN_CHV_PIPEOFFSETS \ @@ -55,10 +54,6 @@ static struct drm_driver driver; CHV_PIPE_C_OFFSET }, \ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ CHV_TRANSCODER_C_OFFSET, }, \ - .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \ - CHV_DPLL_C_OFFSET }, \ - .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \ - CHV_DPLL_C_MD_OFFSET }, \ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ CHV_PALETTE_C_OFFSET } @@ -308,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_llc = 1, .has_ddi = 1, + .has_fpga_dbg = 1, .has_fbc = 1, GEN_DEFAULT_PIPEOFFSETS, IVB_CURSOR_OFFSETS, @@ -319,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_llc = 1, .has_ddi = 1, + .has_fpga_dbg = 1, .has_fbc = 1, GEN_DEFAULT_PIPEOFFSETS, IVB_CURSOR_OFFSETS, @@ -330,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .has_llc = 1, .has_ddi = 1, + .has_fpga_dbg = 1, .has_fbc = 1, GEN_DEFAULT_PIPEOFFSETS, IVB_CURSOR_OFFSETS, @@ -341,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .has_llc = 1, .has_ddi = 1, + .has_fpga_dbg = 1, .has_fbc = 1, GEN_DEFAULT_PIPEOFFSETS, IVB_CURSOR_OFFSETS, @@ -495,12 +494,41 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) return true; } +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + + dev_priv->long_hpd_port_mask = 0; + dev_priv->short_hpd_port_mask = 0; + dev_priv->hpd_event_bits = 0; + + 
spin_unlock_irq(&dev_priv->irq_lock); + + cancel_work_sync(&dev_priv->dig_port_work); + cancel_work_sync(&dev_priv->hotplug_work); + cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work); +} + +static void intel_suspend_encoders(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_encoder *encoder; + + drm_modeset_lock_all(dev); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + + if (intel_encoder->suspend) + intel_encoder->suspend(intel_encoder); + } + drm_modeset_unlock_all(dev); +} + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - - intel_runtime_pm_get(dev_priv); + pci_power_t opregion_target_state; /* ignore lid events during suspend */ mutex_lock(&dev_priv->modeset_restore_lock); @@ -526,21 +554,26 @@ static int i915_drm_freeze(struct drm_device *dev) return error; } - drm_irq_uninstall(dev); - dev_priv->enable_hotplug_processing = false; - - intel_disable_gt_powersave(dev); - /* * Disable CRTCs directly since we want to preserve sw state - * for _thaw. + * for _thaw. Also, power gate the CRTC power wells. */ drm_modeset_lock_all(dev); - for_each_crtc(dev, crtc) { - dev_priv->display.crtc_disable(crtc); - } + for_each_crtc(dev, crtc) + intel_crtc_control(crtc, false); drm_modeset_unlock_all(dev); + intel_dp_mst_suspend(dev); + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + intel_runtime_pm_disable_interrupts(dev); + intel_hpd_cancel_work(dev_priv); + + intel_suspend_encoders(dev_priv); + + intel_suspend_gt_powersave(dev); + intel_modeset_suspend_hw(dev); } @@ -548,8 +581,15 @@ static int i915_drm_freeze(struct drm_device *dev) i915_save_state(dev); + opregion_target_state = PCI_D3cold; +#if IS_ENABLED(CONFIG_ACPI_SLEEP) + if (acpi_target_system_state() < ACPI_STATE_S3) + opregion_target_state = PCI_D1; +#endif + intel_opregion_notify_adapter(dev, opregion_target_state); + + intel_uncore_forcewake_reset(dev, false); intel_opregion_fini(dev); - intel_uncore_fini(dev); console_lock(); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); @@ -557,6 +597,8 @@ static int i915_drm_freeze(struct drm_device *dev) dev_priv->suspend_count++; + intel_display_set_init_power(dev_priv, false); + return 0; } @@ -606,7 +648,10 @@ static int i915_drm_thaw_early(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - intel_uncore_early_sanitize(dev); + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + hsw_disable_pc8(dev_priv); + + intel_uncore_early_sanitize(dev, true); intel_uncore_sanitize(dev); intel_power_domains_init_hw(dev_priv); @@ -639,11 +684,19 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) } mutex_unlock(&dev->struct_mutex); - /* We need working interrupts for modeset enabling ... */ - drm_irq_install(dev, dev->pdev->irq); + intel_runtime_pm_restore_interrupts(dev); intel_modeset_init_hw(dev); + { + unsigned long irqflags; + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } + + intel_dp_mst_resume(dev); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, true); drm_modeset_unlock_all(dev); @@ -655,7 +708,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) * notifications. 
* */ intel_hpd_init(dev); - dev_priv->enable_hotplug_processing = true; /* Config may have changed between suspend and resume */ drm_helper_hpd_irq_event(dev); } @@ -678,7 +730,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); - intel_runtime_pm_put(dev_priv); + intel_opregion_notify_adapter(dev, PCI_D0); + return 0; } @@ -887,6 +940,7 @@ static int i915_pm_suspend_late(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_i915_private *dev_priv = drm_dev->dev_private; /* * We have a suspedn ordering issue with the snd-hda driver also @@ -900,6 +954,9 @@ static int i915_pm_suspend_late(struct device *dev) if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev)) + hsw_enable_pc8(dev_priv); + pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1f7700897dfc..3524306d8cfb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -53,7 +53,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20080730" +#define DRIVER_DATE "20140725" enum pipe { INVALID_PIPE = -1, @@ -129,6 +129,7 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_OTHER, POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO, + POWER_DOMAIN_PLLS, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, @@ -178,14 +179,21 @@ enum hpd_pin { list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ if ((intel_connector)->base.encoder == (__encoder)) +#define for_each_power_domain(domain, mask) \ + for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ + if ((1 << (domain)) & (mask)) + struct drm_i915_private; +struct i915_mm_struct; struct i915_mmu_object; enum intel_dpll_id { DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ /* real shared dpll ids must be >= 0 */ - DPLL_ID_PCH_PLL_A, - DPLL_ID_PCH_PLL_B, + DPLL_ID_PCH_PLL_A = 0, + DPLL_ID_PCH_PLL_B = 1, + DPLL_ID_WRPLL1 = 0, + DPLL_ID_WRPLL2 = 1, }; #define I915_NUM_PLLS 2 @@ -194,6 +202,7 @@ struct intel_dpll_hw_state { uint32_t dpll_md; uint32_t fp0; uint32_t fp1; + uint32_t wrpll; }; struct intel_shared_dpll { @@ -204,6 +213,8 @@ struct intel_shared_dpll { /* should match the index in the dev_priv->shared_dplls array */ enum intel_dpll_id id; struct intel_dpll_hw_state hw_state; + /* The mode_set hook is optional and should be used together with the + * intel_prepare_shared_dpll function. */ void (*mode_set)(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll); void (*enable)(struct drm_i915_private *dev_priv, @@ -228,12 +239,6 @@ void intel_link_compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n); -struct intel_ddi_plls { - int spll_refcount; - int wrpll1_refcount; - int wrpll2_refcount; -}; - /* Interface history: * * 1.1: Original. 
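Aside: the for_each_power_domain() iterator added to i915_drv.h above is just a loop over every domain id with a filter on the mask bit. A stand-alone illustration of the same pattern, using made-up domain names rather than the driver's enum:

#include <stdio.h>

/* Stand-in ids; the real enum intel_display_power_domain is much larger. */
enum power_domain { DOM_PIPE_A, DOM_TRANSCODER_A, DOM_VGA, DOM_AUDIO, DOM_NUM };

/* Same shape as the for_each_power_domain() macro in the patch. */
#define for_each_power_domain(domain, mask)			\
	for ((domain) = 0; (domain) < DOM_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

static const char * const names[] = { "PIPE_A", "TRANSCODER_A", "VGA", "AUDIO" };

int main(void)
{
	unsigned int mask = (1 << DOM_PIPE_A) | (1 << DOM_AUDIO);
	enum power_domain d;

	/* Visits only the domains whose bit is set in the mask. */
	for_each_power_domain(d, mask)
		printf("domain %s is in the mask\n", names[d]);

	return 0;
}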
@@ -310,6 +315,7 @@ struct drm_i915_error_state { u32 eir; u32 pgtbl_er; u32 ier; + u32 gtier[4]; u32 ccid; u32 derrmr; u32 forcewake; @@ -324,6 +330,7 @@ struct drm_i915_error_state { u64 fence[I915_MAX_NUM_FENCES]; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; + struct drm_i915_error_object *semaphore_obj; struct drm_i915_error_ring { bool valid; @@ -435,8 +442,8 @@ struct drm_i915_display_funcs { void (*update_wm)(struct drm_crtc *crtc); void (*update_sprite_wm)(struct drm_plane *plane, struct drm_crtc *crtc, - uint32_t sprite_width, int pixel_size, - bool enable, bool scaled); + uint32_t sprite_width, uint32_t sprite_height, + int pixel_size, bool enable, bool scaled); void (*modeset_global_resources)(struct drm_device *dev); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. */ @@ -552,8 +559,6 @@ struct intel_device_info { /* Register offsets for the various display pipes and transcoders */ int pipe_offsets[I915_MAX_TRANSCODERS]; int trans_offsets[I915_MAX_TRANSCODERS]; - int dpll_offsets[I915_MAX_PIPES]; - int dpll_md_offsets[I915_MAX_PIPES]; int palette_offsets[I915_MAX_PIPES]; int cursor_offsets[I915_MAX_PIPES]; }; @@ -586,28 +591,48 @@ struct i915_ctx_hang_stats { }; /* This must match up with the value previously used for execbuf2.rsvd1. */ -#define DEFAULT_CONTEXT_ID 0 +#define DEFAULT_CONTEXT_HANDLE 0 +/** + * struct intel_context - as the name implies, represents a context. + * @ref: reference count. + * @user_handle: userspace tracking identity for this context. + * @remap_slice: l3 row remapping information. + * @file_priv: filp associated with this context (NULL for global default + * context). + * @hang_stats: information about the role of this context in possible GPU + * hangs. + * @vm: virtual memory space used by this context. + * @legacy_hw_ctx: render context backing object and whether it is correctly + * initialized (legacy ring submission mechanism only). + * @link: link in the global list of contexts. + * + * Contexts are memory images used by the hardware to store copies of their + * internal state. + */ struct intel_context { struct kref ref; - int id; - bool is_initialized; + int user_handle; uint8_t remap_slice; struct drm_i915_file_private *file_priv; - struct intel_engine_cs *last_ring; - struct drm_i915_gem_object *obj; struct i915_ctx_hang_stats hang_stats; struct i915_address_space *vm; + struct { + struct drm_i915_gem_object *rcs_state; + bool initialized; + } legacy_hw_ctx; + struct list_head link; }; struct i915_fbc { unsigned long size; + unsigned threshold; unsigned int fb_id; enum plane plane; int y; - struct drm_mm_node *compressed_fb; + struct drm_mm_node compressed_fb; struct drm_mm_node *compressed_llb; struct intel_fbc_work { @@ -635,9 +660,15 @@ struct i915_drrs { struct intel_connector *connector; }; +struct intel_dp; struct i915_psr { + struct mutex lock; bool sink_support; bool source_ok; + struct intel_dp *enabled; + bool active; + struct delayed_work work; + unsigned busy_frontbuffer_bits; }; enum intel_pch { @@ -880,6 +911,12 @@ struct vlv_s0ix_state { u32 clock_gate_dis2; }; +struct intel_rps_ei { + u32 cz_clock; + u32 render_c0; + u32 media_c0; +}; + struct intel_gen6_power_mgmt { /* work and pm_iir are protected by dev_priv->irq_lock */ struct work_struct work; @@ -903,6 +940,9 @@ struct intel_gen6_power_mgmt { u8 efficient_freq; /* AKA RPe. 
Pre-determined balanced frequency */ u8 rp1_freq; /* "less than" RP0 power/freqency */ u8 rp0_freq; /* Non-overclocked max frequency. */ + u32 cz_freq; + + u32 ei_interrupt_count; int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; @@ -910,6 +950,9 @@ struct intel_gen6_power_mgmt { bool enabled; struct delayed_work delayed_resume_work; + /* manual wa residency calculations */ + struct intel_rps_ei up_ei, down_ei; + /* * Protects RPS/RC6 register access and PCU communication. * Must be taken after struct_mutex if nested. @@ -1230,6 +1273,7 @@ struct intel_vbt_data { u16 pwm_freq_hz; bool present; bool active_low_pwm; + u8 min_brightness; /* min_brightness/255 of max */ } backlight; /* MIPI DSI */ @@ -1299,7 +1343,7 @@ struct ilk_wm_values { */ struct i915_runtime_pm { bool suspended; - bool irqs_disabled; + bool _irqs_disabled; }; enum intel_pipe_crc_source { @@ -1332,6 +1376,17 @@ struct intel_pipe_crc { wait_queue_head_t wq; }; +struct i915_frontbuffer_tracking { + struct mutex lock; + + /* + * Tracking bits for delayed frontbuffer flushing du to gpu activity or + * scheduled flips. + */ + unsigned busy_bits; + unsigned flip_bits; +}; + struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; @@ -1363,6 +1418,7 @@ struct drm_i915_private { struct pci_dev *bridge_dev; struct intel_engine_cs ring[I915_NUM_RINGS]; + struct drm_i915_gem_object *semaphore_obj; uint32_t last_seqno, next_seqno; drm_dma_handle_t *status_page_dmah; @@ -1371,6 +1427,9 @@ struct drm_i915_private { /* protects the irq masks */ spinlock_t irq_lock; + /* protects the mmio flip data */ + spinlock_t mmio_flip_lock; + bool display_irqs_enabled; /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ @@ -1390,7 +1449,6 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct work_struct hotplug_work; - bool enable_hotplug_processing; struct { unsigned long hpd_last_jiffies; int hpd_cnt; @@ -1401,7 +1459,7 @@ struct drm_i915_private { } hpd_mark; } hpd_stats[HPD_NUM_PINS]; u32 hpd_event_bits; - struct timer_list hotplug_reenable_timer; + struct delayed_work hotplug_reenable_work; struct i915_fbc fbc; struct i915_drrs drrs; @@ -1449,9 +1507,8 @@ struct drm_i915_private { struct i915_gtt gtt; /* VM representing the global address space */ struct i915_gem_mm mm; -#if defined(CONFIG_MMU_NOTIFIER) - DECLARE_HASHTABLE(mmu_notifiers, 7); -#endif + DECLARE_HASHTABLE(mm_structs, 7); + struct mutex mm_lock; /* Kernel Modesetting */ @@ -1467,7 +1524,6 @@ struct drm_i915_private { int num_shared_dpll; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; - struct intel_ddi_plls ddi_plls; int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; /* Reclocking support */ @@ -1475,6 +1531,9 @@ struct drm_i915_private { bool lvds_downclock_avail; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; + + struct i915_frontbuffer_tracking fb_tracking; + u16 orig_clock; bool mchbar_need_disable; @@ -1541,6 +1600,20 @@ struct drm_i915_private { struct i915_runtime_pm pm; + struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS]; + u32 long_hpd_port_mask; + u32 short_hpd_port_mask; + struct work_struct dig_port_work; + + /* + * if we get a HPD irq from DP and a HPD irq from non-DP + * the non-DP HPD could block the workqueue on a mode config + * mutex getting, that userspace may have taken. However + * userspace is waiting on the DP workqueue to run which is + * blocked behind the non-DP one. 
+ */ + struct workqueue_struct *dp_wq; + /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! */ struct i915_dri1_state dri1; @@ -1592,6 +1665,28 @@ struct drm_i915_gem_object_ops { void (*release)(struct drm_i915_gem_object *); }; +/* + * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is + * considered to be the frontbuffer for the given plane interface-vise. This + * doesn't mean that the hw necessarily already scans it out, but that any + * rendering (by the cpu or gpu) will land in the frontbuffer eventually. + * + * We have one bit per pipe and per scanout plane type. + */ +#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4 +#define INTEL_FRONTBUFFER_BITS \ + (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) +#define INTEL_FRONTBUFFER_PRIMARY(pipe) \ + (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) +#define INTEL_FRONTBUFFER_CURSOR(pipe) \ + (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) +#define INTEL_FRONTBUFFER_SPRITE(pipe) \ + (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) +#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ + (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) +#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ + (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) + struct drm_i915_gem_object { struct drm_gem_object base; @@ -1662,6 +1757,12 @@ struct drm_i915_gem_object { unsigned int pin_display:1; /* + * Is the object to be mapped as read-only to the GPU + * Only honoured if hardware has relevant pte bit + */ + unsigned long gt_ro:1; + + /* * Is the GPU currently using a fence to access this buffer, */ unsigned int pending_fenced_gpu_access:1; @@ -1673,6 +1774,8 @@ struct drm_i915_gem_object { unsigned int has_global_gtt_mapping:1; unsigned int has_dma_mapping:1; + unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; + struct sg_table *pages; int pages_pin_count; @@ -1711,14 +1814,18 @@ struct drm_i915_gem_object { unsigned workers :4; #define I915_GEM_USERPTR_MAX_WORKERS 15 - struct mm_struct *mm; - struct i915_mmu_object *mn; + struct i915_mm_struct *mm; + struct i915_mmu_object *mmu_object; struct work_struct *work; } userptr; }; }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) +void i915_gem_track_fb(struct drm_i915_gem_object *old, + struct drm_i915_gem_object *new, + unsigned frontbuffer_bits); + /** * Request queue structure. * @@ -1940,10 +2047,8 @@ struct drm_i915_cmd_table { #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \ - (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) -#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \ - && !IS_GEN8(dev)) +#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) +#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) #define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) #define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) @@ -1998,6 +2103,8 @@ struct drm_i915_cmd_table { #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) +#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) + /* DPF == dynamic parity feature */ #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) @@ -2040,6 +2147,8 @@ struct i915_params { bool reset; bool disable_display; bool disable_vtd_wa; + int use_mmio_flip; + bool mmio_debug; }; extern struct i915_params i915 __read_mostly; @@ -2048,12 +2157,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev); extern void i915_kernel_lost_context(struct drm_device * dev); extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); -extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); +extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); extern void i915_driver_lastclose(struct drm_device * dev); extern void i915_driver_preclose(struct drm_device *dev, - struct drm_file *file_priv); + struct drm_file *file); extern void i915_driver_postclose(struct drm_device *dev, - struct drm_file *file_priv); + struct drm_file *file); extern int i915_driver_device_is_agp(struct drm_device * dev); #ifdef CONFIG_COMPAT extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, @@ -2069,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); extern void intel_console_resume(struct work_struct *work); @@ -2084,10 +2194,12 @@ extern void intel_irq_init(struct drm_device *dev); extern void intel_hpd_init(struct drm_device *dev); extern void intel_uncore_sanitize(struct drm_device *dev); -extern void intel_uncore_early_sanitize(struct drm_device *dev); +extern void intel_uncore_early_sanitize(struct drm_device *dev, + bool restore_forcewake); extern void intel_uncore_init(struct drm_device *dev); extern void intel_uncore_check_errors(struct drm_device *dev); extern void intel_uncore_fini(struct drm_device *dev); +extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); void i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, @@ -2235,6 +2347,8 @@ bool i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, bool interruptible); +int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); + static inline bool i915_reset_in_progress(struct i915_gpu_error *error) { return unlikely(atomic_read(&error->reset_counter) @@ -2404,7 +2518,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx) static inline bool i915_gem_context_is_default(const struct intel_context *c) { - return c->id == DEFAULT_CONTEXT_ID; + return c->user_handle == DEFAULT_CONTEXT_HANDLE; } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, @@ -2435,7 +2549,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev) /* i915_gem_stolen.c */ int i915_gem_init_stolen(struct drm_device *dev); -int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); +int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp); void i915_gem_stolen_cleanup_compression(struct drm_device *dev); void i915_gem_cleanup_stolen(struct drm_device *dev); struct drm_i915_gem_object * @@ -2445,7 +2559,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 stolen_offset, u32 gtt_offset, u32 
size); -void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) @@ -2593,8 +2706,8 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void valleyview_set_rps(struct drm_device *dev, u8 val); -extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv); -extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv); +extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, + bool enable); extern void intel_detect_pch(struct drm_device *dev); extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_enable_rc6(const struct drm_device *dev); @@ -2605,6 +2718,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +void intel_notify_mmio_flip(struct intel_engine_cs *ring); + /* overlay */ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, @@ -2700,10 +2815,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val); static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) { - if (HAS_PCH_SPLIT(dev)) - return CPU_VGACNTRL; - else if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev)) return VLV_VGACNTRL; + else if (INTEL_INFO(dev)->gen >= 5) + return CPU_VGACNTRL; else return VGACNTRL; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f247d922e44a..ad55b06a3cb1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error, * Compare seqno against outstanding lazy request. Emit a request if they are * equal. */ -static int +int i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) { int ret; @@ -1161,14 +1161,14 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno, s64 before, now; int ret; - WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n"); + WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) return 0; timeout_expire = timeout ? 
jiffies + nsecs_to_jiffies((u64)*timeout) : 0; - if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) { + if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) { gen6_rps_boost(dev_priv); if (file_priv) mod_delayed_work(dev_priv->wq, @@ -1560,14 +1560,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto unpin; - obj->fault_mappable = true; - + /* Finally, remap it using the new GTT offset */ pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); pfn >>= PAGE_SHIFT; - pfn += page_offset; - /* Finally, remap it using the new GTT offset */ - ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + if (!obj->fault_mappable) { + unsigned long size = min_t(unsigned long, + vma->vm_end - vma->vm_start, + obj->base.size); + int i; + + for (i = 0; i < size >> PAGE_SHIFT; i++) { + ret = vm_insert_pfn(vma, + (unsigned long)vma->vm_start + i * PAGE_SIZE, + pfn + i); + if (ret) + break; + } + + obj->fault_mappable = true; + } else + ret = vm_insert_pfn(vma, + (unsigned long)vmf->virtual_address, + pfn + page_offset); unpin: i915_gem_object_ggtt_unpin(obj); unlock: @@ -1575,10 +1590,13 @@ unlock: out: switch (ret) { case -EIO: - /* If this -EIO is due to a gpu hang, give the reset code a - * chance to clean up the mess. Otherwise return the proper - * SIGBUS. */ - if (i915_terminally_wedged(&dev_priv->gpu_error)) { + /* + * We eat errors when the gpu is terminally wedged to avoid + * userspace unduly crashing (gl has no provisions for mmaps to + * fail). But any other -EIO isn't ours (e.g. swap in failure) + * and so needs to be reported. + */ + if (!i915_terminally_wedged(&dev_priv->gpu_error)) { ret = VM_FAULT_SIGBUS; break; } @@ -2051,16 +2069,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * our own buffer, now let the real VM do its job and * go down in flames if truly OOM. */ - gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD); - gfp |= __GFP_IO | __GFP_WAIT; - i915_gem_shrink_all(dev_priv); - page = shmem_read_mapping_page_gfp(mapping, i, gfp); + page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) goto err_pages; - - gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; - gfp &= ~(__GFP_IO | __GFP_WAIT); } #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { @@ -2209,6 +2221,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) list_move_tail(&vma->mm_list, &vm->inactive_list); } + intel_fb_obj_flush(obj, true); + list_del_init(&obj->ring_list); obj->ring = NULL; @@ -2318,7 +2332,7 @@ int __i915_add_request(struct intel_engine_cs *ring, u32 request_ring_position, request_start; int ret; - request_start = intel_ring_get_tail(ring); + request_start = intel_ring_get_tail(ring->buffer); /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -2339,7 +2353,7 @@ int __i915_add_request(struct intel_engine_cs *ring, * GPU processing the request, we never over-estimate the * position of the head. 
*/ - request_ring_position = intel_ring_get_tail(ring); + request_ring_position = intel_ring_get_tail(ring->buffer); ret = ring->add_request(ring); if (ret) @@ -2822,6 +2836,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, idx = intel_ring_sync_index(from, to); seqno = obj->last_read_seqno; + /* Optimization: Avoid semaphore sync when we are sure we already + * waited for an object with higher seqno */ if (seqno <= from->semaphore.sync_seqno[idx]) return 0; @@ -2905,8 +2921,6 @@ int i915_vma_unbind(struct i915_vma *vma) vma->unbind_vma(vma); - i915_gem_gtt_finish_object(obj); - list_del_init(&vma->mm_list); /* Avoid an unnecessary call to unbind on rebind. */ if (i915_is_ggtt(vma->vm)) @@ -2917,8 +2931,10 @@ int i915_vma_unbind(struct i915_vma *vma) /* Since the unbound list is global, only move to that list if * no more VMAs exist. */ - if (list_empty(&obj->vma_list)) + if (list_empty(&obj->vma_list)) { + i915_gem_gtt_finish_object(obj); list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); + } /* And finally now the object is completely decoupled from this vma, * we can drop its hold on the backing storage and allow it to be @@ -3530,6 +3546,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; + intel_fb_obj_flush(obj, false); + trace_i915_gem_object_change_domain(obj, obj->base.read_domains, old_write_domain); @@ -3551,6 +3569,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; + intel_fb_obj_flush(obj, false); + trace_i915_gem_object_change_domain(obj, obj->base.read_domains, old_write_domain); @@ -3604,6 +3624,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) obj->dirty = 1; } + if (write) + intel_fb_obj_invalidate(obj, NULL); + trace_i915_gem_object_change_domain(obj, old_read_domains, old_write_domain); @@ -3940,6 +3963,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) obj->base.write_domain = I915_GEM_DOMAIN_CPU; } + if (write) + intel_fb_obj_invalidate(obj, NULL); + trace_i915_gem_object_change_domain(obj, old_read_domains, old_write_domain); @@ -4428,13 +4454,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) if (obj->stolen) i915_gem_object_unpin_pages(obj); + WARN_ON(obj->frontbuffer_bits); + if (WARN_ON(obj->pages_pin_count)) obj->pages_pin_count = 0; if (discard_backing_storage(obj)) obj->madv = I915_MADV_DONTNEED; i915_gem_object_put_pages(obj); i915_gem_object_free_mmap_offset(obj); - i915_gem_object_release_stolen(obj); BUG_ON(obj->pages); @@ -4521,7 +4548,7 @@ i915_gem_suspend(struct drm_device *dev) del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->mm.retire_work); - cancel_delayed_work_sync(&dev_priv->mm.idle_work); + flush_delayed_work(&dev_priv->mm.idle_work); return 0; @@ -4912,6 +4939,8 @@ i915_gem_load(struct drm_device *dev) dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; register_oom_notifier(&dev_priv->mm.oom_notifier); + + mutex_init(&dev_priv->fb_tracking.lock); } void i915_gem_release(struct drm_device *dev, struct drm_file *file) @@ -4973,6 +5002,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) return ret; } +void i915_gem_track_fb(struct drm_i915_gem_object *old, + struct drm_i915_gem_object *new, + unsigned frontbuffer_bits) +{ + if (old) { + 
WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex)); + WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits)); + old->frontbuffer_bits &= ~frontbuffer_bits; + } + + if (new) { + WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex)); + WARN_ON(new->frontbuffer_bits & frontbuffer_bits); + new->frontbuffer_bits |= frontbuffer_bits; + } +} + static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) { if (!mutex_is_locked(mutex)) @@ -5055,12 +5101,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, vm == &dev_priv->mm.aliasing_ppgtt->base) vm = &dev_priv->gtt.base; - BUG_ON(list_empty(&o->vma_list)); list_for_each_entry(vma, &o->vma_list, vma_link) { if (vma->vm == vm) return vma->node.start; } + WARN(1, "%s vma for this object not found.\n", + i915_is_ggtt(vm) ? "global" : "ppgtt"); return -1; } @@ -5141,8 +5188,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) bool was_interruptible; bool unlock; - while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) + while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { schedule_timeout_killable(1); + if (fatal_signal_pending(current)) + return NOTIFY_DONE; + } if (timeout == 0) { pr_err("Unable to purge GPU memory due lock contention.\n"); return NOTIFY_DONE; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a5ddf3bce9c3..3b99390e467a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -182,22 +182,50 @@ void i915_gem_context_free(struct kref *ctx_ref) typeof(*ctx), ref); struct i915_hw_ppgtt *ppgtt = NULL; - if (ctx->obj) { + if (ctx->legacy_hw_ctx.rcs_state) { /* We refcount even the aliasing PPGTT to keep the code symmetric */ - if (USES_PPGTT(ctx->obj->base.dev)) + if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev)) ppgtt = ctx_to_ppgtt(ctx); - - /* XXX: Free up the object before tearing down the address space, in - * case we're bound in the PPGTT */ - drm_gem_object_unreference(&ctx->obj->base); } if (ppgtt) kref_put(&ppgtt->ref, ppgtt_release); + if (ctx->legacy_hw_ctx.rcs_state) + drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); list_del(&ctx->link); kfree(ctx); } +static struct drm_i915_gem_object * +i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) +{ + struct drm_i915_gem_object *obj; + int ret; + + obj = i915_gem_alloc_object(dev, size); + if (obj == NULL) + return ERR_PTR(-ENOMEM); + + /* + * Try to make the context utilize L3 as well as LLC. + * + * On VLV we don't have L3 controls in the PTEs so we + * shouldn't touch the cache level, especially as that + * would make the object snooped which might have a + * negative performance impact. 
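
i915_gem_track_fb(), added just above, hands a set of frontbuffer bits from one object to another while checking that the old object held them and the new one did not. A simplified userspace model of that bookkeeping; struct fb_obj and the bit values are illustrative, and plain asserts replace the locking checks and WARN_ONs:

    #include <assert.h>
    #include <stdio.h>

    struct fb_obj {
        unsigned int frontbuffer_bits;
    };

    /* Move ownership of frontbuffer_bits from old to new (either may be NULL). */
    static void track_fb(struct fb_obj *old, struct fb_obj *new_obj,
                         unsigned int frontbuffer_bits)
    {
        if (old) {
            /* old should currently hold (at least some of) the bits */
            assert(old->frontbuffer_bits & frontbuffer_bits);
            old->frontbuffer_bits &= ~frontbuffer_bits;
        }
        if (new_obj) {
            /* new must not already hold any of them */
            assert((new_obj->frontbuffer_bits & frontbuffer_bits) == 0);
            new_obj->frontbuffer_bits |= frontbuffer_bits;
        }
    }

    int main(void)
    {
        struct fb_obj a = { .frontbuffer_bits = 0x3 };
        struct fb_obj b = { 0 };

        track_fb(&a, &b, 0x1);  /* e.g. the primary plane flips from a to b */
        printf("a=%#x b=%#x\n", a.frontbuffer_bits, b.frontbuffer_bits);
        assert(a.frontbuffer_bits == 0x2 && b.frontbuffer_bits == 0x1);
        return 0;
    }
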
+ */ + if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) { + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); + /* Failure shouldn't ever happen this early */ + if (WARN_ON(ret)) { + drm_gem_object_unreference(&obj->base); + return ERR_PTR(ret); + } + } + + return obj; +} + static struct i915_hw_ppgtt * create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx) { @@ -234,40 +262,26 @@ __create_hw_context(struct drm_device *dev, list_add_tail(&ctx->link, &dev_priv->context_list); if (dev_priv->hw_context_size) { - ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); - if (ctx->obj == NULL) { - ret = -ENOMEM; + struct drm_i915_gem_object *obj = + i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); goto err_out; } - - /* - * Try to make the context utilize L3 as well as LLC. - * - * On VLV we don't have L3 controls in the PTEs so we - * shouldn't touch the cache level, especially as that - * would make the object snooped which might have a - * negative performance impact. - */ - if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) { - ret = i915_gem_object_set_cache_level(ctx->obj, - I915_CACHE_L3_LLC); - /* Failure shouldn't ever happen this early */ - if (WARN_ON(ret)) - goto err_out; - } + ctx->legacy_hw_ctx.rcs_state = obj; } /* Default context will never have a file_priv */ if (file_priv != NULL) { ret = idr_alloc(&file_priv->context_idr, ctx, - DEFAULT_CONTEXT_ID, 0, GFP_KERNEL); + DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL); if (ret < 0) goto err_out; } else - ret = DEFAULT_CONTEXT_ID; + ret = DEFAULT_CONTEXT_HANDLE; ctx->file_priv = file_priv; - ctx->id = ret; + ctx->user_handle = ret; /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. */ @@ -301,7 +315,7 @@ i915_gem_create_context(struct drm_device *dev, if (IS_ERR(ctx)) return ctx; - if (is_global_default_ctx && ctx->obj) { + if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) { /* We may need to do things with the shrinker which * require us to immediately switch back to the default * context. This can cause a problem as pinning the @@ -309,7 +323,7 @@ i915_gem_create_context(struct drm_device *dev, * be available. To avoid this we always pin the default * context. 
*/ - ret = i915_gem_obj_ggtt_pin(ctx->obj, + ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, get_context_alignment(dev), 0); if (ret) { DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); @@ -349,8 +363,8 @@ i915_gem_create_context(struct drm_device *dev, return ctx; err_unpin: - if (is_global_default_ctx && ctx->obj) - i915_gem_object_ggtt_unpin(ctx->obj); + if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) + i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); err_destroy: i915_gem_context_unreference(ctx); return ERR_PTR(ret); @@ -366,23 +380,27 @@ void i915_gem_context_reset(struct drm_device *dev) for (i = 0; i < I915_NUM_RINGS; i++) { struct intel_engine_cs *ring = &dev_priv->ring[i]; struct intel_context *dctx = ring->default_context; + struct intel_context *lctx = ring->last_context; /* Do a fake switch to the default context */ - if (ring->last_context == dctx) + if (lctx == dctx) continue; - if (!ring->last_context) + if (!lctx) continue; - if (dctx->obj && i == RCS) { - WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj, + if (dctx->legacy_hw_ctx.rcs_state && i == RCS) { + WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state, get_context_alignment(dev), 0)); /* Fake a finish/inactive */ - dctx->obj->base.write_domain = 0; - dctx->obj->active = 0; + dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0; + dctx->legacy_hw_ctx.rcs_state->active = 0; } - i915_gem_context_unreference(ring->last_context); + if (lctx->legacy_hw_ctx.rcs_state && i == RCS) + i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state); + + i915_gem_context_unreference(lctx); i915_gem_context_reference(dctx); ring->last_context = dctx; } @@ -429,7 +447,7 @@ void i915_gem_context_fini(struct drm_device *dev) struct intel_context *dctx = dev_priv->ring[RCS].default_context; int i; - if (dctx->obj) { + if (dctx->legacy_hw_ctx.rcs_state) { /* The only known way to stop the gpu from accessing the hw context is * to reset it. Do this as the very last operation to avoid confusing * other code, leading to spurious errors. */ @@ -444,13 +462,13 @@ void i915_gem_context_fini(struct drm_device *dev) WARN_ON(!dev_priv->ring[RCS].last_context); if (dev_priv->ring[RCS].last_context == dctx) { /* Fake switch to NULL context */ - WARN_ON(dctx->obj->active); - i915_gem_object_ggtt_unpin(dctx->obj); + WARN_ON(dctx->legacy_hw_ctx.rcs_state->active); + i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); i915_gem_context_unreference(dctx); dev_priv->ring[RCS].last_context = NULL; } - i915_gem_object_ggtt_unpin(dctx->obj); + i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); } for (i = 0; i < I915_NUM_RINGS; i++) { @@ -570,7 +588,7 @@ mi_set_context(struct intel_engine_cs *ring, intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) | + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -602,16 +620,16 @@ static int do_switch(struct intel_engine_cs *ring, int ret, i; if (from != NULL && ring == &dev_priv->ring[RCS]) { - BUG_ON(from->obj == NULL); - BUG_ON(!i915_gem_obj_is_pinned(from->obj)); + BUG_ON(from->legacy_hw_ctx.rcs_state == NULL); + BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); } - if (from == to && from->last_ring == ring && !to->remap_slice) + if (from == to && !to->remap_slice) return 0; /* Trying to pin first makes error handling easier. 
*/ if (ring == &dev_priv->ring[RCS]) { - ret = i915_gem_obj_ggtt_pin(to->obj, + ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, get_context_alignment(ring->dev), 0); if (ret) return ret; @@ -644,17 +662,17 @@ static int do_switch(struct intel_engine_cs *ring, * * XXX: We need a real interface to do this instead of trickery. */ - ret = i915_gem_object_set_to_gtt_domain(to->obj, false); + ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false); if (ret) goto unpin_out; - if (!to->obj->has_global_gtt_mapping) { - struct i915_vma *vma = i915_gem_obj_to_vma(to->obj, + if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) { + struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state, &dev_priv->gtt.base); - vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND); + vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND); } - if (!to->is_initialized || i915_gem_context_is_default(to)) + if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) hw_flags |= MI_RESTORE_INHIBIT; ret = mi_set_context(ring, to, hw_flags); @@ -680,8 +698,8 @@ static int do_switch(struct intel_engine_cs *ring, * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { - from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring); + from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be @@ -689,21 +707,20 @@ static int do_switch(struct intel_engine_cs *ring, * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet. 
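
do_switch() above now returns early whenever the ring is already running the target context and no L3 slice remapping is pending, and it sets MI_RESTORE_INHIBIT when the target context has never been initialized or is the default one. A compact model of just that decision logic; struct ctx, need_switch() and the flag value are invented for the sketch:

    #include <assert.h>
    #include <stdbool.h>

    #define RESTORE_INHIBIT (1u << 0)   /* stands in for MI_RESTORE_INHIBIT */

    struct ctx {
        bool initialized;
        bool is_default;
        unsigned int remap_slice;   /* bitmask of L3 slices still to remap */
    };

    /* Decide whether a switch is needed and which hw flags it should use. */
    static bool need_switch(const struct ctx *from, const struct ctx *to,
                            unsigned int *hw_flags)
    {
        if (from == to && !to->remap_slice)
            return false;               /* already current: nothing to do */

        *hw_flags = 0;
        if (!to->initialized || to->is_default)
            *hw_flags |= RESTORE_INHIBIT;   /* no saved state to restore yet */
        return true;
    }

    int main(void)
    {
        struct ctx def = { .initialized = true, .is_default = true };
        struct ctx user = { .initialized = false };
        unsigned int flags;

        assert(!need_switch(&def, &def, &flags));
        assert(need_switch(&def, &user, &flags) && (flags & RESTORE_INHIBIT));
        return 0;
    }
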
*/ - from->obj->dirty = 1; - BUG_ON(from->obj->ring != ring); + from->legacy_hw_ctx.rcs_state->dirty = 1; + BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring); /* obj is kept alive until the next request by its active ref */ - i915_gem_object_ggtt_unpin(from->obj); + i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); i915_gem_context_unreference(from); } - uninitialized = !to->is_initialized && from == NULL; - to->is_initialized = true; + uninitialized = !to->legacy_hw_ctx.initialized && from == NULL; + to->legacy_hw_ctx.initialized = true; done: i915_gem_context_reference(to); ring->last_context = to; - to->last_ring = ring; if (uninitialized) { ret = i915_gem_render_state_init(ring); @@ -715,7 +732,7 @@ done: unpin_out: if (ring->id == RCS) - i915_gem_object_ggtt_unpin(to->obj); + i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); return ret; } @@ -736,7 +753,7 @@ int i915_switch_context(struct intel_engine_cs *ring, WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); - if (to->obj == NULL) { /* We have the fake context */ + if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ if (to != ring->last_context) { i915_gem_context_reference(to); if (ring->last_context) @@ -774,7 +791,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (IS_ERR(ctx)) return PTR_ERR(ctx); - args->ctx_id = ctx->id; + args->ctx_id = ctx->user_handle; DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); return 0; @@ -788,7 +805,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct intel_context *ctx; int ret; - if (args->ctx_id == DEFAULT_CONTEXT_ID) + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) return -ENOENT; ret = i915_mutex_lock_interruptible(dev); @@ -801,7 +818,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, return PTR_ERR(ctx); } - idr_remove(&ctx->file_priv->context_idr, ctx->id); + idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); i915_gem_context_unreference(ctx); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3a30133f93e8..60998fc4e5b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, struct intel_context *ctx = NULL; struct i915_ctx_hang_stats *hs; - if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) + if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) return ERR_PTR(-EINVAL); ctx = i915_gem_context_get(file->driver_priv, ctx_id); @@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (obj->base.write_domain) { obj->dirty = 1; obj->last_write_seqno = intel_ring_get_seqno(ring); - /* check for potential scanout */ - if (i915_gem_obj_ggtt_bound(obj) && - i915_gem_obj_to_ggtt(obj)->pin_count) - intel_mark_fb_busy(obj, ring); + + intel_fb_obj_invalidate(obj, ring); /* update for the implicit flush after a batch */ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; @@ -1028,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return 0; } +static int +legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags) +{ + struct drm_clip_rect *cliprects = NULL; + struct 
drm_i915_private *dev_priv = dev->dev_private; + u64 exec_len; + int instp_mode; + u32 instp_mask; + int i, ret = 0; + + if (args->num_cliprects != 0) { + if (ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("clip rectangles are only valid with the render ring\n"); + return -EINVAL; + } + + if (INTEL_INFO(dev)->gen >= 5) { + DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); + return -EINVAL; + } + + if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { + DRM_DEBUG("execbuf with %u cliprects\n", + args->num_cliprects); + return -EINVAL; + } + + cliprects = kcalloc(args->num_cliprects, + sizeof(*cliprects), + GFP_KERNEL); + if (cliprects == NULL) { + ret = -ENOMEM; + goto error; + } + + if (copy_from_user(cliprects, + to_user_ptr(args->cliprects_ptr), + sizeof(*cliprects)*args->num_cliprects)) { + ret = -EFAULT; + goto error; + } + } else { + if (args->DR4 == 0xffffffff) { + DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); + args->DR4 = 0; + } + + if (args->DR1 || args->DR4 || args->cliprects_ptr) { + DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); + return -EINVAL; + } + } + + ret = i915_gem_execbuffer_move_to_gpu(ring, vmas); + if (ret) + goto error; + + ret = i915_switch_context(ring, ctx); + if (ret) + goto error; + + instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; + instp_mask = I915_EXEC_CONSTANTS_MASK; + switch (instp_mode) { + case I915_EXEC_CONSTANTS_REL_GENERAL: + case I915_EXEC_CONSTANTS_ABSOLUTE: + case I915_EXEC_CONSTANTS_REL_SURFACE: + if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { + DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); + ret = -EINVAL; + goto error; + } + + if (instp_mode != dev_priv->relative_constants_mode) { + if (INTEL_INFO(dev)->gen < 4) { + DRM_DEBUG("no rel constants on pre-gen4\n"); + ret = -EINVAL; + goto error; + } + + if (INTEL_INFO(dev)->gen > 5 && + instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { + DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); + ret = -EINVAL; + goto error; + } + + /* The HW changed the meaning on this bit on gen6 */ + if (INTEL_INFO(dev)->gen >= 6) + instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; + } + break; + default: + DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); + ret = -EINVAL; + goto error; + } + + if (ring == &dev_priv->ring[RCS] && + instp_mode != dev_priv->relative_constants_mode) { + ret = intel_ring_begin(ring, 4); + if (ret) + goto error; + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, INSTPM); + intel_ring_emit(ring, instp_mask << 16 | instp_mode); + intel_ring_advance(ring); + + dev_priv->relative_constants_mode = instp_mode; + } + + if (args->flags & I915_EXEC_GEN7_SOL_RESET) { + ret = i915_reset_gen7_sol_offsets(dev, ring); + if (ret) + goto error; + } + + exec_len = args->batch_len; + if (cliprects) { + for (i = 0; i < args->num_cliprects; i++) { + ret = i915_emit_box(dev, &cliprects[i], + args->DR1, args->DR4); + if (ret) + goto error; + + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len, + flags); + if (ret) + goto error; + } + } else { + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len, + flags); + if (ret) + return ret; + } + + trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); + + i915_gem_execbuffer_move_to_active(vmas, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); + +error: + kfree(cliprects); + return ret; +} + /** * Find one BSD ring to dispatch the corresponding BSD command. * The Ring ID is returned. 
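
The cliprect handling moved into legacy_ringbuffer_submission() keeps the guard that rejects a count whose allocation size would overflow before calling kcalloc(). The same defensive pattern as a standalone sketch; the struct layout is made up and memcpy stands in for copy_from_user():

    #include <errno.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct clip_rect { uint16_t x1, y1, x2, y2; };

    /* Refuse counts that would overflow count * sizeof(element). */
    static struct clip_rect *alloc_cliprects(unsigned int num,
                                             const struct clip_rect *user,
                                             int *err)
    {
        struct clip_rect *rects;

        if (num > UINT_MAX / sizeof(*rects)) {
            *err = -EINVAL;
            return NULL;
        }

        rects = calloc(num, sizeof(*rects));
        if (!rects) {
            *err = -ENOMEM;
            return NULL;
        }

        memcpy(rects, user, (size_t)num * sizeof(*rects)); /* ~copy_from_user */
        *err = 0;
        return rects;
    }

    int main(void)
    {
        struct clip_rect src[2] = { { 0, 0, 16, 16 }, { 16, 0, 32, 16 } };
        int err;
        struct clip_rect *r = alloc_cliprects(2, src, &err);

        free(r);
        return err ? 1 : 0;
    }
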
@@ -1087,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_private *dev_priv = dev->dev_private; struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; - struct drm_clip_rect *cliprects = NULL; struct intel_engine_cs *ring; struct intel_context *ctx; struct i915_address_space *vm; const u32 ctx_id = i915_execbuffer2_get_context_id(*args); - u64 exec_start = args->batch_start_offset, exec_len; - u32 mask, flags; - int ret, mode, i; + u64 exec_start = args->batch_start_offset; + u32 flags; + int ret; bool need_relocs; if (!i915_gem_check_execbuffer(args)) @@ -1138,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } - mode = args->flags & I915_EXEC_CONSTANTS_MASK; - mask = I915_EXEC_CONSTANTS_MASK; - switch (mode) { - case I915_EXEC_CONSTANTS_REL_GENERAL: - case I915_EXEC_CONSTANTS_ABSOLUTE: - case I915_EXEC_CONSTANTS_REL_SURFACE: - if (mode != 0 && ring != &dev_priv->ring[RCS]) { - DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); - return -EINVAL; - } - - if (mode != dev_priv->relative_constants_mode) { - if (INTEL_INFO(dev)->gen < 4) { - DRM_DEBUG("no rel constants on pre-gen4\n"); - return -EINVAL; - } - - if (INTEL_INFO(dev)->gen > 5 && - mode == I915_EXEC_CONSTANTS_REL_SURFACE) { - DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); - return -EINVAL; - } - - /* The HW changed the meaning on this bit on gen6 */ - if (INTEL_INFO(dev)->gen >= 6) - mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; - } - break; - default: - DRM_DEBUG("execbuf with unknown constants: %d\n", mode); - return -EINVAL; - } - if (args->buffer_count < 1) { DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; } - if (args->num_cliprects != 0) { - if (ring != &dev_priv->ring[RCS]) { - DRM_DEBUG("clip rectangles are only valid with the render ring\n"); - return -EINVAL; - } - - if (INTEL_INFO(dev)->gen >= 5) { - DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); - return -EINVAL; - } - - if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { - DRM_DEBUG("execbuf with %u cliprects\n", - args->num_cliprects); - return -EINVAL; - } - - cliprects = kcalloc(args->num_cliprects, - sizeof(*cliprects), - GFP_KERNEL); - if (cliprects == NULL) { - ret = -ENOMEM; - goto pre_mutex_err; - } - - if (copy_from_user(cliprects, - to_user_ptr(args->cliprects_ptr), - sizeof(*cliprects)*args->num_cliprects)) { - ret = -EFAULT; - goto pre_mutex_err; - } - } else { - if (args->DR4 == 0xffffffff) { - DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); - args->DR4 = 0; - } - - if (args->DR1 || args->DR4 || args->cliprects_ptr) { - DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); - return -EINVAL; - } - } - intel_runtime_pm_get(dev_priv); ret = i915_mutex_lock_interruptible(dev); @@ -1322,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, else exec_start += i915_gem_obj_offset(batch_obj, vm); - ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); + ret = legacy_ringbuffer_submission(dev, file, ring, ctx, + args, &eb->vmas, batch_obj, exec_start, flags); if (ret) goto err; - ret = i915_switch_context(ring, ctx); - if (ret) - goto err; - - if (ring == &dev_priv->ring[RCS] && - mode != dev_priv->relative_constants_mode) { - ret = intel_ring_begin(ring, 4); - if (ret) - goto err; - - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, INSTPM); - intel_ring_emit(ring, mask << 16 | mode); - intel_ring_advance(ring); - - 
dev_priv->relative_constants_mode = mode; - } - - if (args->flags & I915_EXEC_GEN7_SOL_RESET) { - ret = i915_reset_gen7_sol_offsets(dev, ring); - if (ret) - goto err; - } - - - exec_len = args->batch_len; - if (cliprects) { - for (i = 0; i < args->num_cliprects; i++) { - ret = i915_emit_box(dev, &cliprects[i], - args->DR1, args->DR4); - if (ret) - goto err; - - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); - if (ret) - goto err; - } - } else { - ret = ring->dispatch_execbuffer(ring, - exec_start, exec_len, - flags); - if (ret) - goto err; - } - - trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); - - i915_gem_execbuffer_move_to_active(&eb->vmas, ring); - i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); - err: /* the request owns the ref now */ i915_gem_context_unreference(ctx); @@ -1387,8 +1413,6 @@ err: mutex_unlock(&dev->struct_mutex); pre_mutex_err: - kfree(cliprects); - /* intel_gpu_busy should also get a ref, so it will free when the device * is really idle. */ intel_runtime_pm_put(dev_priv); @@ -1525,7 +1549,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ - struct drm_i915_gem_exec_object2 *user_exec_list = + struct drm_i915_gem_exec_object2 __user *user_exec_list = to_user_ptr(args->buffers_ptr); int i; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8b3cde703364..e42925f76b4b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -63,6 +63,13 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) } #endif + /* Early VLV doesn't have this */ + if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && + dev->pdev->revision < 0xb) { + DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); + return 0; + } + return HAS_ALIASING_PPGTT(dev) ? 1 : 0; } @@ -110,7 +117,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -132,7 +139,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -156,7 +163,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 flags) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); @@ -164,7 +171,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, /* Mark the page as writeable. Other platforms don't have a * setting for read-only/writable, so this matches that behavior. */ - pte |= BYT_PTE_WRITEABLE; + if (!(flags & PTE_READ_ONLY)) + pte |= BYT_PTE_WRITEABLE; if (level != I915_CACHE_NONE) pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; @@ -174,7 +182,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? 
GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); @@ -187,7 +195,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, enum i915_cache_level level, - bool valid) + bool valid, u32 unused) { gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); @@ -301,7 +309,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, uint64_t start, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, u32 unused) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); @@ -639,7 +647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) uint32_t pd_entry; int pte, pde; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); @@ -941,7 +949,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); while (num_entries) { last_pte = first_pte + num_entries; @@ -964,7 +972,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, uint64_t start, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, u32 flags) { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); @@ -981,7 +989,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, pt_vaddr[act_pte] = vm->pte_encode(sg_page_iter_dma_address(&sg_iter), - cache_level, true); + cache_level, true, flags); + if (++act_pte == I915_PPGTT_PT_ENTRIES) { kunmap_atomic(pt_vaddr); pt_vaddr = NULL; @@ -1218,8 +1227,12 @@ ppgtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { + /* Currently applicable only to VLV */ + if (vma->obj->gt_ro) + flags |= PTE_READ_ONLY; + vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, - cache_level); + cache_level, flags); } static void ppgtt_unbind_vma(struct i915_vma *vma) @@ -1297,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev) POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); } +static void i915_ggtt_flush(struct drm_i915_private *dev_priv) +{ + if (INTEL_INFO(dev_priv->dev)->gen < 6) { + intel_gtt_chipset_flush(); + } else { + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + } +} + void i915_gem_suspend_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1313,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) dev_priv->gtt.base.start, dev_priv->gtt.base.total, true); + + i915_ggtt_flush(dev_priv); } void i915_gem_restore_gtt_mappings(struct drm_device *dev) @@ -1365,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); } - i915_gem_chipset_flush(dev); + i915_ggtt_flush(dev_priv); } int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) @@ -1394,7 +1419,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) 
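
The pte_encode() callbacks in this hunk all gain a flags argument so that, on Valleyview, BYT_PTE_WRITEABLE can be left clear when PTE_READ_ONLY is passed in. A self-contained model of that encoding; the bit positions and the FAKE_-prefixed names are invented and do not match the real PTE layout:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define FAKE_PTE_VALID     (1u << 0)
    #define FAKE_PTE_WRITEABLE (1u << 1)
    #define FAKE_PTE_SNOOPED   (1u << 2)
    #define FAKE_READ_ONLY     (1u << 31)  /* caller-supplied flag */

    enum cache_level { CACHE_NONE, CACHE_LLC };

    /* Build a 32-bit PTE from address + attributes, honouring read-only. */
    static uint32_t fake_pte_encode(uint32_t addr, enum cache_level level,
                                    bool valid, uint32_t flags)
    {
        uint32_t pte = valid ? FAKE_PTE_VALID : 0;

        pte |= addr & ~0xfffu;              /* page-aligned address bits */
        if (!(flags & FAKE_READ_ONLY))
            pte |= FAKE_PTE_WRITEABLE;      /* writable unless told otherwise */
        if (level != CACHE_NONE)
            pte |= FAKE_PTE_SNOOPED;
        return pte;
    }

    int main(void)
    {
        uint32_t rw = fake_pte_encode(0x10000, CACHE_LLC, true, 0);
        uint32_t ro = fake_pte_encode(0x10000, CACHE_LLC, true, FAKE_READ_ONLY);

        assert(rw & FAKE_PTE_WRITEABLE);
        assert(!(ro & FAKE_PTE_WRITEABLE));
        return 0;
    }
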
static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, uint64_t start, - enum i915_cache_level level) + enum i915_cache_level level, u32 unused) { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; @@ -1402,7 +1427,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; - dma_addr_t addr = 0; + dma_addr_t addr = 0; /* shut up gcc */ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_dma_address(sg_iter.sg) + @@ -1440,7 +1465,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, static void gen6_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, uint64_t start, - enum i915_cache_level level) + enum i915_cache_level level, u32 flags) { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; @@ -1448,11 +1473,11 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; - dma_addr_t addr; + dma_addr_t addr = 0; for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]); + iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); i++; } @@ -1462,9 +1487,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, * of NUMA access patterns. Therefore, even with the way we assume * hardware should work, we must keep this posting read for paranoia. */ - if (i != 0) - WARN_ON(readl(&gtt_entries[i-1]) != - vm->pte_encode(addr, level, true)); + if (i != 0) { + unsigned long gtt = readl(&gtt_entries[i-1]); + WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); + } /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -1518,7 +1544,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0); for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, &gtt_base[i]); @@ -1567,6 +1593,10 @@ static void ggtt_bind_vma(struct i915_vma *vma, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = vma->obj; + /* Currently applicable only to VLV */ + if (obj->gt_ro) + flags |= PTE_READ_ONLY; + /* If there is no aliasing PPGTT, or the caller needs a global mapping, * or we have a global mapping already but the cacheability flags have * changed, set the global PTEs.
@@ -1583,7 +1613,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, (cache_level != obj->cache_level)) { vma->vm->insert_entries(vma->vm, obj->pages, vma->node.start, - cache_level); + cache_level, flags); obj->has_global_gtt_mapping = 1; } } @@ -1595,7 +1625,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, appgtt->base.insert_entries(&appgtt->base, vma->obj->pages, vma->node.start, - cache_level); + cache_level, flags); vma->obj->has_aliasing_ppgtt_mapping = 1; } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 1b96a06be3cb..8d6f7c18c404 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -154,6 +154,7 @@ struct i915_vma { void (*unbind_vma)(struct i915_vma *vma); /* Map an object into an address space with the given cache flags. */ #define GLOBAL_BIND (1<<0) +#define PTE_READ_ONLY (1<<1) void (*bind_vma)(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); @@ -197,7 +198,7 @@ struct i915_address_space { /* FIXME: Need a more generic return type */ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, enum i915_cache_level level, - bool valid); /* Create a valid PTE */ + bool valid, u32 flags); /* Create a valid PTE */ void (*clear_range)(struct i915_address_space *vm, uint64_t start, uint64_t length, @@ -205,7 +206,7 @@ struct i915_address_space { void (*insert_entries)(struct i915_address_space *vm, struct sg_table *st, uint64_t start, - enum i915_cache_level cache_level); + enum i915_cache_level cache_level, u32 flags); void (*cleanup)(struct i915_address_space *vm); }; diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 34894b573064..e60be3f552a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -28,64 +28,13 @@ #include "i915_drv.h" #include "intel_renderstate.h" -struct i915_render_state { +struct render_state { + const struct intel_renderstate_rodata *rodata; struct drm_i915_gem_object *obj; - unsigned long ggtt_offset; - u32 *batch; - u32 size; - u32 len; + u64 ggtt_offset; + int gen; }; -static struct i915_render_state *render_state_alloc(struct drm_device *dev) -{ - struct i915_render_state *so; - struct page *page; - int ret; - - so = kzalloc(sizeof(*so), GFP_KERNEL); - if (!so) - return ERR_PTR(-ENOMEM); - - so->obj = i915_gem_alloc_object(dev, 4096); - if (so->obj == NULL) { - ret = -ENOMEM; - goto free; - } - so->size = 4096; - - ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); - if (ret) - goto free_gem; - - BUG_ON(so->obj->pages->nents != 1); - page = sg_page(so->obj->pages->sgl); - - so->batch = kmap(page); - if (!so->batch) { - ret = -ENOMEM; - goto unpin; - } - - so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj); - - return so; -unpin: - i915_gem_object_ggtt_unpin(so->obj); -free_gem: - drm_gem_object_unreference(&so->obj->base); -free: - kfree(so); - return ERR_PTR(ret); -} - -static void render_state_free(struct i915_render_state *so) -{ - kunmap(kmap_to_page(so->batch)); - i915_gem_object_ggtt_unpin(so->obj); - drm_gem_object_unreference(&so->obj->base); - kfree(so); -} - static const struct intel_renderstate_rodata * render_state_get_rodata(struct drm_device *dev, const int gen) { @@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen) return NULL; } -static int render_state_setup(const int gen, - const struct intel_renderstate_rodata *rodata, - struct i915_render_state *so) +static int render_state_init(struct 
render_state *so, struct drm_device *dev) { - const u64 goffset = i915_gem_obj_ggtt_offset(so->obj); - u32 reloc_index = 0; - u32 * const d = so->batch; - unsigned int i = 0; int ret; - if (!rodata || rodata->batch_items * 4 > so->size) + so->gen = INTEL_INFO(dev)->gen; + so->rodata = render_state_get_rodata(dev, so->gen); + if (so->rodata == NULL) + return 0; + + if (so->rodata->batch_items * 4 > 4096) return -EINVAL; + so->obj = i915_gem_alloc_object(dev, 4096); + if (so->obj == NULL) + return -ENOMEM; + + ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); + if (ret) + goto free_gem; + + so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj); + return 0; + +free_gem: + drm_gem_object_unreference(&so->obj->base); + return ret; +} + +static int render_state_setup(struct render_state *so) +{ + const struct intel_renderstate_rodata *rodata = so->rodata; + unsigned int i = 0, reloc_index = 0; + struct page *page; + u32 *d; + int ret; + ret = i915_gem_object_set_to_cpu_domain(so->obj, true); if (ret) return ret; + page = sg_page(so->obj->pages->sgl); + d = kmap(page); + while (i < rodata->batch_items) { u32 s = rodata->batch[i]; - if (reloc_index < rodata->reloc_items && - i * 4 == rodata->reloc[reloc_index]) { - - s += goffset & 0xffffffff; - - /* We keep batch offsets max 32bit */ - if (gen >= 8) { + if (i * 4 == rodata->reloc[reloc_index]) { + u64 r = s + so->ggtt_offset; + s = lower_32_bits(r); + if (so->gen >= 8) { if (i + 1 >= rodata->batch_items || rodata->batch[i + 1] != 0) return -EINVAL; - d[i] = s; - i++; - s = (goffset & 0xffffffff00000000ull) >> 32; + d[i++] = s; + s = upper_32_bits(r); } reloc_index++; } - d[i] = s; - i++; + d[i++] = s; } + kunmap(page); ret = i915_gem_object_set_to_gtt_domain(so->obj, false); if (ret) return ret; - if (rodata->reloc_items != reloc_index) { - DRM_ERROR("not all relocs resolved, %d out of %d\n", - reloc_index, rodata->reloc_items); + if (rodata->reloc[reloc_index] != -1) { + DRM_ERROR("only %d relocs resolved\n", reloc_index); return -EINVAL; } - so->len = rodata->batch_items * 4; - return 0; } +static void render_state_fini(struct render_state *so) +{ + i915_gem_object_ggtt_unpin(so->obj); + drm_gem_object_unreference(&so->obj->base); +} + int i915_gem_render_state_init(struct intel_engine_cs *ring) { - const int gen = INTEL_INFO(ring->dev)->gen; - struct i915_render_state *so; - const struct intel_renderstate_rodata *rodata; + struct render_state so; int ret; if (WARN_ON(ring->id != RCS)) return -ENOENT; - rodata = render_state_get_rodata(ring->dev, gen); - if (rodata == NULL) - return 0; + ret = render_state_init(&so, ring->dev); + if (ret) + return ret; - so = render_state_alloc(ring->dev); - if (IS_ERR(so)) - return PTR_ERR(so); + if (so.rodata == NULL) + return 0; - ret = render_state_setup(gen, rodata, so); + ret = render_state_setup(&so); if (ret) goto out; ret = ring->dispatch_execbuffer(ring, - i915_gem_obj_ggtt_offset(so->obj), - so->len, + so.ggtt_offset, + so.rodata->batch_items * 4, I915_DISPATCH_SECURE); if (ret) goto out; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring); + i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); - ret = __i915_add_request(ring, NULL, so->obj, NULL); + ret = __i915_add_request(ring, NULL, so.obj, NULL); /* __i915_add_request moves object to inactive if it fails */ out: - render_state_free(so); + render_state_fini(&so); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 7465ab0fd396..21c025a209c0 100644 --- 
a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -147,30 +147,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) return base; } -static int i915_setup_compression(struct drm_device *dev, int size) +static int find_compression_threshold(struct drm_device *dev, + struct drm_mm_node *node, + int size, + int fb_cpp) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); + int compression_threshold = 1; int ret; - compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); - if (!compressed_fb) - goto err_llb; + /* HACK: This code depends on what we will do in *_enable_fbc. If that + * code changes, this code needs to change as well. + * + * The enable_fbc code will attempt to use one of our 2 compression + * thresholds, therefore, in that case, we only have 1 resort. + */ - /* Try to over-allocate to reduce reallocations and fragmentation */ - ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, + /* Try to over-allocate to reduce reallocations and fragmentation. */ + ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); - if (ret) - ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, - size >>= 1, 4096, - DRM_MM_SEARCH_DEFAULT); - if (ret) + if (ret == 0) + return compression_threshold; + +again: + /* HW's ability to limit the CFB is 1:4 */ + if (compression_threshold > 4 || + (fb_cpp == 2 && compression_threshold == 2)) + return 0; + + ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, + size >>= 1, 4096, + DRM_MM_SEARCH_DEFAULT); + if (ret && INTEL_INFO(dev)->gen <= 4) { + return 0; + } else if (ret) { + compression_threshold <<= 1; + goto again; + } else { + return compression_threshold; + } +} + +static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mm_node *uninitialized_var(compressed_llb); + int ret; + + ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb, + size, fb_cpp); + if (!ret) goto err_llb; + else if (ret > 1) { + DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); + + } + + dev_priv->fbc.threshold = ret; if (HAS_PCH_SPLIT(dev)) - I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); + I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); else if (IS_GM45(dev)) { - I915_WRITE(DPFC_CB_BASE, compressed_fb->start); + I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); } else { compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) @@ -184,13 +222,12 @@ static int i915_setup_compression(struct drm_device *dev, int size) dev_priv->fbc.compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, - dev_priv->mm.stolen_base + compressed_fb->start); + dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start); I915_WRITE(FBC_LL_BASE, dev_priv->mm.stolen_base + compressed_llb->start); } - dev_priv->fbc.compressed_fb = compressed_fb; - dev_priv->fbc.size = size; + dev_priv->fbc.size = size / dev_priv->fbc.threshold; DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", size); @@ -199,14 +236,13 @@ static int i915_setup_compression(struct drm_device *dev, int size) err_fb: kfree(compressed_llb); - drm_mm_remove_node(compressed_fb); + drm_mm_remove_node(&dev_priv->fbc.compressed_fb); err_llb: - kfree(compressed_fb); pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } -int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) +int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -219,7 +255,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) /* Release any current block */ i915_gem_stolen_cleanup_compression(dev); - return i915_setup_compression(dev, size); + return i915_setup_compression(dev, size, fb_cpp); } void i915_gem_stolen_cleanup_compression(struct drm_device *dev) @@ -229,10 +265,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) if (dev_priv->fbc.size == 0) return; - if (dev_priv->fbc.compressed_fb) { - drm_mm_remove_node(dev_priv->fbc.compressed_fb); - kfree(dev_priv->fbc.compressed_fb); - } + drm_mm_remove_node(&dev_priv->fbc.compressed_fb); if (dev_priv->fbc.compressed_llb) { drm_mm_remove_node(dev_priv->fbc.compressed_llb); @@ -336,9 +369,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) kfree(obj->pages); } + +static void +i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) +{ + if (obj->stolen) { + drm_mm_remove_node(obj->stolen); + kfree(obj->stolen); + obj->stolen = NULL; + } +} static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { .get_pages = i915_gem_object_get_pages_stolen, .put_pages = i915_gem_object_put_pages_stolen, + .release = i915_gem_object_release_stolen, }; static struct drm_i915_gem_object * @@ -496,13 +540,3 @@ err_out: drm_gem_object_unreference(&obj->base); return NULL; } - -void -i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) -{ - if (obj->stolen) { - drm_mm_remove_node(obj->stolen); - kfree(obj->stolen); - obj->stolen = NULL; - } -} diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 21ea92886a56..d38413997379 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -32,6 +32,15 @@ #include <linux/mempolicy.h> #include 
<linux/swap.h> +struct i915_mm_struct { + struct mm_struct *mm; + struct drm_device *dev; + struct i915_mmu_notifier *mn; + struct hlist_node node; + struct kref kref; + struct work_struct work; +}; + #if defined(CONFIG_MMU_NOTIFIER) #include <linux/interval_tree.h> @@ -40,19 +49,83 @@ struct i915_mmu_notifier { struct hlist_node node; struct mmu_notifier mn; struct rb_root objects; - struct drm_device *dev; - struct mm_struct *mm; - struct work_struct work; - unsigned long count; + struct list_head linear; unsigned long serial; + bool has_linear; }; struct i915_mmu_object { - struct i915_mmu_notifier *mmu; + struct i915_mmu_notifier *mn; struct interval_tree_node it; + struct list_head link; struct drm_i915_gem_object *obj; + bool is_linear; }; +static unsigned long cancel_userptr(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + unsigned long end; + + mutex_lock(&dev->struct_mutex); + /* Cancel any active worker and force us to re-evaluate gup */ + obj->userptr.work = NULL; + + if (obj->pages != NULL) { + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_vma *vma, *tmp; + bool was_interruptible; + + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + + list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) { + int ret = i915_vma_unbind(vma); + WARN_ON(ret && ret != -EIO); + } + WARN_ON(i915_gem_object_put_pages(obj)); + + dev_priv->mm.interruptible = was_interruptible; + } + + end = obj->userptr.ptr + obj->base.size; + + drm_gem_object_unreference(&obj->base); + mutex_unlock(&dev->struct_mutex); + + return end; +} + +static void *invalidate_range__linear(struct i915_mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct i915_mmu_object *mo; + unsigned long serial; + +restart: + serial = mn->serial; + list_for_each_entry(mo, &mn->linear, link) { + struct drm_i915_gem_object *obj; + + if (mo->it.last < start || mo->it.start > end) + continue; + + obj = mo->obj; + drm_gem_object_reference(&obj->base); + spin_unlock(&mn->lock); + + cancel_userptr(obj); + + spin_lock(&mn->lock); + if (serial != mn->serial) + goto restart; + } + + return NULL; +} + static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, struct mm_struct *mm, unsigned long start, @@ -60,16 +133,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, { struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); struct interval_tree_node *it = NULL; + unsigned long next = start; unsigned long serial = 0; end--; /* interval ranges are inclusive, but invalidate range is exclusive */ - while (start < end) { - struct drm_i915_gem_object *obj; + while (next < end) { + struct drm_i915_gem_object *obj = NULL; - obj = NULL; spin_lock(&mn->lock); - if (serial == mn->serial) - it = interval_tree_iter_next(it, start, end); + if (mn->has_linear) + it = invalidate_range__linear(mn, mm, start, end); + else if (serial == mn->serial) + it = interval_tree_iter_next(it, next, end); else it = interval_tree_iter_first(&mn->objects, start, end); if (it != NULL) { @@ -81,31 +156,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, if (obj == NULL) return; - mutex_lock(&mn->dev->struct_mutex); - /* Cancel any active worker and force us to re-evaluate gup */ - obj->userptr.work = NULL; - - if (obj->pages != NULL) { - struct drm_i915_private *dev_priv = to_i915(mn->dev); - struct i915_vma *vma, *tmp; - bool 
was_interruptible; - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - - list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) { - int ret = i915_vma_unbind(vma); - WARN_ON(ret && ret != -EIO); - } - WARN_ON(i915_gem_object_put_pages(obj)); - - dev_priv->mm.interruptible = was_interruptible; - } - - start = obj->userptr.ptr + obj->base.size; - - drm_gem_object_unreference(&obj->base); - mutex_unlock(&mn->dev->struct_mutex); + next = cancel_userptr(obj); } } @@ -114,113 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = { }; static struct i915_mmu_notifier * -__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm) +i915_mmu_notifier_create(struct mm_struct *mm) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_mmu_notifier *mmu; - - /* Protected by dev->struct_mutex */ - hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm) - if (mmu->mm == mm) - return mmu; - - return NULL; -} - -static struct i915_mmu_notifier * -i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_mmu_notifier *mmu; + struct i915_mmu_notifier *mn; int ret; - lockdep_assert_held(&dev->struct_mutex); - - mmu = __i915_mmu_notifier_lookup(dev, mm); - if (mmu) - return mmu; - - mmu = kmalloc(sizeof(*mmu), GFP_KERNEL); - if (mmu == NULL) + mn = kmalloc(sizeof(*mn), GFP_KERNEL); + if (mn == NULL) return ERR_PTR(-ENOMEM); - spin_lock_init(&mmu->lock); - mmu->dev = dev; - mmu->mn.ops = &i915_gem_userptr_notifier; - mmu->mm = mm; - mmu->objects = RB_ROOT; - mmu->count = 0; - mmu->serial = 0; + spin_lock_init(&mn->lock); + mn->mn.ops = &i915_gem_userptr_notifier; + mn->objects = RB_ROOT; + mn->serial = 1; + INIT_LIST_HEAD(&mn->linear); + mn->has_linear = false; - /* Protected by mmap_sem (write-lock) */ - ret = __mmu_notifier_register(&mmu->mn, mm); + /* Protected by mmap_sem (write-lock) */ + ret = __mmu_notifier_register(&mn->mn, mm); if (ret) { - kfree(mmu); + kfree(mn); return ERR_PTR(ret); } - /* Protected by dev->struct_mutex */ - hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm); - return mmu; -} - -static void -__i915_mmu_notifier_destroy_worker(struct work_struct *work) -{ - struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work); - mmu_notifier_unregister(&mmu->mn, mmu->mm); - kfree(mmu); + return mn; } -static void -__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu) +static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn) { - lockdep_assert_held(&mmu->dev->struct_mutex); - - /* Protected by dev->struct_mutex */ - hash_del(&mmu->node); - - /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex. - * We enter the function holding struct_mutex, therefore we need - * to drop our mutex prior to calling mmu_notifier_unregister in - * order to prevent lock inversion (and system-wide deadlock) - * between the mmap_sem and struct-mutex. Hence we defer the - * unregistration to a workqueue where we hold no locks. 
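
The invalidate handler earlier in this hunk compares inclusive interval-tree ranges against the half-open range reported by the mmu_notifier, which is why it starts with end--. The overlap test with that adjustment, reduced to plain integers (struct obj_range is invented for the sketch):

    #include <assert.h>
    #include <stdbool.h>

    struct obj_range {
        unsigned long start;    /* first byte covered, inclusive */
        unsigned long last;     /* last byte covered, inclusive */
    };

    /* The invalidated range is [inv_start, inv_end): convert before comparing. */
    static bool overlaps(const struct obj_range *o,
                         unsigned long inv_start, unsigned long inv_end)
    {
        unsigned long inv_last = inv_end - 1;   /* the handler's "end--" */

        return o->last >= inv_start && o->start <= inv_last;
    }

    int main(void)
    {
        struct obj_range o = { .start = 0x1000, .last = 0x1fff };

        assert(overlaps(&o, 0x1800, 0x3000));
        assert(!overlaps(&o, 0x2000, 0x3000));  /* starts just past the object */
        return 0;
    }
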
- */ - INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker); - schedule_work(&mmu->work); -} - -static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu) -{ - if (++mmu->serial == 0) - mmu->serial = 1; -} - -static void -i915_mmu_notifier_del(struct i915_mmu_notifier *mmu, - struct i915_mmu_object *mn) -{ - lockdep_assert_held(&mmu->dev->struct_mutex); - - spin_lock(&mmu->lock); - interval_tree_remove(&mn->it, &mmu->objects); - __i915_mmu_notifier_update_serial(mmu); - spin_unlock(&mmu->lock); - - /* Protected against _add() by dev->struct_mutex */ - if (--mmu->count == 0) - __i915_mmu_notifier_destroy(mmu); + if (++mn->serial == 0) + mn->serial = 1; } static int -i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, - struct i915_mmu_object *mn) +i915_mmu_notifier_add(struct drm_device *dev, + struct i915_mmu_notifier *mn, + struct i915_mmu_object *mo) { struct interval_tree_node *it; int ret; - ret = i915_mutex_lock_interruptible(mmu->dev); + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -228,12 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, * remove the objects from the interval tree) before we do * the check for overlapping objects. */ - i915_gem_retire_requests(mmu->dev); + i915_gem_retire_requests(dev); - /* Disallow overlapping userptr objects */ - spin_lock(&mmu->lock); - it = interval_tree_iter_first(&mmu->objects, - mn->it.start, mn->it.last); + spin_lock(&mn->lock); + it = interval_tree_iter_first(&mn->objects, + mo->it.start, mo->it.last); if (it) { struct drm_i915_gem_object *obj; @@ -243,85 +227,129 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, * to flush their object references upon which the object will * be removed from the interval-tree, or the the range is * still in use by another client and the overlap is invalid. + * + * If we do have an overlap, we cannot use the interval tree + * for fast range invalidation. */ obj = container_of(it, struct i915_mmu_object, it)->obj; - ret = obj->userptr.workers ? 
-EAGAIN : -EINVAL; - } else { - interval_tree_insert(&mn->it, &mmu->objects); - __i915_mmu_notifier_update_serial(mmu); - ret = 0; + if (!obj->userptr.workers) + mn->has_linear = mo->is_linear = true; + else + ret = -EAGAIN; + } else + interval_tree_insert(&mo->it, &mn->objects); + + if (ret == 0) { + list_add(&mo->link, &mn->linear); + __i915_mmu_notifier_update_serial(mn); } - spin_unlock(&mmu->lock); - mutex_unlock(&mmu->dev->struct_mutex); + spin_unlock(&mn->lock); + mutex_unlock(&dev->struct_mutex); return ret; } +static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn) +{ + struct i915_mmu_object *mo; + + list_for_each_entry(mo, &mn->linear, link) + if (mo->is_linear) + return true; + + return false; +} + +static void +i915_mmu_notifier_del(struct i915_mmu_notifier *mn, + struct i915_mmu_object *mo) +{ + spin_lock(&mn->lock); + list_del(&mo->link); + if (mo->is_linear) + mn->has_linear = i915_mmu_notifier_has_linear(mn); + else + interval_tree_remove(&mo->it, &mn->objects); + __i915_mmu_notifier_update_serial(mn); + spin_unlock(&mn->lock); +} + static void i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { - struct i915_mmu_object *mn; + struct i915_mmu_object *mo; - mn = obj->userptr.mn; - if (mn == NULL) + mo = obj->userptr.mmu_object; + if (mo == NULL) return; - i915_mmu_notifier_del(mn->mmu, mn); - obj->userptr.mn = NULL; + i915_mmu_notifier_del(mo->mn, mo); + kfree(mo); + + obj->userptr.mmu_object = NULL; +} + +static struct i915_mmu_notifier * +i915_mmu_notifier_find(struct i915_mm_struct *mm) +{ + if (mm->mn == NULL) { + down_write(&mm->mm->mmap_sem); + mutex_lock(&to_i915(mm->dev)->mm_lock); + if (mm->mn == NULL) + mm->mn = i915_mmu_notifier_create(mm->mm); + mutex_unlock(&to_i915(mm->dev)->mm_lock); + up_write(&mm->mm->mmap_sem); + } + return mm->mn; } static int i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, unsigned flags) { - struct i915_mmu_notifier *mmu; - struct i915_mmu_object *mn; + struct i915_mmu_notifier *mn; + struct i915_mmu_object *mo; int ret; if (flags & I915_USERPTR_UNSYNCHRONIZED) return capable(CAP_SYS_ADMIN) ? 
0 : -EPERM; - down_write(&obj->userptr.mm->mmap_sem); - ret = i915_mutex_lock_interruptible(obj->base.dev); - if (ret == 0) { - mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm); - if (!IS_ERR(mmu)) - mmu->count++; /* preemptive add to act as a refcount */ - else - ret = PTR_ERR(mmu); - mutex_unlock(&obj->base.dev->struct_mutex); - } - up_write(&obj->userptr.mm->mmap_sem); - if (ret) - return ret; + if (WARN_ON(obj->userptr.mm == NULL)) + return -EINVAL; - mn = kzalloc(sizeof(*mn), GFP_KERNEL); - if (mn == NULL) { - ret = -ENOMEM; - goto destroy_mmu; - } + mn = i915_mmu_notifier_find(obj->userptr.mm); + if (IS_ERR(mn)) + return PTR_ERR(mn); - mn->mmu = mmu; - mn->it.start = obj->userptr.ptr; - mn->it.last = mn->it.start + obj->base.size - 1; - mn->obj = obj; + mo = kzalloc(sizeof(*mo), GFP_KERNEL); + if (mo == NULL) + return -ENOMEM; - ret = i915_mmu_notifier_add(mmu, mn); - if (ret) - goto free_mn; + mo->mn = mn; + mo->it.start = obj->userptr.ptr; + mo->it.last = mo->it.start + obj->base.size - 1; + mo->obj = obj; - obj->userptr.mn = mn; + ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); + if (ret) { + kfree(mo); + return ret; + } + + obj->userptr.mmu_object = mo; return 0; +} -free_mn: +static void +i915_mmu_notifier_free(struct i915_mmu_notifier *mn, + struct mm_struct *mm) +{ + if (mn == NULL) + return; + + mmu_notifier_unregister(&mn->mn, mm); kfree(mn); -destroy_mmu: - mutex_lock(&obj->base.dev->struct_mutex); - if (--mmu->count == 0) - __i915_mmu_notifier_destroy(mmu); - mutex_unlock(&obj->base.dev->struct_mutex); - return ret; } #else @@ -343,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, return 0; } + +static void +i915_mmu_notifier_free(struct i915_mmu_notifier *mn, + struct mm_struct *mm) +{ +} + #endif +static struct i915_mm_struct * +__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real) +{ + struct i915_mm_struct *mm; + + /* Protected by dev_priv->mm_lock */ + hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) + if (mm->mm == real) + return mm; + + return NULL; +} + +static int +i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_mm_struct *mm; + int ret = 0; + + /* During release of the GEM object we hold the struct_mutex. This + * precludes us from calling mmput() at that time as that may be + * the last reference and so call exit_mmap(). exit_mmap() will + * attempt to reap the vma, and if we were holding a GTT mmap + * would then call drm_gem_vm_close() and attempt to reacquire + * the struct mutex. So in order to avoid that recursion, we have + * to defer releasing the mm reference until after we drop the + * struct_mutex, i.e. we need to schedule a worker to do the clean + * up. 
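
The i915_mm_struct code in this hunk looks up an existing per-mm entry under dev_priv->mm_lock and takes a kref, or allocates and hashes a new one. A single-threaded sketch of that find-or-create-with-refcount pattern, using a linked list and a plain counter instead of the hash table and kref, with no locking modelled:

    #include <assert.h>
    #include <stdlib.h>

    struct mm_entry {
        const void *key;        /* stands in for the mm_struct pointer */
        int refcount;
        struct mm_entry *next;
    };

    static struct mm_entry *mm_list;

    /* Return the entry for key, creating it with refcount 1 if none exists. */
    static struct mm_entry *mm_get(const void *key)
    {
        struct mm_entry *e;

        for (e = mm_list; e; e = e->next) {
            if (e->key == key) {
                e->refcount++;
                return e;
            }
        }

        e = calloc(1, sizeof(*e));
        if (!e)
            return NULL;
        e->key = key;
        e->refcount = 1;
        e->next = mm_list;
        mm_list = e;
        return e;
    }

    int main(void)
    {
        int dummy_mm;
        struct mm_entry *a = mm_get(&dummy_mm);
        struct mm_entry *b = mm_get(&dummy_mm);

        assert(a && a == b && a->refcount == 2);
        return 0;
    }
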
+ */ + mutex_lock(&dev_priv->mm_lock); + mm = __i915_mm_struct_find(dev_priv, current->mm); + if (mm == NULL) { + mm = kmalloc(sizeof(*mm), GFP_KERNEL); + if (mm == NULL) { + ret = -ENOMEM; + goto out; + } + + kref_init(&mm->kref); + mm->dev = obj->base.dev; + + mm->mm = current->mm; + atomic_inc(¤t->mm->mm_count); + + mm->mn = NULL; + + /* Protected by dev_priv->mm_lock */ + hash_add(dev_priv->mm_structs, + &mm->node, (unsigned long)mm->mm); + } else + kref_get(&mm->kref); + + obj->userptr.mm = mm; +out: + mutex_unlock(&dev_priv->mm_lock); + return ret; +} + +static void +__i915_mm_struct_free__worker(struct work_struct *work) +{ + struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); + i915_mmu_notifier_free(mm->mn, mm->mm); + mmdrop(mm->mm); + kfree(mm); +} + +static void +__i915_mm_struct_free(struct kref *kref) +{ + struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); + + /* Protected by dev_priv->mm_lock */ + hash_del(&mm->node); + mutex_unlock(&to_i915(mm->dev)->mm_lock); + + INIT_WORK(&mm->work, __i915_mm_struct_free__worker); + schedule_work(&mm->work); +} + +static void +i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj) +{ + if (obj->userptr.mm == NULL) + return; + + kref_put_mutex(&obj->userptr.mm->kref, + __i915_mm_struct_free, + &to_i915(obj->base.dev)->mm_lock); + obj->userptr.mm = NULL; +} + struct get_pages_work { struct work_struct work; struct drm_i915_gem_object *obj; struct task_struct *task; }; - #if IS_ENABLED(CONFIG_SWIOTLB) #define swiotlb_active() swiotlb_nr_tbl() #else @@ -409,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) if (pvec == NULL) pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); if (pvec != NULL) { - struct mm_struct *mm = obj->userptr.mm; + struct mm_struct *mm = obj->userptr.mm->mm; down_read(&mm->mmap_sem); while (pinned < num_pages) { @@ -475,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) pvec = NULL; pinned = 0; - if (obj->userptr.mm == current->mm) { + if (obj->userptr.mm->mm == current->mm) { pvec = kmalloc(num_pages*sizeof(struct page *), GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); if (pvec == NULL) { @@ -581,17 +708,13 @@ static void i915_gem_userptr_release(struct drm_i915_gem_object *obj) { i915_gem_userptr_release__mmu_notifier(obj); - - if (obj->userptr.mm) { - mmput(obj->userptr.mm); - obj->userptr.mm = NULL; - } + i915_gem_userptr_release__mm_struct(obj); } static int i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) { - if (obj->userptr.mn) + if (obj->userptr.mmu_object) return 0; return i915_gem_userptr_init__mmu_notifier(obj, 0); @@ -611,12 +734,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { * We impose several restrictions upon the memory being mapped * into the GPU. * 1. It must be page aligned (both start/end addresses, i.e ptr and size). - * 2. It cannot overlap any other userptr object in the same address space. - * 3. It must be normal system memory, not a pointer into another map of IO + * 2. It must be normal system memory, not a pointer into another map of IO * space (e.g. it must not be a GTT mmapping of another object). - * 4. We only allow a bo as large as we could in theory map into the GTT, + * 3. We only allow a bo as large as we could in theory map into the GTT, * that is we limit the size to the total size of the GTT. - * 5. The bo is marked as being snoopable. The backing pages are left + * 4. The bo is marked as being snoopable. 
The backing pages are left * accessible directly by the CPU, but reads and writes by the GPU may * incur the cost of a snoop (unless you have an LLC architecture). * @@ -667,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file return -ENODEV; } - /* Allocate the new object */ obj = i915_gem_object_alloc(dev); if (obj == NULL) return -ENOMEM; @@ -685,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file * at binding. This means that we need to hook into the mmu_notifier * in order to detect if the mmu is destroyed. */ - ret = -ENOMEM; - if ((obj->userptr.mm = get_task_mm(current))) + ret = i915_gem_userptr_init__mm_struct(obj); + if (ret == 0) ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); if (ret == 0) ret = drm_gem_handle_create(file, &obj->base, &handle); @@ -703,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file int i915_gem_init_userptr(struct drm_device *dev) { -#if defined(CONFIG_MMU_NOTIFIER) struct drm_i915_private *dev_priv = to_i915(dev); - hash_init(dev_priv->mmu_notifiers); -#endif + mutex_init(&dev_priv->mm_lock); + hash_init(dev_priv->mm_structs); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 66cf41765bf9..eab41f9390f8 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -229,6 +229,8 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) return "wait"; case HANGCHECK_ACTIVE: return "active"; + case HANGCHECK_ACTIVE_LOOP: + return "active (loop)"; case HANGCHECK_KICK: return "kick"; case HANGCHECK_HUNG: @@ -327,6 +329,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, struct drm_device *dev = error_priv->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error = error_priv->error; + struct drm_i915_error_object *obj; int i, j, offset, elt; int max_hangcheck_score; @@ -358,6 +361,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); err_printf(m, "EIR: 0x%08x\n", error->eir); err_printf(m, "IER: 0x%08x\n", error->ier); + if (INTEL_INFO(dev)->gen >= 8) { + for (i = 0; i < 4; i++) + err_printf(m, "GTIER gt %d: 0x%08x\n", i, + error->gtier[i]); + } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev)) + err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]); err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); @@ -395,8 +404,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, error->pinned_bo_count[0]); for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - struct drm_i915_error_object *obj; - obj = error->ring[i].batchbuffer; if (obj) { err_puts(m, dev_priv->ring[i].name); @@ -459,6 +466,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } + if ((obj = error->semaphore_obj)) { + err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset); + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { + err_printf(m, "[%04x] %08x %08x %08x %08x\n", + elt * 4, + obj->pages[0][elt], + obj->pages[0][elt+1], + obj->pages[0][elt+2], + obj->pages[0][elt+3]); + } + } + if (error->overlay) intel_overlay_print_error_state(m, error->overlay); @@ -529,6 +548,7 @@ static void i915_error_state_free(struct kref *error_ref) kfree(error->ring[i].requests); } + 
i915_error_object_free(error->semaphore_obj); kfree(error->active_bo); kfree(error->overlay); kfree(error->display); @@ -746,7 +766,60 @@ static void i915_gem_record_fences(struct drm_device *dev, } } + +static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error, + struct intel_engine_cs *ring, + struct drm_i915_error_ring *ering) +{ + struct intel_engine_cs *to; + int i; + + if (!i915_semaphore_is_enabled(dev_priv->dev)) + return; + + if (!error->semaphore_obj) + error->semaphore_obj = + i915_error_object_create(dev_priv, + dev_priv->semaphore_obj, + &dev_priv->gtt.base); + + for_each_ring(to, dev_priv, i) { + int idx; + u16 signal_offset; + u32 *tmp; + + if (ring == to) + continue; + + signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1)) + / 4; + tmp = error->semaphore_obj->pages[0]; + idx = intel_ring_sync_index(ring, to); + + ering->semaphore_mboxes[idx] = tmp[signal_offset]; + ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx]; + } +} + +static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, + struct intel_engine_cs *ring, + struct drm_i915_error_ring *ering) +{ + ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base)); + ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base)); + ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0]; + ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1]; + + if (HAS_VEBOX(dev_priv->dev)) { + ering->semaphore_mboxes[2] = + I915_READ(RING_SYNC_2(ring->mmio_base)); + ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2]; + } +} + static void i915_record_ring_state(struct drm_device *dev, + struct drm_i915_error_state *error, struct intel_engine_cs *ring, struct drm_i915_error_ring *ering) { @@ -755,18 +828,10 @@ static void i915_record_ring_state(struct drm_device *dev, if (INTEL_INFO(dev)->gen >= 6) { ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); - ering->semaphore_mboxes[0] - = I915_READ(RING_SYNC_0(ring->mmio_base)); - ering->semaphore_mboxes[1] - = I915_READ(RING_SYNC_1(ring->mmio_base)); - ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0]; - ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1]; - } - - if (HAS_VEBOX(dev)) { - ering->semaphore_mboxes[2] = - I915_READ(RING_SYNC_2(ring->mmio_base)); - ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2]; + if (INTEL_INFO(dev)->gen >= 8) + gen8_record_semaphore_state(dev_priv, error, ring, ering); + else + gen6_record_semaphore_state(dev_priv, ring, ering); } if (INTEL_INFO(dev)->gen >= 4) { @@ -871,6 +936,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring, return; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (!i915_gem_obj_ggtt_bound(obj)) + continue; + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { ering->ctx = i915_error_ggtt_object_create(dev_priv, obj); break; @@ -895,7 +963,7 @@ static void i915_gem_record_rings(struct drm_device *dev, error->ring[i].valid = true; - i915_record_ring_state(dev, ring, &error->ring[i]); + i915_record_ring_state(dev, error, ring, &error->ring[i]); request = i915_gem_find_active_request(ring); if (request) { @@ -1032,6 +1100,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { struct drm_device *dev = dev_priv->dev; + int i; /* General organization * 1. 
Registers specific to a single generation @@ -1043,7 +1112,8 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, /* 1: Registers specific to a single generation */ if (IS_VALLEYVIEW(dev)) { - error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); + error->gtier[0] = I915_READ(GTIER); + error->ier = I915_READ(VLV_IER); error->forcewake = I915_READ(FORCEWAKE_VLV); } @@ -1076,16 +1146,18 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, if (HAS_HW_CONTEXTS(dev)) error->ccid = I915_READ(CCID); - if (HAS_PCH_SPLIT(dev)) - error->ier = I915_READ(DEIER) | I915_READ(GTIER); - else { - if (IS_GEN2(dev)) - error->ier = I915_READ16(IER); - else - error->ier = I915_READ(IER); + if (INTEL_INFO(dev)->gen >= 8) { + error->ier = I915_READ(GEN8_DE_MISC_IER); + for (i = 0; i < 4; i++) + error->gtier[i] = I915_READ(GEN8_GT_IER(i)); + } else if (HAS_PCH_SPLIT(dev)) { + error->ier = I915_READ(DEIER); + error->gtier[0] = I915_READ(GTIER); + } else if (IS_GEN2(dev)) { + error->ier = I915_READ16(IER); + } else if (!IS_VALLEYVIEW(dev)) { + error->ier = I915_READ(IER); } - - /* 4: Everything else */ error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c05c84f3f091..0050ee9470f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; if ((dev_priv->irq_mask & mask) != 0) { @@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (!intel_irqs_enabled(dev_priv)) return; if ((dev_priv->irq_mask & mask) != mask) { @@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, { assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; dev_priv->gt_irq_mask &= ~interrupt_mask; @@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, POSTING_READ(GTIMR); } -void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) { ilk_update_gt_irq(dev_priv, mask, mask); } -void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) { ilk_update_gt_irq(dev_priv, mask, 0); } @@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; new_val = dev_priv->pm_irq_mask; @@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, } } -void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { snb_update_pm_irq(dev_priv, mask, mask); } -void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { snb_update_pm_irq(dev_priv, mask, 0); } @@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, 
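
The gen5/gen6/gen8 interrupt-mask helpers renamed in these hunks share one shape: fold the requested bits into a software copy of the mask register and write it back with a posting read, with the enable/disable wrappers passing (mask, mask) and (mask, 0); the PM variants additionally skip the write when the cached value is unchanged. A sketch of that cached-mask update, with stubbed accessors standing in for the driver's register I/O (not the driver's code itself):

#include <stdint.h>

struct irq_mask_cache {
        uint32_t cached_imr;            /* last value written to the IMR register */
};

/* Stand-ins for the MMIO accessors; a register write plus posting read
 * would go here in real code. */
static void reg_write_imr(uint32_t val) { (void)val; }
static void reg_posting_read(void) { }

/* One update helper shared by the enable/disable wrappers: enable passes
 * (mask, mask), disable passes (mask, 0). Bits set in IMR are masked off,
 * so "enabled" bits end up cleared. */
static void update_irq_mask(struct irq_mask_cache *c,
                            uint32_t interrupt_mask,
                            uint32_t enabled_irq_mask)
{
        uint32_t new_val = c->cached_imr;

        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != c->cached_imr) {
                c->cached_imr = new_val;
                reg_write_imr(c->cached_imr);
                reg_posting_read();
        }
}
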
assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; new_val = dev_priv->pm_irq_mask; @@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, } } -void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { bdw_update_pm_irq(dev_priv, mask, mask); } -void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) { bdw_update_pm_irq(dev_priv, mask, 0); } @@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, assert_spin_locked(&dev_priv->irq_lock); - if (WARN_ON(dev_priv->pm.irqs_disabled)) + if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; I915_WRITE(SDEIMR, sdeimr); @@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev, return true; } +static void i915_digport_work_func(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, dig_port_work); + unsigned long irqflags; + u32 long_port_mask, short_port_mask; + struct intel_digital_port *intel_dig_port; + int i, ret; + u32 old_bits = 0; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + long_port_mask = dev_priv->long_hpd_port_mask; + dev_priv->long_hpd_port_mask = 0; + short_port_mask = dev_priv->short_hpd_port_mask; + dev_priv->short_hpd_port_mask = 0; + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + for (i = 0; i < I915_MAX_PORTS; i++) { + bool valid = false; + bool long_hpd = false; + intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port || !intel_dig_port->hpd_pulse) + continue; + + if (long_port_mask & (1 << i)) { + valid = true; + long_hpd = true; + } else if (short_port_mask & (1 << i)) + valid = true; + + if (valid) { + ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); + if (ret == true) { + /* if we get true fallback to old school hpd */ + old_bits |= (1 << intel_dig_port->base.hpd_pin); + } + } + } + + if (old_bits) { + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + dev_priv->hpd_event_bits |= old_bits; + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + schedule_work(&dev_priv->hotplug_work); + } +} + /* * Handle hotplug events outside the interrupt handler proper. */ @@ -1109,10 +1156,6 @@ static void i915_hotplug_work_func(struct work_struct *work) bool changed = false; u32 hpd_event_bits; - /* HPD irq before everything is fully set up. 
*/ - if (!dev_priv->enable_hotplug_processing) - return; - mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); @@ -1122,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work) dev_priv->hpd_event_bits = 0; list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); + if (!intel_connector->encoder) + continue; intel_encoder = intel_connector->encoder; if (intel_encoder->hpd_pin > HPD_NONE && dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && @@ -1144,14 +1189,16 @@ static void i915_hotplug_work_func(struct work_struct *work) * some connectors */ if (hpd_disabled) { drm_kms_helper_poll_enable(dev); - mod_timer(&dev_priv->hotplug_reenable_timer, - jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); + mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); list_for_each_entry(connector, &mode_config->connector_list, head) { intel_connector = to_intel_connector(connector); + if (!intel_connector->encoder) + continue; intel_encoder = intel_connector->encoder; if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { if (intel_encoder->hot_plug) @@ -1166,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) -{ - del_timer_sync(&dev_priv->hotplug_reenable_timer); -} - static void ironlake_rps_change_irq_handler(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1218,10 +1260,138 @@ static void notify_ring(struct drm_device *dev, trace_i915_gem_request_complete(ring); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_notify_mmio_flip(ring); + wake_up_all(&ring->irq_queue); i915_queue_hangcheck(dev); } +static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, + struct intel_rps_ei *rps_ei) +{ + u32 cz_ts, cz_freq_khz; + u32 render_count, media_count; + u32 elapsed_render, elapsed_media, elapsed_time; + u32 residency = 0; + + cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); + cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4); + + render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); + media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); + + if (rps_ei->cz_clock == 0) { + rps_ei->cz_clock = cz_ts; + rps_ei->render_c0 = render_count; + rps_ei->media_c0 = media_count; + + return dev_priv->rps.cur_freq; + } + + elapsed_time = cz_ts - rps_ei->cz_clock; + rps_ei->cz_clock = cz_ts; + + elapsed_render = render_count - rps_ei->render_c0; + rps_ei->render_c0 = render_count; + + elapsed_media = media_count - rps_ei->media_c0; + rps_ei->media_c0 = media_count; + + /* Convert all the counters into common unit of milli sec */ + elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; + elapsed_render /= cz_freq_khz; + elapsed_media /= cz_freq_khz; + + /* + * Calculate overall C0 residency percentage + * only if elapsed time is non zero + */ + if (elapsed_time) { + residency = + ((max(elapsed_render, elapsed_media) * 100) + / elapsed_time); + } + + return residency; +} + +/** + * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU + * busy-ness calculated from C0 counters of render & media power wells + * @dev_priv: DRM device private + * + */ +static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) +{ + u32 residency_C0_up = 0, residency_C0_down = 0; + 
u8 new_delay, adj; + + dev_priv->rps.ei_interrupt_count++; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + + if (dev_priv->rps.up_ei.cz_clock == 0) { + vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); + vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); + return dev_priv->rps.cur_freq; + } + + + /* + * To down throttle, C0 residency should be less than down threshold + * for continous EI intervals. So calculate down EI counters + * once in VLV_INT_COUNT_FOR_DOWN_EI + */ + if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { + + dev_priv->rps.ei_interrupt_count = 0; + + residency_C0_down = vlv_c0_residency(dev_priv, + &dev_priv->rps.down_ei); + } else { + residency_C0_up = vlv_c0_residency(dev_priv, + &dev_priv->rps.up_ei); + } + + new_delay = dev_priv->rps.cur_freq; + + adj = dev_priv->rps.last_adj; + /* C0 residency is greater than UP threshold. Increase Frequency */ + if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { + if (adj > 0) + adj *= 2; + else + adj = 1; + + if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) + new_delay = dev_priv->rps.cur_freq + adj; + + /* + * For better performance, jump directly + * to RPe if we're below it. + */ + if (new_delay < dev_priv->rps.efficient_freq) + new_delay = dev_priv->rps.efficient_freq; + + } else if (!dev_priv->rps.ei_interrupt_count && + (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { + if (adj < 0) + adj *= 2; + else + adj = -1; + /* + * This means, C0 residency is less than down threshold over + * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq + */ + if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) + new_delay = dev_priv->rps.cur_freq + adj; + } + + return new_delay; +} + static void gen6_pm_rps_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -1232,11 +1402,11 @@ static void gen6_pm_rps_work(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - if (IS_BROADWELL(dev_priv->dev)) - bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + if (INTEL_INFO(dev_priv->dev)->gen >= 8) + gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); else { /* Make sure not to corrupt PMIMR state used by ringbuffer */ - snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); } spin_unlock_irq(&dev_priv->irq_lock); @@ -1252,8 +1422,10 @@ static void gen6_pm_rps_work(struct work_struct *work) if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (adj > 0) adj *= 2; - else - adj = 1; + else { + /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; + } new_delay = dev_priv->rps.cur_freq + adj; /* @@ -1268,11 +1440,15 @@ static void gen6_pm_rps_work(struct work_struct *work) else new_delay = dev_priv->rps.min_freq_softlimit; adj = 0; + } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { + new_delay = vlv_calc_delay_from_C0_counters(dev_priv); } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { if (adj < 0) adj *= 2; - else - adj = -1; + else { + /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(dev_priv->dev) ? 
-2 : -1; + } new_delay = dev_priv->rps.cur_freq + adj; } else { /* unknown event */ new_delay = dev_priv->rps.cur_freq; @@ -1372,7 +1548,7 @@ static void ivybridge_parity_work(struct work_struct *work) out: WARN_ON(dev_priv->l3_parity.which_slice); spin_lock_irqsave(&dev_priv->irq_lock, flags); - ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); + gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); mutex_unlock(&dev_priv->dev->struct_mutex); @@ -1386,7 +1562,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) return; spin_lock(&dev_priv->irq_lock); - ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); + gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); spin_unlock(&dev_priv->irq_lock); iir &= GT_PARITY_ERROR(dev); @@ -1441,7 +1617,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); @@ -1458,6 +1634,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { tmp = I915_READ(GEN8_GT_IIR(0)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(0), tmp); ret = IRQ_HANDLED; rcs = tmp >> GEN8_RCS_IRQ_SHIFT; bcs = tmp >> GEN8_BCS_IRQ_SHIFT; @@ -1465,7 +1642,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, notify_ring(dev, &dev_priv->ring[RCS]); if (bcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[BCS]); - I915_WRITE(GEN8_GT_IIR(0), tmp); } else DRM_ERROR("The master control interrupt lied (GT0)!\n"); } @@ -1473,6 +1649,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { tmp = I915_READ(GEN8_GT_IIR(1)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(1), tmp); ret = IRQ_HANDLED; vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) @@ -1480,7 +1657,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VCS2]); - I915_WRITE(GEN8_GT_IIR(1), tmp); } else DRM_ERROR("The master control interrupt lied (GT1)!\n"); } @@ -1488,10 +1664,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & GEN8_GT_PM_IRQ) { tmp = I915_READ(GEN8_GT_IIR(2)); if (tmp & dev_priv->pm_rps_events) { - ret = IRQ_HANDLED; - gen8_rps_irq_handler(dev_priv, tmp); I915_WRITE(GEN8_GT_IIR(2), tmp & dev_priv->pm_rps_events); + ret = IRQ_HANDLED; + gen8_rps_irq_handler(dev_priv, tmp); } else DRM_ERROR("The master control interrupt lied (PM)!\n"); } @@ -1499,11 +1675,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, if (master_ctl & GEN8_GT_VECS_IRQ) { tmp = I915_READ(GEN8_GT_IIR(3)); if (tmp) { + I915_WRITE(GEN8_GT_IIR(3), tmp); ret = IRQ_HANDLED; vcs = tmp >> GEN8_VECS_IRQ_SHIFT; if (vcs & GT_RENDER_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[VECS]); - I915_WRITE(GEN8_GT_IIR(3), tmp); } else DRM_ERROR("The master control interrupt lied (GT3)!\n"); } @@ -1514,23 +1690,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_THRESHOLD 5 +static int ilk_port_to_hotplug_shift(enum port port) +{ + switch (port) { 
+ case PORT_A: + case PORT_E: + default: + return -1; + case PORT_B: + return 0; + case PORT_C: + return 8; + case PORT_D: + return 16; + } +} + +static int g4x_port_to_hotplug_shift(enum port port) +{ + switch (port) { + case PORT_A: + case PORT_E: + default: + return -1; + case PORT_B: + return 17; + case PORT_C: + return 19; + case PORT_D: + return 21; + } +} + +static inline enum port get_port_from_pin(enum hpd_pin pin) +{ + switch (pin) { + case HPD_PORT_B: + return PORT_B; + case HPD_PORT_C: + return PORT_C; + case HPD_PORT_D: + return PORT_D; + default: + return PORT_A; /* no hpd */ + } +} + static inline void intel_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, + u32 dig_hotplug_reg, const u32 *hpd) { struct drm_i915_private *dev_priv = dev->dev_private; int i; + enum port port; bool storm_detected = false; + bool queue_dig = false, queue_hp = false; + u32 dig_shift; + u32 dig_port_mask = 0; if (!hotplug_trigger) return; - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", - hotplug_trigger); + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", + hotplug_trigger, dig_hotplug_reg); spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { + if (!(hpd[i] & hotplug_trigger)) + continue; + + port = get_port_from_pin(i); + if (port && dev_priv->hpd_irq_port[port]) { + bool long_hpd; + + if (IS_G4X(dev)) { + dig_shift = g4x_port_to_hotplug_shift(port); + long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } else { + dig_shift = ilk_port_to_hotplug_shift(port); + long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; + } + + DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); + /* for long HPD pulses we want to have the digital queue happen, + but we still want HPD storm detection to function. */ + if (long_hpd) { + dev_priv->long_hpd_port_mask |= (1 << port); + dig_port_mask |= hpd[i]; + } else { + /* for short HPD just trigger the digital queue */ + dev_priv->short_hpd_port_mask |= (1 << port); + hotplug_trigger &= ~hpd[i]; + } + queue_dig = true; + } + } + for (i = 1; i < HPD_NUM_PINS; i++) { if (hpd[i] & hotplug_trigger && dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { /* @@ -1550,7 +1807,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) continue; - dev_priv->hpd_event_bits |= (1 << i); + if (!(dig_port_mask & hpd[i])) { + dev_priv->hpd_event_bits |= (1 << i); + queue_hp = true; + } + if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { @@ -1579,7 +1840,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, * queue for otherwise the flush_work in the pageflip code will * deadlock. 
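
The classification added in this handler comes down to shifting the triggering port's status field into place and testing its long-pulse bit, then recording the port in one of two masks for the digital-port worker to drain. Stripped of the real register layout — the shift table and TOY_LONG_PULSE bit below are placeholders, not the actual i915 definitions:

#include <stdint.h>

enum toy_port { TOY_PORT_B, TOY_PORT_C, TOY_PORT_D, TOY_NUM_PORTS };

#define TOY_LONG_PULSE 0x2u             /* placeholder for a *_LONG_DETECT bit */

/* Placeholder per-port field offsets within a hotplug status register. */
static const int toy_port_shift[TOY_NUM_PORTS] = { 0, 8, 16 };

struct toy_hpd_state {
        uint32_t long_port_mask;        /* full plug/unplug detection needed */
        uint32_t short_port_mask;       /* sink IRQ only, e.g. a DP short pulse */
};

/* Classify a port that has triggered: both masks are later drained by a
 * dedicated digital-port worker, while non-digital ports would instead fall
 * through to the legacy hotplug path. */
static void toy_hpd_classify(struct toy_hpd_state *st,
                             enum toy_port port, uint32_t status_reg)
{
        uint32_t field = status_reg >> toy_port_shift[port];

        if (field & TOY_LONG_PULSE)
                st->long_port_mask |= 1u << port;
        else
                st->short_port_mask |= 1u << port;
}
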
*/ - schedule_work(&dev_priv->hotplug_work); + if (queue_dig) + queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); + if (queue_hp) + schedule_work(&dev_priv->hotplug_work); } static void gmbus_irq_handler(struct drm_device *dev) @@ -1700,7 +1964,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; - snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->rps.work); @@ -1809,26 +2073,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); - if (IS_G4X(dev)) { - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; + if (hotplug_status) { + I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + /* + * Make sure hotplug status is cleared before we clear IIR, or else we + * may miss hotplug events. + */ + POSTING_READ(PORT_HOTPLUG_STAT); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); - } else { - u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; + if (IS_G4X(dev)) { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); - } + intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); + } else { + u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; - if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && - hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - dp_aux_irq_handler(dev); + intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); + } - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); - /* - * Make sure hotplug status is cleared before we clear IIR, or else we - * may miss hotplug events. - */ - POSTING_READ(PORT_HOTPLUG_STAT); + if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && + hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) + dp_aux_irq_handler(dev); + } } static irqreturn_t valleyview_irq_handler(int irq, void *arg) @@ -1839,29 +2105,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) irqreturn_t ret = IRQ_NONE; while (true) { - iir = I915_READ(VLV_IIR); + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); + if (gt_iir) + I915_WRITE(GTIIR, gt_iir); + pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) + I915_WRITE(GEN6_PMIIR, pm_iir); + + iir = I915_READ(VLV_IIR); + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); + } if (gt_iir == 0 && pm_iir == 0 && iir == 0) goto out; ret = IRQ_HANDLED; - snb_gt_irq_handler(dev, dev_priv, gt_iir); - - valleyview_pipestat_irq_handler(dev, iir); - - /* Consume port. 
Then clear IIR or we'll miss events */ - if (iir & I915_DISPLAY_PORT_INTERRUPT) - i9xx_hpd_irq_handler(dev); - + if (gt_iir) + snb_gt_irq_handler(dev, dev_priv, gt_iir); if (pm_iir) gen6_rps_irq_handler(dev_priv, pm_iir); - - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - I915_WRITE(VLV_IIR, iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); } out: @@ -1882,21 +2155,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) if (master_ctl == 0 && iir == 0) break; + ret = IRQ_HANDLED; + I915_WRITE(GEN8_MASTER_IRQ, 0); - gen8_gt_irq_handler(dev, dev_priv, master_ctl); + /* Find, clear, then process each source of interrupt */ - valleyview_pipestat_irq_handler(dev, iir); + if (iir) { + /* Consume port before clearing IIR or we'll miss events */ + if (iir & I915_DISPLAY_PORT_INTERRUPT) + i9xx_hpd_irq_handler(dev); + I915_WRITE(VLV_IIR, iir); + } - /* Consume port. Then clear IIR or we'll miss events */ - i9xx_hpd_irq_handler(dev); + gen8_gt_irq_handler(dev, dev_priv, master_ctl); - I915_WRITE(VLV_IIR, iir); + /* Call regardless, as some status bits might not be + * signalled in iir */ + valleyview_pipestat_irq_handler(dev, iir); I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); POSTING_READ(GEN8_MASTER_IRQ); - - ret = IRQ_HANDLED; } return ret; @@ -1907,8 +2186,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -2014,8 +2297,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; + u32 dig_hotplug_reg; - intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + + intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -2132,6 +2419,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) } } +/* + * To handle irqs with the minimum potential races with fresh interrupts, we: + * 1 - Disable Master Interrupt Control. + * 2 - Find the source(s) of the interrupt. + * 3 - Clear the Interrupt Identity bits (IIR). + * 4 - Process the interrupt(s) that had bits set in the IIRs. + * 5 - Re-enable Master Interrupt Control. 
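
Spelled out against a toy two-register device — nothing below is the actual i915 register layout — the five steps look like this:

#include <stdint.h>

/* Toy device: one master-enable register plus one identity (IIR) register. */
struct toy_irq_regs {
        volatile uint32_t master_ctl;   /* bit 31: master interrupt enable */
        volatile uint32_t iir;          /* write-1-to-clear identity bits */
};

#define TOY_MASTER_ENABLE (1u << 31)

static void toy_handle_source(uint32_t bits)
{
        (void)bits;                     /* per-source processing would go here */
}

static int toy_irq_handler(struct toy_irq_regs *regs)
{
        uint32_t iir;
        int handled = 0;

        /* 1: disable the master control; new events still latch in IIR */
        regs->master_ctl &= ~TOY_MASTER_ENABLE;

        /* 2: find the source(s) */
        iir = regs->iir;
        if (iir) {
                /* 3: clear the identity bits before processing, so an event
                 * arriving while we work re-latches and re-raises the irq
                 */
                regs->iir = iir;
                handled = 1;

                /* 4: process what was latched */
                toy_handle_source(iir);
        }

        /* 5: re-enable the master control */
        regs->master_ctl |= TOY_MASTER_ENABLE;

        return handled;
}
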
+ */ static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; @@ -2159,32 +2454,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) POSTING_READ(SDEIER); } + /* Find, clear, then process each source of interrupt */ + gt_iir = I915_READ(GTIIR); if (gt_iir) { + I915_WRITE(GTIIR, gt_iir); + ret = IRQ_HANDLED; if (INTEL_INFO(dev)->gen >= 6) snb_gt_irq_handler(dev, dev_priv, gt_iir); else ilk_gt_irq_handler(dev, dev_priv, gt_iir); - I915_WRITE(GTIIR, gt_iir); - ret = IRQ_HANDLED; } de_iir = I915_READ(DEIIR); if (de_iir) { + I915_WRITE(DEIIR, de_iir); + ret = IRQ_HANDLED; if (INTEL_INFO(dev)->gen >= 7) ivb_display_irq_handler(dev, de_iir); else ilk_display_irq_handler(dev, de_iir); - I915_WRITE(DEIIR, de_iir); - ret = IRQ_HANDLED; } if (INTEL_INFO(dev)->gen >= 6) { u32 pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { - gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; + gen6_rps_irq_handler(dev_priv, pm_iir); } } @@ -2215,36 +2512,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) I915_WRITE(GEN8_MASTER_IRQ, 0); POSTING_READ(GEN8_MASTER_IRQ); + /* Find, clear, then process each source of interrupt */ + ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); if (master_ctl & GEN8_DE_MISC_IRQ) { tmp = I915_READ(GEN8_DE_MISC_IIR); - if (tmp & GEN8_DE_MISC_GSE) - intel_opregion_asle_intr(dev); - else if (tmp) - DRM_ERROR("Unexpected DE Misc interrupt\n"); - else - DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); - if (tmp) { I915_WRITE(GEN8_DE_MISC_IIR, tmp); ret = IRQ_HANDLED; + if (tmp & GEN8_DE_MISC_GSE) + intel_opregion_asle_intr(dev); + else + DRM_ERROR("Unexpected DE Misc interrupt\n"); } + else + DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); } if (master_ctl & GEN8_DE_PORT_IRQ) { tmp = I915_READ(GEN8_DE_PORT_IIR); - if (tmp & GEN8_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); - else if (tmp) - DRM_ERROR("Unexpected DE Port interrupt\n"); - else - DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); - if (tmp) { I915_WRITE(GEN8_DE_PORT_IIR, tmp); ret = IRQ_HANDLED; + if (tmp & GEN8_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + else + DRM_ERROR("Unexpected DE Port interrupt\n"); } + else + DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); } for_each_pipe(pipe) { @@ -2254,33 +2551,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) continue; pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); - if (pipe_iir & GEN8_PIPE_VBLANK) - intel_pipe_handle_vblank(dev, pipe); - - if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (pipe_iir) { + ret = IRQ_HANDLED; + I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + if (pipe_iir & GEN8_PIPE_VBLANK) + intel_pipe_handle_vblank(dev, pipe); - if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { + intel_prepare_page_flip(dev, pipe); + intel_finish_page_flip_plane(dev, pipe); + } - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, - false)) - DRM_ERROR("Pipe %c FIFO underrun\n", - pipe_name(pipe)); - } + if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) + hsw_pipe_crc_irq_handler(dev, pipe); - if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { - DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", - pipe_name(pipe), - pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); - } + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { + if 
(intel_set_cpu_fifo_underrun_reporting(dev, pipe, + false)) + DRM_ERROR("Pipe %c FIFO underrun\n", + pipe_name(pipe)); + } - if (pipe_iir) { - ret = IRQ_HANDLED; - I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); + if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { + DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", + pipe_name(pipe), + pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); + } } else DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); } @@ -2292,13 +2588,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) * on older pch-split platforms. But this needs testing. */ u32 pch_iir = I915_READ(SDEIIR); - - cpt_irq_handler(dev, pch_iir); - if (pch_iir) { I915_WRITE(SDEIIR, pch_iir); ret = IRQ_HANDLED; - } + cpt_irq_handler(dev, pch_iir); + } else + DRM_ERROR("The master control interrupt lied (SDE)!\n"); + } I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); @@ -2753,12 +3049,7 @@ static bool ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) { if (INTEL_INFO(dev)->gen >= 8) { - /* - * FIXME: gen8 semaphore support - currently we don't emit - * semaphores on bdw anyway, but this needs to be addressed when - * we merge that code. - */ - return false; + return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | @@ -2767,19 +3058,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) } static struct intel_engine_cs * -semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) +semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_engine_cs *signaller; int i; if (INTEL_INFO(dev_priv->dev)->gen >= 8) { - /* - * FIXME: gen8 semaphore support - currently we don't emit - * semaphores on bdw anyway, but this needs to be addressed when - * we merge that code. - */ - return NULL; + for_each_ring(signaller, dev_priv, i) { + if (ring == signaller) + continue; + + if (offset == signaller->semaphore.signal_ggtt[ring->id]) + return signaller; + } } else { u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; @@ -2792,8 +3084,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) } } - DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", - ring->id, ipehr); + DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", + ring->id, ipehr, offset); return NULL; } @@ -2803,7 +3095,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; u32 cmd, ipehr, head; - int i; + u64 offset = 0; + int i, backwards; ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) @@ -2812,13 +3105,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) /* * HEAD is likely pointing to the dword after the actual command, * so scan backwards until we find the MBOX. But limit it to just 3 - * dwords. Note that we don't care about ACTHD here since that might + * or 4 dwords depending on the semaphore wait command size. + * Note that we don't care about ACTHD here since that might * point at at batch, and semaphores are always emitted into the * ringbuffer itself. */ head = I915_READ_HEAD(ring) & HEAD_ADDR; + backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 
5 : 4; - for (i = 4; i; --i) { + for (i = backwards; i; --i) { /* * Be paranoid and presume the hw has gone off into the wild - * our ring is smaller than what the hardware (and hence @@ -2838,7 +3133,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) return NULL; *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; - return semaphore_wait_to_signaller_ring(ring, ipehr); + if (INTEL_INFO(ring->dev)->gen >= 8) { + offset = ioread32(ring->buffer->virtual_start + head + 12); + offset <<= 32; + offset = ioread32(ring->buffer->virtual_start + head + 8); + } + return semaphore_wait_to_signaller_ring(ring, ipehr, offset); } static int semaphore_passed(struct intel_engine_cs *ring) @@ -2884,8 +3184,14 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp; - if (ring->hangcheck.acthd != acthd) - return HANGCHECK_ACTIVE; + if (acthd != ring->hangcheck.acthd) { + if (acthd > ring->hangcheck.max_acthd) { + ring->hangcheck.max_acthd = acthd; + return HANGCHECK_ACTIVE; + } + + return HANGCHECK_ACTIVE_LOOP; + } if (IS_GEN2(dev)) return HANGCHECK_HUNG; @@ -2996,8 +3302,9 @@ static void i915_hangcheck_elapsed(unsigned long data) switch (ring->hangcheck.action) { case HANGCHECK_IDLE: case HANGCHECK_WAIT: - break; case HANGCHECK_ACTIVE: + break; + case HANGCHECK_ACTIVE_LOOP: ring->hangcheck.score += BUSY; break; case HANGCHECK_KICK: @@ -3017,6 +3324,8 @@ static void i915_hangcheck_elapsed(unsigned long data) */ if (ring->hangcheck.score > 0) ring->hangcheck.score--; + + ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; } ring->hangcheck.seqno = seqno; @@ -3159,7 +3468,9 @@ static void gen8_irq_reset(struct drm_device *dev) gen8_gt_irq_reset(dev_priv); for_each_pipe(pipe) - GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); + if (intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); GEN5_IRQ_RESET(GEN8_DE_PORT_); GEN5_IRQ_RESET(GEN8_DE_MISC_); @@ -3168,6 +3479,18 @@ static void gen8_irq_reset(struct drm_device *dev) ibx_irq_reset(dev); } +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], + ~dev_priv->de_irq_mask[PIPE_B]); + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], + ~dev_priv->de_irq_mask[PIPE_C]); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + static void cherryview_irq_preinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3492,8 +3815,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; for_each_pipe(pipe) - GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], - de_pipe_enables); + if (intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(pipe))) + GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, + dev_priv->de_irq_mask[pipe], + de_pipe_enables); GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); } @@ -3561,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - gen8_irq_reset(dev); } @@ -3577,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, 0); - intel_hpd_irq_uninstall(dev_priv); - for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); @@ -3657,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev) if 
(!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - ironlake_irq_reset(dev); } @@ -4029,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - intel_hpd_irq_uninstall(dev_priv); - if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4267,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4284,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(IIR, I915_READ(IIR)); } -static void intel_hpd_irq_reenable(unsigned long data) +static void intel_hpd_irq_reenable(struct work_struct *work) { - struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), + hotplug_reenable_work.work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; unsigned long irqflags; int i; + intel_runtime_pm_get(dev_priv); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { struct drm_connector *connector; @@ -4317,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data) if (dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + intel_runtime_pm_put(dev_priv); } void intel_irq_init(struct drm_device *dev) @@ -4324,21 +4646,29 @@ void intel_irq_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); /* Let's track the enabled rps events */ - dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + if (IS_VALLEYVIEW(dev)) + /* WaGsvRC0ResidenncyMethod:VLV */ + dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; + else + dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; setup_timer(&dev_priv->gpu_error.hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); - setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, - (unsigned long) dev_priv); + INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, + intel_hpd_irq_reenable); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + /* Haven't installed the IRQ handler yet */ + dev_priv->pm._irqs_disabled = true; + if (IS_GEN2(dev)) { dev->max_vblank_count = 0; dev->driver->get_vblank_counter = i8xx_get_vblank_counter; @@ -4426,7 +4756,9 @@ void intel_hpd_init(struct drm_device *dev) list_for_each_entry(connector, &mode_config->connector_list, head) { struct intel_connector *intel_connector = to_intel_connector(connector); connector->polled = intel_connector->polled; - if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + connector->polled = DRM_CONNECTOR_POLL_HPD; + if (intel_connector->mst_port) connector->polled = DRM_CONNECTOR_POLL_HPD; } @@ -4444,7 +4776,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev) struct 
drm_i915_private *dev_priv = dev->dev_private; dev->driver->irq_uninstall(dev); - dev_priv->pm.irqs_disabled = true; + dev_priv->pm._irqs_disabled = true; } /* Restore interrupts so we can recover from runtime PM. */ @@ -4452,7 +4784,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - dev_priv->pm.irqs_disabled = false; + dev_priv->pm._irqs_disabled = false; dev->driver->irq_preinstall(dev); dev->driver->irq_postinstall(dev); } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index d05a2afa17dc..9842fd2e742a 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -48,6 +48,8 @@ struct i915_params i915 __read_mostly = { .disable_display = 0, .enable_cmd_parser = 1, .disable_vtd_wa = 0, + .use_mmio_flip = 0, + .mmio_debug = 0, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -64,12 +66,12 @@ module_param_named(powersave, i915.powersave, int, 0600); MODULE_PARM_DESC(powersave, "Enable powersavings, fbc, downclocking, etc. (default: true)"); -module_param_named(semaphores, i915.semaphores, int, 0400); +module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); MODULE_PARM_DESC(semaphores, "Use semaphores for inter-ring sync " "(default: -1 (use per-chip defaults))"); -module_param_named(enable_rc6, i915.enable_rc6, int, 0400); +module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400); MODULE_PARM_DESC(enable_rc6, "Enable power-saving render C-state 6. " "Different stages can be selected via bitmask values " @@ -77,7 +79,7 @@ MODULE_PARM_DESC(enable_rc6, "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " "default: -1 (use per-chip default)"); -module_param_named(enable_fbc, i915.enable_fbc, int, 0600); +module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); MODULE_PARM_DESC(enable_fbc, "Enable frame buffer compression for power savings " "(default: -1 (use per-chip default))"); @@ -111,7 +113,7 @@ MODULE_PARM_DESC(enable_hangcheck, "WARNING: Disabling this can cause system wide hangs. " "(default: true)"); -module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400); +module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); MODULE_PARM_DESC(enable_ppgtt, "Override PPGTT usage. " "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); @@ -156,3 +158,12 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)" module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); MODULE_PARM_DESC(enable_cmd_parser, "Enable command parsing (1=enabled [default], 0=disabled)"); + +module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); +MODULE_PARM_DESC(use_mmio_flip, + "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); + +module_param_named(mmio_debug, i915.mmio_debug, bool, 0600); +MODULE_PARM_DESC(mmio_debug, + "Enable the MMIO debug code (default: false). This may negatively " + "affect performance."); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a5bab61bfc00..f29b44c86a2f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -29,8 +29,8 @@ #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) -#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c) -#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c) +#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? 
(a) : \ + (pipe) == PIPE_B ? (b) : (c)) #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) #define _MASKED_BIT_DISABLE(a) ((a) << 16) @@ -240,7 +240,7 @@ #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) -#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */ #define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_UPDATE (1<<21) #define MI_SEMAPHORE_COMPARE (1<<20) @@ -266,6 +266,11 @@ #define MI_RESTORE_EXT_STATE_EN (1<<2) #define MI_FORCE_RESTORE (1<<1) #define MI_RESTORE_INHIBIT (1<<0) +#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ +#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) +#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ +#define MI_SEMAPHORE_POLL (1<<15) +#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) @@ -329,16 +334,20 @@ #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) -#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) + +#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) +#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) -#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) -#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_A (2<<20) +#define BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) #define BLT_DEPTH_8 (0<<24) #define BLT_DEPTH_16_565 (1<<24) #define BLT_DEPTH_16_1555 (2<<24) #define BLT_DEPTH_32 (3<<24) -#define BLT_ROP_GXCOPY (0xcc<<16) +#define BLT_ROP_SRC_COPY (0xcc<<16) +#define BLT_ROP_COLOR_COPY (0xf0<<16) #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) @@ -360,6 +369,7 @@ #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) #define PIPE_CONTROL_NOTIFY (1<<8) +#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) @@ -525,10 +535,21 @@ enum punit_power_well { #define PUNIT_REG_GPU_FREQ_STS 0xd8 #define GENFREQSTATUS (1<<0) #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc +#define PUNIT_REG_CZ_TIMESTAMP 0xce #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ +#define PUNIT_GPU_STATUS_REG 0xdb +#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 +#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff +#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8 +#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff + +#define PUNIT_GPU_DUTYCYCLE_REG 0xdf +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8 +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff + #define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c #define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 #define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 @@ -540,6 +561,11 @@ enum punit_power_well { #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 +#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000 +#define VLV_RP_UP_EI_THRESHOLD 90 +#define VLV_RP_DOWN_EI_THRESHOLD 70 +#define VLV_INT_COUNT_FOR_DOWN_EI 5 + /* vlv2 north 
clock has */ #define CCK_FUSE_REG 0x8 #define CCK_FUSE_HPLL_FREQ_MASK 0x3 @@ -574,6 +600,11 @@ enum punit_power_well { #define DSI_PLL_M1_DIV_SHIFT 0 #define DSI_PLL_M1_DIV_MASK (0x1ff << 0) #define CCK_DISPLAY_CLOCK_CONTROL 0x6b +#define DISPLAY_TRUNK_FORCE_ON (1 << 17) +#define DISPLAY_TRUNK_FORCE_OFF (1 << 16) +#define DISPLAY_FREQUENCY_STATUS (0x1f << 8) +#define DISPLAY_FREQUENCY_STATUS_SHIFT 8 +#define DISPLAY_FREQUENCY_VALUES (0x1f << 0) /** * DOC: DPIO @@ -761,6 +792,8 @@ enum punit_power_well { #define _VLV_PCS_DW8_CH0 0x8220 #define _VLV_PCS_DW8_CH1 0x8420 +#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20) +#define CHV_PCS_USEDCLKCHANNEL (1 << 21) #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) #define _VLV_PCS01_DW8_CH0 0x0220 @@ -869,6 +902,16 @@ enum punit_power_well { #define DPIO_CHV_PROP_COEFF_SHIFT 0 #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1) +#define _CHV_CMN_DW5_CH0 0x8114 +#define CHV_BUFRIGHTENA1_DISABLE (0 << 20) +#define CHV_BUFRIGHTENA1_NORMAL (1 << 20) +#define CHV_BUFRIGHTENA1_FORCE (3 << 20) +#define CHV_BUFRIGHTENA1_MASK (3 << 20) +#define CHV_BUFLEFTENA1_DISABLE (0 << 22) +#define CHV_BUFLEFTENA1_NORMAL (1 << 22) +#define CHV_BUFLEFTENA1_FORCE (3 << 22) +#define CHV_BUFLEFTENA1_MASK (3 << 22) + #define _CHV_CMN_DW13_CH0 0x8134 #define _CHV_CMN_DW0_CH1 0x8080 #define DPIO_CHV_S1_DIV_SHIFT 21 @@ -883,8 +926,21 @@ enum punit_power_well { #define _CHV_CMN_DW1_CH1 0x8084 #define DPIO_AFC_RECAL (1 << 14) #define DPIO_DCLKP_EN (1 << 13) +#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */ +#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */ +#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */ +#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */ +#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */ #define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1) +#define _CHV_CMN_DW19_CH0 0x814c +#define _CHV_CMN_DW6_CH1 0x8098 +#define CHV_CMN_USEDCLKCHANNEL (1 << 13) +#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1) + #define CHV_CMN_DW30 0x8178 #define DPIO_LRC_BYPASS (1 << 3) @@ -933,6 +989,7 @@ enum punit_power_well { #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 #define GEN7_FENCE_MAX_PITCH_VAL 0x0800 + /* control register for cpu gtt access */ #define TILECTL 0x101000 #define TILECTL_SWZCTL (1 << 0) @@ -1170,6 +1227,8 @@ enum punit_power_well { #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) #define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120) +#define VLV_PCBR_ADDR_SHIFT 12 + #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ #define EIR 0x020b0 #define EMR 0x020b4 @@ -1570,11 +1629,10 @@ enum punit_power_well { /* * Clock control & power management */ -#define DPLL_A_OFFSET 0x6014 -#define DPLL_B_OFFSET 0x6018 -#define CHV_DPLL_C_OFFSET 0x6030 -#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \ - dev_priv->info.display_mmio_offset) +#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) +#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) +#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030) +#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C) #define VGA0 0x6000 #define VGA1 0x6004 @@ -1662,11 +1720,10 @@ enum punit_power_well { #define 
SDVO_MULTIPLIER_SHIFT_HIRES 4 #define SDVO_MULTIPLIER_SHIFT_VGA 0 -#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ -#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ -#define CHV_DPLL_C_MD_OFFSET 0x603c -#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \ - dev_priv->info.display_mmio_offset) +#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) +#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) +#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c) +#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD) /* * UDI pixel divider, controlling how many pixels are stuffed into a packet. @@ -2231,7 +2288,7 @@ enum punit_power_well { /* Same as Haswell, but 72064 bytes now. */ #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) - +#define CHV_CLK_CTL1 0x101100 #define VLV_CLK_CTL2 0x101104 #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 @@ -2376,6 +2433,7 @@ enum punit_power_well { #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) #define EDP_PSR_ENABLE (1<<31) +#define BDW_PSR_SINGLE_FRAME (1<<30) #define EDP_PSR_LINK_DISABLE (0<<27) #define EDP_PSR_LINK_STANDBY (1<<27) #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) @@ -2533,8 +2591,14 @@ enum punit_power_well { #define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) #define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) #define PORTD_HOTPLUG_INT_STATUS (3 << 21) +#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) +#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) #define PORTC_HOTPLUG_INT_STATUS (3 << 19) +#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19) +#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19) #define PORTB_HOTPLUG_INT_STATUS (3 << 17) +#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17) +#define PORTB_HOTPLUG_INT_SHORT_PLUSE (1 << 17) /* CRT/TV common between gen3+ */ #define CRT_HOTPLUG_INT_STATUS (1 << 11) #define TV_HOTPLUG_INT_STATUS (1 << 10) @@ -2588,7 +2652,7 @@ enum punit_power_well { #define PORT_DFT_I9XX 0x61150 #define DC_BALANCE_RESET (1 << 25) -#define PORT_DFT2_G4X 0x61154 +#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) #define DC_BALANCE_RESET_VLV (1 << 31) #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) #define PIPE_B_SCRAMBLE_RESET (1 << 1) @@ -3803,47 +3867,47 @@ enum punit_power_well { /* drain latency register values*/ #define DRAIN_LATENCY_PRECISION_32 32 -#define DRAIN_LATENCY_PRECISION_16 16 +#define DRAIN_LATENCY_PRECISION_64 64 #define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) -#define DDL_CURSORA_PRECISION_32 (1<<31) -#define DDL_CURSORA_PRECISION_16 (0<<31) +#define DDL_CURSORA_PRECISION_64 (1<<31) +#define DDL_CURSORA_PRECISION_32 (0<<31) #define DDL_CURSORA_SHIFT 24 -#define DDL_SPRITEB_PRECISION_32 (1<<23) -#define DDL_SPRITEB_PRECISION_16 (0<<23) +#define DDL_SPRITEB_PRECISION_64 (1<<23) +#define DDL_SPRITEB_PRECISION_32 (0<<23) #define DDL_SPRITEB_SHIFT 16 -#define DDL_SPRITEA_PRECISION_32 (1<<15) -#define DDL_SPRITEA_PRECISION_16 (0<<15) +#define DDL_SPRITEA_PRECISION_64 (1<<15) +#define DDL_SPRITEA_PRECISION_32 (0<<15) #define DDL_SPRITEA_SHIFT 8 -#define DDL_PLANEA_PRECISION_32 (1<<7) -#define DDL_PLANEA_PRECISION_16 (0<<7) +#define DDL_PLANEA_PRECISION_64 (1<<7) +#define DDL_PLANEA_PRECISION_32 (0<<7) #define DDL_PLANEA_SHIFT 0 #define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) -#define DDL_CURSORB_PRECISION_32 (1<<31) -#define DDL_CURSORB_PRECISION_16 (0<<31) +#define DDL_CURSORB_PRECISION_64 (1<<31) +#define DDL_CURSORB_PRECISION_32 (0<<31) #define DDL_CURSORB_SHIFT 24 -#define 
DDL_SPRITED_PRECISION_32 (1<<23) -#define DDL_SPRITED_PRECISION_16 (0<<23) +#define DDL_SPRITED_PRECISION_64 (1<<23) +#define DDL_SPRITED_PRECISION_32 (0<<23) #define DDL_SPRITED_SHIFT 16 -#define DDL_SPRITEC_PRECISION_32 (1<<15) -#define DDL_SPRITEC_PRECISION_16 (0<<15) +#define DDL_SPRITEC_PRECISION_64 (1<<15) +#define DDL_SPRITEC_PRECISION_32 (0<<15) #define DDL_SPRITEC_SHIFT 8 -#define DDL_PLANEB_PRECISION_32 (1<<7) -#define DDL_PLANEB_PRECISION_16 (0<<7) +#define DDL_PLANEB_PRECISION_64 (1<<7) +#define DDL_PLANEB_PRECISION_32 (0<<7) #define DDL_PLANEB_SHIFT 0 #define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058) -#define DDL_CURSORC_PRECISION_32 (1<<31) -#define DDL_CURSORC_PRECISION_16 (0<<31) +#define DDL_CURSORC_PRECISION_64 (1<<31) +#define DDL_CURSORC_PRECISION_32 (0<<31) #define DDL_CURSORC_SHIFT 24 -#define DDL_SPRITEF_PRECISION_32 (1<<23) -#define DDL_SPRITEF_PRECISION_16 (0<<23) +#define DDL_SPRITEF_PRECISION_64 (1<<23) +#define DDL_SPRITEF_PRECISION_32 (0<<23) #define DDL_SPRITEF_SHIFT 16 -#define DDL_SPRITEE_PRECISION_32 (1<<15) -#define DDL_SPRITEE_PRECISION_16 (0<<15) +#define DDL_SPRITEE_PRECISION_64 (1<<15) +#define DDL_SPRITEE_PRECISION_32 (0<<15) #define DDL_SPRITEE_SHIFT 8 -#define DDL_PLANEC_PRECISION_32 (1<<7) -#define DDL_PLANEC_PRECISION_16 (0<<7) +#define DDL_PLANEC_PRECISION_64 (1<<7) +#define DDL_PLANEC_PRECISION_32 (0<<7) #define DDL_PLANEC_SHIFT 0 /* FIFO watermark sizes etc */ @@ -4630,6 +4694,8 @@ enum punit_power_well { #define GEN7_L3CNTLREG1 0xB01C #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C #define GEN7_L3AGDIS (1<<19) +#define GEN7_L3CNTLREG2 0xB020 +#define GEN7_L3CNTLREG3 0xB024 #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000 @@ -4876,8 +4942,7 @@ enum punit_power_well { #define _PCH_TRANSA_LINK_M2 0xe0048 #define _PCH_TRANSA_LINK_N2 0xe004c -/* Per-transcoder DIP controls */ - +/* Per-transcoder DIP controls (PCH) */ #define _VIDEO_DIP_CTL_A 0xe0200 #define _VIDEO_DIP_DATA_A 0xe0208 #define _VIDEO_DIP_GCP_A 0xe0210 @@ -4890,6 +4955,7 @@ enum punit_power_well { #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) +/* Per-transcoder DIP controls (VLV) */ #define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) #define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) @@ -4898,12 +4964,19 @@ enum punit_power_well { #define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) +#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0) +#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4) +#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8) + #define VLV_TVIDEO_DIP_CTL(pipe) \ - _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) + _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \ + VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C) #define VLV_TVIDEO_DIP_DATA(pipe) \ - _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B) + _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \ + VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C) #define VLV_TVIDEO_DIP_GCP(pipe) \ - _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B) + _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \ + VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C) /* Haswell DIP controls */ #define HSW_VIDEO_DIP_CTL_A 0x60200 @@ -5334,6 +5407,7 @@ enum punit_power_well { #define VLV_GTLC_ALLOWWAKEERR (1 << 1) #define 
VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) +#define VLV_GTLC_SURVIVABILITY_REG 0x130098 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ #define FORCEWAKE_KERNEL 0x1 #define FORCEWAKE_USER 0x2 @@ -5471,6 +5545,12 @@ enum punit_power_well { GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) +#define CHV_CZ_CLOCK_FREQ_MODE_200 200 +#define CHV_CZ_CLOCK_FREQ_MODE_267 267 +#define CHV_CZ_CLOCK_FREQ_MODE_320 320 +#define CHV_CZ_CLOCK_FREQ_MODE_333 333 +#define CHV_CZ_CLOCK_FREQ_MODE_400 400 + #define GEN7_GT_SCRATCH_BASE 0x4F100 #define GEN7_GT_SCRATCH_REG_NUM 8 @@ -5481,6 +5561,8 @@ enum punit_power_well { #define GEN6_GT_GFX_RC6_LOCKED 0x138104 #define VLV_COUNTER_CONTROL 0x138104 #define VLV_COUNT_RANGE_HIGH (1<<15) +#define VLV_MEDIA_RC0_COUNT_EN (1<<5) +#define VLV_RENDER_RC0_COUNT_EN (1<<4) #define VLV_MEDIA_RC6_COUNT_EN (1<<1) #define VLV_RENDER_RC6_COUNT_EN (1<<0) #define GEN6_GT_GFX_RC6 0x138108 @@ -5489,6 +5571,8 @@ enum punit_power_well { #define GEN6_GT_GFX_RC6p 0x13810C #define GEN6_GT_GFX_RC6pp 0x138110 +#define VLV_RENDER_C0_COUNT_REG 0x138118 +#define VLV_MEDIA_C0_COUNT_REG 0x13811C #define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_READY (1<<31) @@ -5723,6 +5807,7 @@ enum punit_power_well { #define TRANS_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ #define TRANS_DDI_PORT_MASK (7<<28) +#define TRANS_DDI_PORT_SHIFT 28 #define TRANS_DDI_SELECT_PORT(x) ((x)<<28) #define TRANS_DDI_PORT_NONE (0<<28) #define TRANS_DDI_MODE_SELECT_MASK (7<<24) @@ -5743,6 +5828,7 @@ enum punit_power_well { #define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) +#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8) #define TRANS_DDI_BFI_ENABLE (1<<4) /* DisplayPort Transport Control */ @@ -5752,6 +5838,7 @@ enum punit_power_well { #define DP_TP_CTL_ENABLE (1<<31) #define DP_TP_CTL_MODE_SST (0<<27) #define DP_TP_CTL_MODE_MST (1<<27) +#define DP_TP_CTL_FORCE_ACT (1<<25) #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) #define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) @@ -5766,15 +5853,19 @@ enum punit_power_well { #define DP_TP_STATUS_A 0x64044 #define DP_TP_STATUS_B 0x64144 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) -#define DP_TP_STATUS_IDLE_DONE (1<<25) -#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) +#define DP_TP_STATUS_IDLE_DONE (1<<25) +#define DP_TP_STATUS_ACT_SENT (1<<24) +#define DP_TP_STATUS_MODE_STATUS_MST (1<<23) +#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) +#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8) +#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4) +#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0) /* DDI Buffer Control */ #define DDI_BUF_CTL_A 0x64000 #define DDI_BUF_CTL_B 0x64100 #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) #define DDI_BUF_CTL_ENABLE (1<<31) -/* Haswell */ #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ #define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ @@ -5784,16 +5875,6 @@ enum punit_power_well { #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ -/* Broadwell */ -#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */ -#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */ -#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */ -#define 
DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */ -#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */ -#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */ -#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */ -#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */ -#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */ #define DDI_BUF_EMP_MASK (0xf<<24) #define DDI_BUF_PORT_REVERSAL (1<<16) #define DDI_BUF_IS_IDLE (1<<7) @@ -5861,10 +5942,12 @@ enum punit_power_well { /* WRPLL */ #define WRPLL_CTL1 0x46040 #define WRPLL_CTL2 0x46060 +#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2) #define WRPLL_PLL_ENABLE (1<<31) -#define WRPLL_PLL_SELECT_SSC (0x01<<28) -#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) -#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) +#define WRPLL_PLL_SSC (1<<28) +#define WRPLL_PLL_NON_SSC (2<<28) +#define WRPLL_PLL_LCPLL (3<<28) +#define WRPLL_PLL_REF_MASK (3<<28) /* WRPLL divider programming */ #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) #define WRPLL_DIVIDER_REF_MASK (0xff) @@ -5883,6 +5966,7 @@ enum punit_power_well { #define PORT_CLK_SEL_LCPLL_1350 (1<<29) #define PORT_CLK_SEL_LCPLL_810 (2<<29) #define PORT_CLK_SEL_SPLL (3<<29) +#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29) #define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29) #define PORT_CLK_SEL_NONE (7<<29) @@ -5924,7 +6008,10 @@ enum punit_power_well { #define LCPLL_CD_SOURCE_FCLK (1<<21) #define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) -#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) +/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, + * since on HSW we can't write to it using I915_WRITE. */ +#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) +#define D_COMP_BDW 0x138144 #define D_COMP_RCOMP_IN_PROGRESS (1<<9) #define D_COMP_COMP_FORCE (1<<8) #define D_COMP_COMP_DISABLE (1<<0) @@ -6005,7 +6092,8 @@ enum punit_power_well { #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) #define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) -#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL) +#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \ + _MIPIB_PORT_CTRL) #define DPI_ENABLE (1 << 31) /* A + B */ #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) @@ -6047,18 +6135,20 @@ enum punit_power_well { #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) #define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) -#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) +#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \ + _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) #define TEARING_EFFECT_DELAY_SHIFT 0 #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) /* XXX: all bits reserved */ -#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) +#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) /* MIPI DSI Controller and D-PHY registers */ -#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000) -#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800) -#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY) +#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) +#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) +#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \ + _MIPIB_DEVICE_READY) #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ #define ULPS_STATE_MASK (3 << 1) #define ULPS_STATE_ENTER (2 << 1) @@ -6066,12 +6156,14 @@ enum punit_power_well { #define ULPS_STATE_NORMAL_OPERATION (0 << 1) 
#define DEVICE_READY (1 << 0) -#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004) -#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804) -#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT) -#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008) -#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808) -#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN) +#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) +#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) +#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \ + _MIPIB_INTR_STAT) +#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) +#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) +#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \ + _MIPIB_INTR_EN) #define TEARING_EFFECT (1 << 31) #define SPL_PKT_SENT_INTERRUPT (1 << 30) #define GEN_READ_DATA_AVAIL (1 << 29) @@ -6105,9 +6197,10 @@ enum punit_power_well { #define RXSOT_SYNC_ERROR (1 << 1) #define RXSOT_ERROR (1 << 0) -#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c) -#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c) -#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG) +#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) +#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) +#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \ + _MIPIB_DSI_FUNC_PRG) #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) #define CMD_MODE_NOT_SUPPORTED (0 << 13) #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) @@ -6128,78 +6221,94 @@ enum punit_power_well { #define DATA_LANES_PRG_REG_SHIFT 0 #define DATA_LANES_PRG_REG_MASK (7 << 0) -#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010) -#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810) -#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT) +#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) +#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) +#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \ + _MIPIB_HS_TX_TIMEOUT) #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014) -#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814) -#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT) +#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) +#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) +#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \ + _MIPIB_LP_RX_TIMEOUT) #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018) -#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818) -#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) +#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) +#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) +#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \ + _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) #define TURN_AROUND_TIMEOUT_MASK 0x3f -#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c) -#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c) -#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) +#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) +#define _MIPIB_DEVICE_RESET_TIMER 
(dev_priv->mipi_mmio_base + 0xb81c) +#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \ + _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) #define DEVICE_RESET_TIMER_MASK 0xffff -#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020) -#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820) -#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION) +#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) +#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) +#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \ + _MIPIB_DPI_RESOLUTION) #define VERTICAL_ADDRESS_SHIFT 16 #define VERTICAL_ADDRESS_MASK (0xffff << 16) #define HORIZONTAL_ADDRESS_SHIFT 0 #define HORIZONTAL_ADDRESS_MASK 0xffff -#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024) -#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824) -#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) +#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) +#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) +#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \ + _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) #define DBI_FIFO_EMPTY_HALF (0 << 0) #define DBI_FIFO_EMPTY_QUARTER (1 << 0) #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) /* regs below are bits 15:0 */ -#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028) -#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828) -#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) - -#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c) -#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c) -#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT) - -#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030) -#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830) -#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT) - -#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034) -#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834) -#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) - -#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038) -#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838) -#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) +#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) +#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) +#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) + +#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) +#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) +#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \ + _MIPIB_HBP_COUNT) + +#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) +#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) +#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \ + _MIPIB_HFP_COUNT) + +#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) +#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) +#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) + +#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) +#define _MIPIB_VSYNC_PADDING_COUNT 
(dev_priv->mipi_mmio_base + 0xb838) +#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) + +#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) +#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) +#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \ + _MIPIB_VBP_COUNT) + +#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) +#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) +#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \ + _MIPIB_VFP_COUNT) + +#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) +#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) +#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \ + _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) -#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c) -#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c) -#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT) - -#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040) -#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840) -#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT) - -#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044) -#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844) -#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) /* regs above are bits 15:0 */ -#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048) -#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848) -#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL) +#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) +#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) +#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \ + _MIPIB_DPI_CONTROL) #define DPI_LP_MODE (1 << 6) #define BACKLIGHT_OFF (1 << 5) #define BACKLIGHT_ON (1 << 4) @@ -6208,27 +6317,31 @@ enum punit_power_well { #define TURN_ON (1 << 1) #define SHUTDOWN (1 << 0) -#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c) -#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c) -#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA) +#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) +#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) +#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \ + _MIPIB_DPI_DATA) #define COMMAND_BYTE_SHIFT 0 #define COMMAND_BYTE_MASK (0x3f << 0) -#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050) -#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850) -#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT) +#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) +#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) +#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \ + _MIPIB_INIT_COUNT) #define MASTER_INIT_TIMER_SHIFT 0 #define MASTER_INIT_TIMER_MASK (0xffff << 0) -#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054) -#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854) -#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) +#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) +#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) +#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \ + _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) #define 
MAX_RETURN_PKT_SIZE_SHIFT 0 #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) -#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058) -#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858) -#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) +#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) +#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) +#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \ + _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) #define DISABLE_VIDEO_BTA (1 << 3) #define IP_TG_CONFIG (1 << 2) @@ -6236,9 +6349,10 @@ enum punit_power_well { #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) #define VIDEO_MODE_BURST (3 << 0) -#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c) -#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c) -#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE) +#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) +#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) +#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \ + _MIPIB_EOT_DISABLE) #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) @@ -6248,28 +6362,33 @@ enum punit_power_well { #define CLOCKSTOP (1 << 1) #define EOT_DISABLE (1 << 0) -#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060) -#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860) -#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK) +#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) +#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) +#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \ + _MIPIB_LP_BYTECLK) #define LP_BYTECLK_SHIFT 0 #define LP_BYTECLK_MASK (0xffff << 0) /* bits 31:0 */ -#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064) -#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864) -#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA) +#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) +#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) +#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \ + _MIPIB_LP_GEN_DATA) /* bits 31:0 */ -#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068) -#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868) -#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA) - -#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c) -#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c) -#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL) -#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070) -#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870) -#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL) +#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) +#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) +#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \ + _MIPIB_HS_GEN_DATA) + +#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) +#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) +#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \ + _MIPIB_LP_GEN_CTRL) +#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) +#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) +#define 
MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \ + _MIPIB_HS_GEN_CTRL) #define LONG_PACKET_WORD_COUNT_SHIFT 8 #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) #define SHORT_PACKET_PARAM_SHIFT 8 @@ -6280,9 +6399,10 @@ enum punit_power_well { #define DATA_TYPE_MASK (3f << 0) /* data type values, see include/video/mipi_display.h */ -#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074) -#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874) -#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT) +#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) +#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) +#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \ + _MIPIB_GEN_FIFO_STAT) #define DPI_FIFO_EMPTY (1 << 28) #define DBI_FIFO_EMPTY (1 << 27) #define LP_CTRL_FIFO_EMPTY (1 << 26) @@ -6298,16 +6418,18 @@ enum punit_power_well { #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) #define HS_DATA_FIFO_FULL (1 << 0) -#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078) -#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878) -#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) +#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) +#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) +#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \ + _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) #define DBI_HS_LP_MODE_MASK (1 << 0) #define DBI_LP_MODE (1 << 0) #define DBI_HS_MODE (0 << 0) -#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080) -#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880) -#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM) +#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) +#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) +#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \ + _MIPIB_DPHY_PARAM) #define EXIT_ZERO_COUNT_SHIFT 24 #define EXIT_ZERO_COUNT_MASK (0x3f << 24) #define TRAIL_COUNT_SHIFT 16 @@ -6318,34 +6440,41 @@ enum punit_power_well { #define PREPARE_COUNT_MASK (0x3f << 0) /* bits 31:0 */ -#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084) -#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884) -#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL) - -#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088) -#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888) -#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) +#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) +#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) +#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \ + _MIPIB_DBI_BW_CTRL) + +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ + + 0xb088) +#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ + + 0xb888) +#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \ + _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) #define LP_HS_SSW_CNT_SHIFT 16 #define LP_HS_SSW_CNT_MASK (0xffff << 16) #define HS_LP_PWR_SW_CNT_SHIFT 0 #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) -#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c) -#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c) -#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) +#define _MIPIA_STOP_STATE_STALL 
(dev_priv->mipi_mmio_base + 0xb08c) +#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) +#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \ + _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) #define STOP_STATE_STALL_COUNTER_SHIFT 0 #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) -#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090) -#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890) -#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) -#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094) -#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894) -#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1) +#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) +#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) +#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \ + _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) +#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) +#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) +#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \ + _MIPIB_INTR_EN_REG_1) #define RX_CONTENTION_DETECTED (1 << 0) /* XXX: only pipe A ?!? */ -#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100) +#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100) #define DBI_TYPEC_ENABLE (1 << 31) #define DBI_TYPEC_WIP (1 << 30) #define DBI_TYPEC_OPTION_SHIFT 28 @@ -6359,9 +6488,10 @@ enum punit_power_well { /* MIPI adapter registers */ -#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104) -#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904) -#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL) +#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) +#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904) +#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \ + _MIPIB_CTRL) #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) @@ -6373,50 +6503,52 @@ enum punit_power_well { #define READ_REQUEST_PRIORITY_HIGH (3 << 3) #define RGB_FLIP_TO_BGR (1 << 2) -#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108) -#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908) -#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS) +#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) +#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) +#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \ + _MIPIB_DATA_ADDRESS) #define DATA_MEM_ADDRESS_SHIFT 5 #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) #define DATA_VALID (1 << 0) -#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c) -#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c) -#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH) +#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) +#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) +#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \ + _MIPIB_DATA_LENGTH) #define DATA_LENGTH_SHIFT 0 #define DATA_LENGTH_MASK (0xfffff << 0) -#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110) -#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910) -#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) +#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) +#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) +#define 
MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \ + _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) #define COMMAND_MEM_ADDRESS_SHIFT 5 #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) #define AUTO_PWG_ENABLE (1 << 2) #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) #define COMMAND_VALID (1 << 0) -#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114) -#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914) -#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH) +#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) +#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) +#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \ + _MIPIB_COMMAND_LENGTH) #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) -#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118) -#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918) -#define MIPI_READ_DATA_RETURN(pipe, n) \ - (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ +#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) +#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) +#define MIPI_READ_DATA_RETURN(tc, n) \ + (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \ + + 4 * (n)) /* n: 0...7 */ -#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138) -#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938) -#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) +#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) +#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) +#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \ + _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) /* For UMS only (deprecated): */ #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) -#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014) -#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018) -#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c) -#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020) #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 86ce39aad0ff..ae7fd8fc27f0 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) intel_runtime_pm_get(dev_priv); - /* On VLV, residency time is in CZ units rather than 1.28us */ + /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ if (IS_VALLEYVIEW(dev)) { - u32 clkctl2; + u32 reg, czcount_30ns; - clkctl2 = I915_READ(VLV_CLK_CTL2) >> - CLK_CTL2_CZCOUNT_30NS_SHIFT; - if (!clkctl2) { - WARN(!clkctl2, "bogus CZ count value"); + if (IS_CHERRYVIEW(dev)) + reg = CHV_CLK_CTL1; + else + reg = VLV_CLK_CTL2; + + czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT; + + if (!czcount_30ns) { + WARN(!czcount_30ns, "bogus CZ count value"); ret = 0; goto out; } - units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2); + + units = 0; + div = 1000000ULL; + + if (IS_CHERRYVIEW(dev)) { + /* Special case for 320Mhz */ + if (czcount_30ns == 1) { + div = 10000000ULL; + units = 3125ULL; + } else { + /* chv counts are one less */ + czcount_30ns += 1; + } + } + + if (units == 0) + units = 
DIV_ROUND_UP_ULL(30ULL * bias, + (u64)czcount_30ns); + if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) units <<= 8; - div = 1000000ULL * bias; + div = div * bias; } raw_time = I915_READ(reg) * units; @@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr mutex_unlock(&dev->struct_mutex); if (attr == &dev_attr_gt_RP0_freq_mhz) { - val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER; + if (IS_VALLEYVIEW(dev)) + val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq); + else + val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER; } else if (attr == &dev_attr_gt_RP1_freq_mhz) { - val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER; + if (IS_VALLEYVIEW(dev)) + val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq); + else + val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER; } else if (attr == &dev_attr_gt_RPn_freq_mhz) { - val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER; + if (IS_VALLEYVIEW(dev)) + val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq); + else + val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER; } else { BUG(); } @@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = { &dev_attr_gt_cur_freq_mhz.attr, &dev_attr_gt_max_freq_mhz.attr, &dev_attr_gt_min_freq_mhz.attr, + &dev_attr_gt_RP0_freq_mhz.attr, + &dev_attr_gt_RP1_freq_mhz.attr, + &dev_attr_gt_RPn_freq_mhz.attr, &dev_attr_vlv_rpe_freq_mhz.attr, NULL, }; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 21b347efcf1e..afcc8dd40bdd 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb) dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; + dev_priv->vbt.backlight.min_brightness = entry->min_brightness; DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " "active %s, min brightness %u, level %u\n", dev_priv->vbt.backlight.pwm_freq_hz, dev_priv->vbt.backlight.active_low_pwm ? 
"low" : "high", - entry->min_brightness, + dev_priv->vbt.backlight.min_brightness, backlight_data->level[panel_type]); } @@ -877,7 +878,7 @@ err: /* error during parsing so set all pointers to null * because of partial parsing */ - memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX); + memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence)); } static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, @@ -1122,7 +1123,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) } } -static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { DRM_DEBUG_KMS("Falling back to manually reading VBT from " "VBIOS ROM for %s\n", diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 5a045d3bd77e..9212e6504e0f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); } +static void hsw_crt_pre_enable(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n"); + I915_WRITE(SPLL_CTL, + SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC); + POSTING_READ(SPLL_CTL); + udelay(20); +} + /* Note: The caller is required to filter out dpms modes not supported by the * platform. */ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) @@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder) intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); } + +static void hsw_crt_post_disable(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t val; + + DRM_DEBUG_KMS("Disabling SPLL\n"); + val = I915_READ(SPLL_CTL); + WARN_ON(!(val & SPLL_PLL_ENABLE)); + I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); + POSTING_READ(SPLL_CTL); +} + static void intel_enable_crt(struct intel_encoder *encoder) { struct intel_crt *crt = intel_encoder_to_crt(encoder); @@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, pipe_config->pipe_bpp = 24; /* FDI must always be 2.7 GHz */ - if (HAS_DDI(dev)) + if (HAS_DDI(dev)) { + pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; pipe_config->port_clock = 135000 * 2; + } return true; } @@ -632,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force) struct intel_load_detect_pipe tmp; struct drm_modeset_acquire_ctx ctx; - intel_runtime_pm_get(dev_priv); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", connector->base.id, connector->name, force); @@ -673,20 +699,23 @@ intel_crt_detect(struct drm_connector *connector, bool force) goto out; } + drm_modeset_acquire_init(&ctx, 0); + /* for pre-945g platforms use load detect */ if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(connector, &tmp, &ctx); + intel_release_load_detect_pipe(connector, &tmp); } else status = connector_status_unknown; + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + out: intel_display_power_put(dev_priv, power_domain); - intel_runtime_pm_put(dev_priv); - return status; } @@ -775,7 +804,7 @@ static 
const struct drm_encoder_funcs intel_crt_enc_funcs = { .destroy = intel_encoder_destroy, }; -static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) +static int intel_no_crt_dmi_callback(const struct dmi_system_id *id) { DRM_INFO("Skipping CRT initialization for %s\n", id->ident); return 1; @@ -860,6 +889,8 @@ void intel_crt_init(struct drm_device *dev) if (HAS_DDI(dev)) { crt->base.get_config = hsw_crt_get_config; crt->base.get_hw_state = intel_ddi_get_hw_state; + crt->base.pre_enable = hsw_crt_pre_enable; + crt->base.post_disable = hsw_crt_post_disable; } else { crt->base.get_config = intel_crt_get_config; crt->base.get_hw_state = intel_crt_get_hw_state; @@ -869,7 +900,7 @@ void intel_crt_init(struct drm_device *dev) drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); if (!I915_HAS_HOTPLUG(dev)) intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b17b9c7c769f..5db0b5552e39 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = { 0x00FFFFFF, 0x00000012, /* eDP parameters */ 0x00EBAFFF, 0x00020011, 0x00C71FFF, 0x0006000F, + 0x00AAAFFF, 0x000E000A, 0x00FFFFFF, 0x00020011, 0x00DB6FFF, 0x0005000F, 0x00BEEFFF, 0x000A000C, 0x00FFFFFF, 0x0005000F, 0x00DB6FFF, 0x000A000C, - 0x00FFFFFF, 0x000A000C, 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ }; @@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = { 0x00FFFFFF, 0x0007000E, /* DP parameters */ 0x00D75FFF, 0x000E000A, 0x00BEFFFF, 0x00140006, + 0x80B2CFFF, 0x001B0002, 0x00FFFFFF, 0x000E000A, 0x00D75FFF, 0x00180004, 0x80CB2FFF, 0x001B0002, 0x00F7DFFF, 0x00180004, 0x80D75FFF, 0x001B0002, - 0x80FFFFFF, 0x001B0002, 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ }; @@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) struct drm_encoder *encoder = &intel_encoder->base; int type = intel_encoder->type; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || + if (type == INTEL_OUTPUT_DP_MST) { + struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary; + return intel_dig_port->port; + } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); @@ -277,7 +280,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); /* Configure Port Clock Select */ - I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); + I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel); + WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL); /* Start the training iterating through available voltages and emphasis, * testing each value twice. 
*/ @@ -364,6 +368,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) DRM_ERROR("FDI link training failed!\n"); } +void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(&encoder->base); + + intel_dp->DP = intel_dig_port->saved_port_bits | + DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; + intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); + +} + static struct intel_encoder * intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) { @@ -385,53 +401,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) return ret; } -void intel_ddi_put_crtc_pll(struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - uint32_t val; - - switch (intel_crtc->ddi_pll_sel) { - case PORT_CLK_SEL_SPLL: - plls->spll_refcount--; - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Disabling SPLL\n"); - val = I915_READ(SPLL_CTL); - WARN_ON(!(val & SPLL_PLL_ENABLE)); - I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); - POSTING_READ(SPLL_CTL); - } - break; - case PORT_CLK_SEL_WRPLL1: - plls->wrpll1_refcount--; - if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 1\n"); - val = I915_READ(WRPLL_CTL1); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL1); - } - break; - case PORT_CLK_SEL_WRPLL2: - plls->wrpll2_refcount--; - if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Disabling WRPLL 2\n"); - val = I915_READ(WRPLL_CTL2); - WARN_ON(!(val & WRPLL_PLL_ENABLE)); - I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL2); - } - break; - } - - WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n"); - WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n"); - WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n"); - - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; -} - #define LC_FREQ 2700 #define LC_FREQ_2K (LC_FREQ * 2000) @@ -592,9 +561,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, u32 wrpll; wrpll = I915_READ(reg); - switch (wrpll & SPLL_PLL_REF_MASK) { - case SPLL_PLL_SSC: - case SPLL_PLL_NON_SSC: + switch (wrpll & WRPLL_PLL_REF_MASK) { + case WRPLL_PLL_SSC: + case WRPLL_PLL_NON_SSC: /* * We could calculate spread here, but our checking * code only cares about 5% accuracy, and spread is a max of @@ -602,7 +571,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, */ refclk = 135; break; - case SPLL_PLL_LCPLL: + case WRPLL_PLL_LCPLL: refclk = LC_FREQ; break; default: @@ -618,15 +587,14 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, return (refclk * n * 100) / (p * r); } -static void intel_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_config *pipe_config) +void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - enum port port = intel_ddi_get_encoder_port(encoder); int link_clock = 0; u32 val, pll; - val = I915_READ(PORT_CLK_SEL(port)); + val = pipe_config->ddi_pll_sel; switch (val & PORT_CLK_SEL_MASK) { case PORT_CLK_SEL_LCPLL_810: link_clock = 81000; @@ -750,173 +718,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; struct intel_encoder *intel_encoder = 
intel_ddi_get_crtc_encoder(crtc); - struct drm_encoder *encoder = &intel_encoder->base; - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; int type = intel_encoder->type; - enum pipe pipe = intel_crtc->pipe; int clock = intel_crtc->config.port_clock; - intel_ddi_put_crtc_pll(crtc); - - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_put_shared_dpll(intel_crtc); - switch (intel_dp->link_bw) { - case DP_LINK_BW_1_62: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; - break; - case DP_LINK_BW_2_7: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; - break; - case DP_LINK_BW_5_4: - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; - break; - default: - DRM_ERROR("Link bandwidth %d unsupported\n", - intel_dp->link_bw); - return false; - } - - } else if (type == INTEL_OUTPUT_HDMI) { - uint32_t reg, val; + if (type == INTEL_OUTPUT_HDMI) { + struct intel_shared_dpll *pll; + uint32_t val; unsigned p, n2, r2; intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); - val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | + val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); - if (val == I915_READ(WRPLL_CTL1)) { - DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL1; - } else if (val == I915_READ(WRPLL_CTL2)) { - DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL2; - } else if (plls->wrpll1_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL1; - } else if (plls->wrpll2_refcount == 0) { - DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n", - pipe_name(pipe)); - reg = WRPLL_CTL2; - } else { - DRM_ERROR("No WRPLLs available!\n"); - return false; - } - - DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", - clock, p, n2, r2); - - if (reg == WRPLL_CTL1) { - plls->wrpll1_refcount++; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1; - } else { - plls->wrpll2_refcount++; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2; - } + intel_crtc->config.dpll_hw_state.wrpll = val; - } else if (type == INTEL_OUTPUT_ANALOG) { - if (plls->spll_refcount == 0) { - DRM_DEBUG_KMS("Using SPLL on pipe %c\n", - pipe_name(pipe)); - plls->spll_refcount++; - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL; - } else { - DRM_ERROR("SPLL already in use\n"); + pll = intel_get_shared_dpll(intel_crtc); + if (pll == NULL) { + DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", + pipe_name(intel_crtc->pipe)); return false; } - } else { - WARN(1, "Invalid DDI encoder type %d\n", type); - return false; + intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); } return true; } -/* - * To be called after intel_ddi_pll_select(). That one selects the PLL to be - * used, this one actually enables the PLL. 
- */ -void intel_ddi_pll_enable(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; - int clock = crtc->config.port_clock; - uint32_t reg, cur_val, new_val; - int refcount; - const char *pll_name; - uint32_t enable_bit = (1 << 31); - unsigned int p, n2, r2; - - BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE); - BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE); - - switch (crtc->ddi_pll_sel) { - case PORT_CLK_SEL_LCPLL_2700: - case PORT_CLK_SEL_LCPLL_1350: - case PORT_CLK_SEL_LCPLL_810: - /* - * LCPLL should always be enabled at this point of the mode set - * sequence, so nothing to do. - */ - return; - - case PORT_CLK_SEL_SPLL: - pll_name = "SPLL"; - reg = SPLL_CTL; - refcount = plls->spll_refcount; - new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | - SPLL_PLL_SSC; - break; - - case PORT_CLK_SEL_WRPLL1: - case PORT_CLK_SEL_WRPLL2: - if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) { - pll_name = "WRPLL1"; - reg = WRPLL_CTL1; - refcount = plls->wrpll1_refcount; - } else { - pll_name = "WRPLL2"; - reg = WRPLL_CTL2; - refcount = plls->wrpll2_refcount; - } - - intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); - - new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | - WRPLL_DIVIDER_REFERENCE(r2) | - WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); - - break; - - case PORT_CLK_SEL_NONE: - WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n"); - return; - default: - WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel); - return; - } - - cur_val = I915_READ(reg); - - WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount); - if (refcount == 1) { - WARN(cur_val & enable_bit, "%s already enabled\n", pll_name); - I915_WRITE(reg, new_val); - POSTING_READ(reg); - udelay(20); - } else { - WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name); - } -} - void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->dev->dev_private; @@ -926,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) int type = intel_encoder->type; uint32_t temp; - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - + if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { temp = TRANS_MSA_SYNC_CLK; switch (intel_crtc->config.pipe_bpp) { case 18: @@ -949,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) } } +void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + uint32_t temp; + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (state == true) + temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + else + temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -995,7 +841,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) * eDP when not using the panel fitter, and when not * using motion blur mitigation (which we don't * support). 
*/ - if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled) + if (IS_HASWELL(dev) && + (intel_crtc->config.pch_pfit.enabled || + intel_crtc->config.pch_pfit.force_thru)) temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; else temp |= TRANS_DDI_EDP_INPUT_A_ON; @@ -1026,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - temp |= TRANS_DDI_MODE_SELECT_DP_SST; + if (intel_dp->is_mst) { + temp |= TRANS_DDI_MODE_SELECT_DP_MST; + } else + temp |= TRANS_DDI_MODE_SELECT_DP_SST; + + temp |= DDI_PORT_WIDTH(intel_dp->lane_count); + } else if (type == INTEL_OUTPUT_DP_MST) { + struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp; + + if (intel_dp->is_mst) { + temp |= TRANS_DDI_MODE_SELECT_DP_MST; + } else + temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(intel_dp->lane_count); } else { @@ -1043,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); uint32_t val = I915_READ(reg); - val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK); + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val |= TRANS_DDI_PORT_NONE; I915_WRITE(reg, val); } @@ -1082,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) case TRANS_DDI_MODE_SELECT_DP_SST: if (type == DRM_MODE_CONNECTOR_eDP) return true; - case TRANS_DDI_MODE_SELECT_DP_MST: return (type == DRM_MODE_CONNECTOR_DisplayPort); + case TRANS_DDI_MODE_SELECT_DP_MST: + /* if the transcoder is in MST state then + * connector isn't connected */ + return false; case TRANS_DDI_MODE_SELECT_FDI: return (type == DRM_MODE_CONNECTOR_VGA); @@ -1135,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) { + if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST) + return false; + *pipe = i; return true; } @@ -1146,76 +1012,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, return false; } -static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - uint32_t temp, ret; - enum port port = I915_MAX_PORTS; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - int i; - - if (cpu_transcoder == TRANSCODER_EDP) { - port = PORT_A; - } else { - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - temp &= TRANS_DDI_PORT_MASK; - - for (i = PORT_B; i <= PORT_E; i++) - if (temp == TRANS_DDI_SELECT_PORT(i)) - port = i; - } - - if (port == I915_MAX_PORTS) { - WARN(1, "Pipe %c enabled on an unknown port\n", - pipe_name(pipe)); - ret = PORT_CLK_SEL_NONE; - } else { - ret = I915_READ(PORT_CLK_SEL(port)); - DRM_DEBUG_KMS("Pipe %c connected to port %c using clock " - "0x%08x\n", pipe_name(pipe), port_name(port), - ret); - } - - return ret; -} - -void intel_ddi_setup_hw_pll_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - struct intel_crtc *intel_crtc; - - dev_priv->ddi_plls.spll_refcount = 0; - dev_priv->ddi_plls.wrpll1_refcount = 0; - dev_priv->ddi_plls.wrpll2_refcount = 0; - - for_each_pipe(pipe) { - intel_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - if (!intel_crtc->active) { - intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE; - continue; - } - - intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, - pipe); - - switch (intel_crtc->ddi_pll_sel) { - case 
PORT_CLK_SEL_SPLL: - dev_priv->ddi_plls.spll_refcount++; - break; - case PORT_CLK_SEL_WRPLL1: - dev_priv->ddi_plls.wrpll1_refcount++; - break; - case PORT_CLK_SEL_WRPLL2: - dev_priv->ddi_plls.wrpll2_refcount++; - break; - } - } -} - void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; @@ -1261,17 +1057,13 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_edp_panel_on(intel_dp); } - WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); - I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel); + WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE); + I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel); if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); - intel_dp->DP = intel_dig_port->saved_port_bits | - DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; - intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); + intel_ddi_init_dp_buf_reg(intel_encoder); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); @@ -1418,10 +1210,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) } } +static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll); + POSTING_READ(WRPLL_CTL(pll->id)); + udelay(20); +} + +static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t val; + + val = I915_READ(WRPLL_CTL(pll->id)); + I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE); + POSTING_READ(WRPLL_CTL(pll->id)); +} + +static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + + if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + val = I915_READ(WRPLL_CTL(pll->id)); + hw_state->wrpll = val; + + return val & WRPLL_PLL_ENABLE; +} + +static const char * const hsw_ddi_pll_names[] = { + "WRPLL 1", + "WRPLL 2", +}; + void intel_ddi_pll_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t val = I915_READ(LCPLL_CTL); + int i; + + dev_priv->num_shared_dpll = 2; + + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + dev_priv->shared_dplls[i].id = i; + dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; + dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable; + dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable; + dev_priv->shared_dplls[i].get_hw_state = + hsw_ddi_pll_get_hw_state; + } /* The LCPLL register should be turned on by the BIOS. For now let's * just check its state and print errors in case something is wrong. 
@@ -1465,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) intel_wait_ddi_buf_idle(dev_priv, port); } - val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST | + val = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + if (intel_dp->is_mst) + val |= DP_TP_CTL_MODE_MST; + else { + val |= DP_TP_CTL_MODE_SST; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + } I915_WRITE(DP_TP_CTL(port), val); POSTING_READ(DP_TP_CTL(port)); @@ -1507,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - int type = intel_encoder->type; + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base); + int type = intel_dig_port->base.type; + + if (type != INTEL_OUTPUT_DISPLAYPORT && + type != INTEL_OUTPUT_EDP && + type != INTEL_OUTPUT_UNKNOWN) { + return; + } - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) - intel_dp_check_link_status(intel_dp); + intel_dp_hot_plug(intel_encoder); } void intel_ddi_get_config(struct intel_encoder *encoder, @@ -1663,15 +1515,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port) struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; - struct intel_connector *hdmi_connector = NULL; - struct intel_connector *dp_connector = NULL; bool init_hdmi, init_dp; init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || dev_priv->vbt.ddi_port_info[port].supports_hdmi); init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp; if (!init_dp && !init_hdmi) { - DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n", + DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n", port_name(port)); init_hdmi = true; init_dp = true; @@ -1701,20 +1551,28 @@ void intel_ddi_init(struct drm_device *dev, enum port port) DDI_A_4_LANES); intel_encoder->type = INTEL_OUTPUT_UNKNOWN; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_ddi_hot_plug; - if (init_dp) - dp_connector = intel_ddi_init_dp_connector(intel_dig_port); + if (init_dp) { + if (!intel_ddi_init_dp_connector(intel_dig_port)) + goto err; + + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dev_priv->hpd_irq_port[port] = intel_dig_port; + } /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... 
*/ - if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) - hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port); - - if (!dp_connector && !hdmi_connector) { - drm_encoder_cleanup(encoder); - kfree(intel_dig_port); + if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if (!intel_ddi_init_hdmi_connector(intel_dig_port)) + goto err; } + + return; + +err: + drm_encoder_cleanup(encoder); + kfree(intel_dig_port); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f0be855ddf45..d8324c69fa86 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -39,12 +39,45 @@ #include "i915_trace.h" #include <drm/drm_dp_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_rect.h> #include <linux/dma_remapping.h> +/* Primary plane formats supported by all gen */ +#define COMMON_PRIMARY_FORMATS \ + DRM_FORMAT_C8, \ + DRM_FORMAT_RGB565, \ + DRM_FORMAT_XRGB8888, \ + DRM_FORMAT_ARGB8888 + +/* Primary plane formats for gen <= 3 */ +static const uint32_t intel_primary_formats_gen2[] = { + COMMON_PRIMARY_FORMATS, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_ARGB1555, +}; + +/* Primary plane formats for gen >= 4 */ +static const uint32_t intel_primary_formats_gen4[] = { + COMMON_PRIMARY_FORMATS, \ + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ABGR2101010, +}; + +/* Cursor formats */ +static const uint32_t intel_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + #define DIV_ROUND_CLOSEST_ULL(ll, d) \ - ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) +({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) -static void intel_increase_pllclock(struct drm_crtc *crtc); +static void intel_increase_pllclock(struct drm_device *dev, + enum pipe pipe); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); static void i9xx_crtc_clock_get(struct intel_crtc *crtc, @@ -68,6 +101,14 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc); static void intel_set_pipe_csc(struct drm_crtc *crtc); static void vlv_prepare_pll(struct intel_crtc *crtc); +static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) +{ + if (!connector->mst_port) + return connector->encoder; + else + return &connector->mst_port->mst_encoders[pipe]->base; +} + typedef struct { int min, max; } intel_range_t; @@ -1061,11 +1102,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool cur_state; struct intel_dpll_hw_state hw_state; - if (HAS_PCH_LPT(dev_priv->dev)) { - DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); - return; - } - if (WARN (!pll, "asserting DPLL %s with no DPLL\n", state_string(state))) return; @@ -1481,9 +1517,6 @@ static void intel_reset_dpio(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!IS_VALLEYVIEW(dev)) - return; - if (IS_CHERRYVIEW(dev)) { enum dpio_phy phy; u32 val; @@ -1505,26 +1538,6 @@ static void intel_reset_dpio(struct drm_device *dev) I915_WRITE(DISPLAY_PHY_CONTROL, PHY_COM_LANE_RESET_DEASSERT(phy, val)); } - - } else { - /* - * If DPIO has already been reset, e.g. by BIOS, just skip all - * this. - */ - if (I915_READ(DPIO_CTL) & DPIO_CMNRST) - return; - - /* - * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: - * Need to assert and de-assert PHY SB reset by gating the - * common lane power, then un-gating it. 
- * Simply ungating isn't enough to reset the PHY enough to get - * ports and lanes running. - */ - __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC, - false); - __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC, - true); } } @@ -1712,6 +1725,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) val &= ~DPIO_DCLKP_EN; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); + /* disable left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + mutex_unlock(&dev_priv->dpio_lock); } @@ -1749,6 +1773,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); + if (WARN_ON(pll == NULL)) + return; + WARN_ON(!pll->refcount); if (pll->active == 0) { DRM_DEBUG_DRIVER("setting up %s\n", pll->name); @@ -1790,12 +1817,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc) } WARN_ON(pll->on); + intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); + DRM_DEBUG_KMS("enabling %s\n", pll->name); pll->enable(dev_priv, pll); pll->on = true; } -static void intel_disable_shared_dpll(struct intel_crtc *crtc) +void intel_disable_shared_dpll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1826,6 +1855,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc) DRM_DEBUG_KMS("disabling %s\n", pll->name); pll->disable(dev_priv, pll); pll->on = false; + + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); } static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, @@ -2172,6 +2203,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, u32 alignment; int ret; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + switch (obj->tiling_mode) { case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) @@ -2200,6 +2233,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, if (need_vtd_wa(dev) && alignment < 256 * 1024) alignment = 256 * 1024; + /* + * Global gtt pte registers are special registers which actually forward + * writes to a chunk of system memory. Which means that there is no risk + * that the register values disappear as soon as we call + * intel_runtime_pm_put(), so it is correct to wrap only the + * pin/unpin/fence and not more. 
+ */ + intel_runtime_pm_get(dev_priv); + dev_priv->mm.interruptible = false; ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); if (ret) @@ -2217,17 +2259,21 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, i915_gem_object_pin_fence(obj); dev_priv->mm.interruptible = true; + intel_runtime_pm_put(dev_priv); return 0; err_unpin: i915_gem_object_unpin_from_display_plane(obj); err_interruptible: dev_priv->mm.interruptible = true; + intel_runtime_pm_put(dev_priv); return ret; } void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) { + WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); + i915_gem_object_unpin_fence(obj); i915_gem_object_unpin_from_display_plane(obj); } @@ -2314,6 +2360,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc, goto out_unref_obj; } + obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe); mutex_unlock(&dev->struct_mutex); DRM_DEBUG_KMS("plane fb obj %p\n", obj); @@ -2331,7 +2378,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc, struct drm_device *dev = intel_crtc->base.dev; struct drm_crtc *c; struct intel_crtc *i; - struct intel_framebuffer *fb; + struct drm_i915_gem_object *obj; if (!intel_crtc->base.primary->fb) return; @@ -2352,13 +2399,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc, if (c == &intel_crtc->base) continue; - if (!i->active || !c->primary->fb) + if (!i->active) + continue; + + obj = intel_fb_obj(c->primary->fb); + if (obj == NULL) continue; - fb = to_intel_framebuffer(c->primary->fb); - if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) { + if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { drm_framebuffer_reference(c->primary->fb); intel_crtc->base.primary->fb = c->primary->fb; + obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); break; } } @@ -2371,16 +2422,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; u32 reg; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - reg = DSPCNTR(plane); dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ @@ -2461,16 +2508,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; unsigned long linear_offset; u32 dspcntr; u32 reg; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - reg = DSPCNTR(plane); dspcntr = I915_READ(reg); /* Mask out pixel format bits in case we change it */ @@ -2546,7 +2589,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - intel_increase_pllclock(crtc); + intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe); dev_priv->display.update_primary_plane(crtc, fb, x, y); @@ -2601,7 +2644,7 @@ void intel_display_handle_reset(struct drm_device *dev) static int intel_finish_fb(struct drm_framebuffer *old_fb) { - struct drm_i915_gem_object *obj = 
to_intel_framebuffer(old_fb)->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); struct drm_i915_private *dev_priv = obj->base.dev->dev_private; bool was_interruptible = dev_priv->mm.interruptible; int ret; @@ -2647,7 +2690,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_framebuffer *old_fb; + enum pipe pipe = intel_crtc->pipe; + struct drm_framebuffer *old_fb = crtc->primary->fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); int ret; if (intel_crtc_has_pending_flip(crtc)) { @@ -2669,9 +2715,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, } mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, - to_intel_framebuffer(fb)->obj, - NULL); + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + if (ret == 0) + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(pipe)); mutex_unlock(&dev->struct_mutex); if (ret != 0) { DRM_ERROR("pin & fence failed\n"); @@ -2711,7 +2758,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, dev_priv->display.update_primary_plane(crtc, fb, x, y); - old_fb = crtc->primary->fb; + if (intel_crtc->active) + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); + crtc->primary->fb = fb; crtc->x = x; crtc->y = y; @@ -2720,13 +2769,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (intel_crtc->active && old_fb != fb) intel_wait_for_vblank(dev, intel_crtc->pipe); mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); + intel_unpin_fb_obj(old_obj); mutex_unlock(&dev->struct_mutex); } mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); return 0; @@ -3587,7 +3635,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc) lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } -static void intel_put_shared_dpll(struct intel_crtc *crtc) +void intel_put_shared_dpll(struct intel_crtc *crtc) { struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); @@ -3607,7 +3655,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc) crtc->config.shared_dpll = DPLL_ID_PRIVATE; } -static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) +struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); @@ -3818,7 +3866,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc) } /* use legacy palette for Ironlake */ - if (HAS_PCH_SPLIT(dev)) + if (!HAS_GMCH_DISPLAY(dev)) palreg = LGC_PALETTE(pipe); /* Workaround : Do not read or write the pipe palette/gamma data while @@ -3860,30 +3908,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) */ } -/** - * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware - * cursor plane briefly if not already running after enabling the display - * plane. - * This workaround avoids occasional blank screens when self refresh is - * enabled. 
- */ -static void -g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - u32 cntl = I915_READ(CURCNTR(pipe)); - - if ((cntl & CURSOR_MODE) == 0) { - u32 fw_bcl_self = I915_READ(FW_BLC_SELF); - - I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN); - I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX); - intel_wait_for_vblank(dev_priv->dev, pipe); - I915_WRITE(CURCNTR(pipe), cntl); - I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); - I915_WRITE(FW_BLC_SELF, fw_bcl_self); - } -} - static void intel_crtc_enable_planes(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3892,11 +3916,10 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; + drm_vblank_on(dev, pipe); + intel_enable_primary_hw_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); - /* The fixup needs to happen before cursor is enabled */ - if (IS_G4X(dev)) - g4x_fixup_plane(dev_priv, pipe); intel_crtc_update_cursor(crtc, true); intel_crtc_dpms_overlay(intel_crtc, true); @@ -3904,8 +3927,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); + + /* + * FIXME: Once we grow proper nuclear flip support out of this we need + * to compute the mask of flip planes precisely. For the time being + * consider this a flip from a NULL plane. + */ + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); } static void intel_crtc_disable_planes(struct drm_crtc *crtc) @@ -3917,7 +3946,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) int plane = intel_crtc->plane; intel_crtc_wait_for_pending_flips(crtc); - drm_crtc_vblank_off(crtc); if (dev_priv->fbc.plane == plane) intel_disable_fbc(dev); @@ -3928,6 +3956,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); intel_disable_primary_hw_plane(dev_priv, plane, pipe); + + /* + * FIXME: Once we grow proper nuclear flip support out of this we need + * to compute the mask of flip planes precisely. For the time being + * consider this a flip to a NULL plane. + */ + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); + + drm_vblank_off(dev, pipe); } static void ironlake_crtc_enable(struct drm_crtc *crtc) @@ -4006,8 +4043,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) cpt_verify_modeset(dev, intel_crtc->pipe); intel_crtc_enable_planes(crtc); - - drm_crtc_vblank_on(crtc); } /* IPS only exists on ULT machines and is tied to pipe A. 
*/ @@ -4059,6 +4094,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->active) return; + if (intel_crtc_to_shared_dpll(intel_crtc)) + intel_enable_shared_dpll(intel_crtc); + if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); @@ -4083,16 +4121,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); - if (intel_crtc->config.has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); - - if (intel_crtc->config.has_pch_encoder) - dev_priv->display.fdi_link_train(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); + if (intel_crtc->config.has_pch_encoder) { + intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); + dev_priv->display.fdi_link_train(crtc); + } + intel_ddi_enable_pipe_clock(intel_crtc); ironlake_pfit_enable(intel_crtc); @@ -4112,6 +4149,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) lpt_pch_enable(crtc); + if (intel_crtc->config.dp_encoder_is_mst) + intel_ddi_set_vc_payload_alloc(crtc, true); + for_each_encoder_on_crtc(dev, crtc, encoder) { encoder->enable(encoder); intel_opregion_notify_encoder(encoder, true); @@ -4121,8 +4161,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) * to change the workaround. */ haswell_mode_set_planes_workaround(intel_crtc); intel_crtc_enable_planes(crtc); - - drm_crtc_vblank_on(crtc); } static void ironlake_pfit_disable(struct intel_crtc *crtc) @@ -4161,7 +4199,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev, pipe, false); intel_disable_pipe(dev_priv, pipe); - ironlake_pfit_disable(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -4200,7 +4237,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); } @@ -4227,29 +4263,34 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); intel_disable_pipe(dev_priv, pipe); + if (intel_crtc->config.dp_encoder_is_mst) + intel_ddi_set_vc_payload_alloc(crtc, false); + intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); ironlake_pfit_disable(intel_crtc); intel_ddi_disable_pipe_clock(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); - if (intel_crtc->config.has_pch_encoder) { lpt_disable_pch_transcoder(dev_priv); intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); intel_ddi_fdi_disable(crtc); } + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->post_disable) + encoder->post_disable(encoder); + intel_crtc->active = false; intel_update_watermarks(crtc); mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); + + if (intel_crtc_to_shared_dpll(intel_crtc)) + intel_disable_shared_dpll(intel_crtc); } static void ironlake_crtc_off(struct drm_crtc *crtc) @@ -4258,10 +4299,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc) intel_put_shared_dpll(intel_crtc); } -static void haswell_crtc_off(struct drm_crtc *crtc) -{ - intel_ddi_put_crtc_pll(crtc); -} static void i9xx_pfit_enable(struct intel_crtc *crtc) { @@ -4287,6 +4324,23 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc) I915_WRITE(BCLRPAT(crtc->pipe), 0); } +static 
enum intel_display_power_domain port_to_power_domain(enum port port) +{ + switch (port) { + case PORT_A: + return POWER_DOMAIN_PORT_DDI_A_4_LANES; + case PORT_B: + return POWER_DOMAIN_PORT_DDI_B_4_LANES; + case PORT_C: + return POWER_DOMAIN_PORT_DDI_C_4_LANES; + case PORT_D: + return POWER_DOMAIN_PORT_DDI_D_4_LANES; + default: + WARN_ON_ONCE(1); + return POWER_DOMAIN_PORT_OTHER; + } +} + #define for_each_power_domain(domain, mask) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ if ((1 << (domain)) & (mask)) @@ -4305,19 +4359,10 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); - switch (intel_dig_port->port) { - case PORT_A: - return POWER_DOMAIN_PORT_DDI_A_4_LANES; - case PORT_B: - return POWER_DOMAIN_PORT_DDI_B_4_LANES; - case PORT_C: - return POWER_DOMAIN_PORT_DDI_C_4_LANES; - case PORT_D: - return POWER_DOMAIN_PORT_DDI_D_4_LANES; - default: - WARN_ON_ONCE(1); - return POWER_DOMAIN_PORT_OTHER; - } + return port_to_power_domain(intel_dig_port->port); + case INTEL_OUTPUT_DP_MST: + intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; + return port_to_power_domain(intel_dig_port->port); case INTEL_OUTPUT_ANALOG: return POWER_DOMAIN_PORT_CRT; case INTEL_OUTPUT_DSI: @@ -4333,7 +4378,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) struct intel_encoder *intel_encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; - bool pfit_enabled = intel_crtc->config.pch_pfit.enabled; unsigned long mask; enum transcoder transcoder; @@ -4341,7 +4385,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) mask = BIT(POWER_DOMAIN_PIPE(pipe)); mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); - if (pfit_enabled) + if (intel_crtc->config.pch_pfit.enabled || + intel_crtc->config.pch_pfit.force_thru) mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); for_each_encoder_on_crtc(dev, crtc, intel_encoder) @@ -4398,7 +4443,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev) intel_display_set_init_power(dev_priv, false); } -int valleyview_get_vco(struct drm_i915_private *dev_priv) +/* returns HPLL frequency in kHz */ +static int valleyview_get_vco(struct drm_i915_private *dev_priv) { int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; @@ -4408,7 +4454,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv) CCK_FUSE_HPLL_FREQ_MASK; mutex_unlock(&dev_priv->dpio_lock); - return vco_freq[hpll_freq]; + return vco_freq[hpll_freq] * 1000; +} + +static void vlv_update_cdclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev); + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz", + dev_priv->vlv_cdclk_freq); + + /* + * Program the gmbus_freq based on the cdclk frequency. + * BSpec erroneously claims we should aim for 4MHz, but + * in fact 1MHz is the correct frequency. 
+ */ + I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq); } /* Adjust CDclk dividers to allow high res or save power if possible */ @@ -4417,12 +4479,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) struct drm_i915_private *dev_priv = dev->dev_private; u32 val, cmd; - WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq); - dev_priv->vlv_cdclk_freq = cdclk; + WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); - if (cdclk >= 320) /* jump to highest voltage for 400MHz too */ + if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ cmd = 2; - else if (cdclk == 266) + else if (cdclk == 266667) cmd = 1; else cmd = 0; @@ -4439,18 +4500,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) } mutex_unlock(&dev_priv->rps.hw_lock); - if (cdclk == 400) { + if (cdclk == 400000) { u32 divider, vco; vco = valleyview_get_vco(dev_priv); - divider = ((vco << 1) / cdclk) - 1; + divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1; mutex_lock(&dev_priv->dpio_lock); /* adjust cdclk divider */ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); - val &= ~0xf; + val &= ~DISPLAY_FREQUENCY_VALUES; val |= divider; vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); + + if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & + DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), + 50)) + DRM_ERROR("timed out waiting for CDclk change\n"); mutex_unlock(&dev_priv->dpio_lock); } @@ -4463,54 +4529,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) * For high bandwidth configs, we set a higher latency in the bunit * so that the core display fetch happens in time to avoid underruns. */ - if (cdclk == 400) + if (cdclk == 400000) val |= 4500 / 250; /* 4.5 usec */ else val |= 3000 / 250; /* 3.0 usec */ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); mutex_unlock(&dev_priv->dpio_lock); - /* Since we changed the CDclk, we need to update the GMBUSFREQ too */ - intel_i2c_reset(dev); -} - -int valleyview_cur_cdclk(struct drm_i915_private *dev_priv) -{ - int cur_cdclk, vco; - int divider; - - vco = valleyview_get_vco(dev_priv); - - mutex_lock(&dev_priv->dpio_lock); - divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); - mutex_unlock(&dev_priv->dpio_lock); - - divider &= 0xf; - - cur_cdclk = (vco << 1) / (divider + 1); - - return cur_cdclk; + vlv_update_cdclk(dev); } static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, int max_pixclk) { + int vco = valleyview_get_vco(dev_priv); + int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000; + /* * Really only a few cases to deal with, as only 4 CDclks are supported: * 200MHz * 267MHz - * 320MHz + * 320/333MHz (depends on HPLL freq) * 400MHz * So we check to see whether we're above 90% of the lower bin and * adjust if needed. + * + * We seem to get an unstable or solid color picture at 200MHz. + * Not sure what's wrong. For now use 200MHz only when all pipes + * are off. 
*/ - if (max_pixclk > 288000) { - return 400; - } else if (max_pixclk > 240000) { - return 320; - } else - return 266; - /* Looks like the 200MHz CDclk freq doesn't work on some configs */ + if (max_pixclk > freq_320*9/10) + return 400000; + else if (max_pixclk > 266667*9/10) + return freq_320; + else if (max_pixclk > 0) + return 266667; + else + return 200000; } /* compute the max pixel clock for new configuration */ @@ -4633,8 +4688,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc_enable_planes(crtc); - drm_crtc_vblank_on(crtc); - /* Underruns don't raise interrupts, so check manually. */ i9xx_check_fifo_underruns(dev); } @@ -4727,8 +4780,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) if (IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); - drm_crtc_vblank_on(crtc); - /* Underruns don't raise interrupts, so check manually. */ i9xx_check_fifo_underruns(dev); } @@ -4768,6 +4819,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); + /* + * Vblank time updates from the shadow to live plane control register + * are blocked if the memory self-refresh mode is active at that + * moment. So to make sure the plane gets truly disabled, disable + * first the self-refresh mode. The self-refresh enable bit in turn + * will be checked/applied by the HW only at the next frame start + * event which is after the vblank start event, so we need to have a + * wait-for-vblank between disabling the plane and the pipe. + */ + intel_set_memory_cxsr(dev_priv, false); intel_crtc_disable_planes(crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -4776,9 +4837,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) /* * On gen2 planes are double buffered but the pipe isn't, so we must * wait for planes to fully turn off before disabling the pipe. + * We also need to wait on all gmch platforms because of the + * self-refresh mode constraint explained above. */ - if (IS_GEN2(dev)) - intel_wait_for_vblank(dev, pipe); + intel_wait_for_vblank(dev, pipe); intel_disable_pipe(dev_priv, pipe); @@ -4805,7 +4867,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); - intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); } @@ -4843,23 +4904,49 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc, } } +/* Master function to enable/disable CRTC and corresponding power wells */ +void intel_crtc_control(struct drm_crtc *crtc, bool enable) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum intel_display_power_domain domain; + unsigned long domains; + + if (enable) { + if (!intel_crtc->active) { + domains = get_crtc_power_domains(crtc); + for_each_power_domain(domain, domains) + intel_display_power_get(dev_priv, domain); + intel_crtc->enabled_power_domains = domains; + + dev_priv->display.crtc_enable(crtc); + } + } else { + if (intel_crtc->active) { + dev_priv->display.crtc_disable(crtc); + + domains = intel_crtc->enabled_power_domains; + for_each_power_domain(domain, domains) + intel_display_power_put(dev_priv, domain); + intel_crtc->enabled_power_domains = 0; + } + } +} + /** * Sets the power management mode of the pipe and plane. 
*/ void intel_crtc_update_dpms(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; bool enable = false; for_each_encoder_on_crtc(dev, crtc, intel_encoder) enable |= intel_encoder->connectors_active; - if (enable) - dev_priv->display.crtc_enable(crtc); - else - dev_priv->display.crtc_disable(crtc); + intel_crtc_control(crtc, enable); intel_crtc_update_sarea(crtc, enable); } @@ -4869,6 +4956,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_connector *connector; struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb); + enum pipe pipe = to_intel_crtc(crtc)->pipe; /* crtc should still be enabled when we disable it. */ WARN_ON(!crtc->enabled); @@ -4877,13 +4966,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc) intel_crtc_update_sarea(crtc, false); dev_priv->display.off(crtc); - assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); - assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe); - assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); - if (crtc->primary->fb) { mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj); + intel_unpin_fb_obj(old_obj); + i915_gem_track_fb(old_obj, NULL, + INTEL_FRONTBUFFER_PRIMARY(pipe)); mutex_unlock(&dev->struct_mutex); crtc->primary->fb = NULL; } @@ -4939,24 +5026,31 @@ static void intel_connector_check_state(struct intel_connector *connector) connector->base.base.id, connector->base.name); + /* there is no real hw state for MST connectors */ + if (connector->mst_port) + return; + WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, "wrong connector dpms state\n"); WARN(connector->base.encoder != &encoder->base, "active connector not linked to encoder\n"); - WARN(!encoder->connectors_active, - "encoder->connectors_active not set\n"); - encoder_enabled = encoder->get_hw_state(encoder, &pipe); - WARN(!encoder_enabled, "encoder not enabled\n"); - if (WARN_ON(!encoder->base.crtc)) - return; + if (encoder) { + WARN(!encoder->connectors_active, + "encoder->connectors_active not set\n"); + + encoder_enabled = encoder->get_hw_state(encoder, &pipe); + WARN(!encoder_enabled, "encoder not enabled\n"); + if (WARN_ON(!encoder->base.crtc)) + return; - crtc = encoder->base.crtc; + crtc = encoder->base.crtc; - WARN(!crtc->enabled, "crtc not enabled\n"); - WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); - WARN(pipe != to_intel_crtc(crtc)->pipe, - "encoder active on the wrong pipe\n"); + WARN(!crtc->enabled, "crtc not enabled\n"); + WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); + WARN(pipe != to_intel_crtc(crtc)->pipe, + "encoder active on the wrong pipe\n"); + } } } @@ -5161,9 +5255,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, if (HAS_IPS(dev)) hsw_compute_ips_config(crtc, pipe_config); - /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old - * clock survives for now. */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + /* + * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the + * old clock survives for now. 
+ */ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev)) pipe_config->shared_dpll = crtc->config.shared_dpll; if (pipe_config->has_pch_encoder) @@ -5174,7 +5270,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, static int valleyview_get_display_clock_speed(struct drm_device *dev) { - return 400000; /* FIXME */ + struct drm_i915_private *dev_priv = dev->dev_private; + int vco = valleyview_get_vco(dev_priv); + u32 val; + int divider; + + mutex_lock(&dev_priv->dpio_lock); + val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); + mutex_unlock(&dev_priv->dpio_lock); + + divider = val & DISPLAY_FREQUENCY_VALUES; + + WARN((val & DISPLAY_FREQUENCY_STATUS) != + (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), + "cdclk change in progress\n"); + + return DIV_ROUND_CLOSEST(vco << 1, divider + 1); } static int i945_get_display_clock_speed(struct drm_device *dev) @@ -6060,6 +6171,10 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, u32 mdiv; int refclk = 100000; + /* In case of MIPI DPLL will not even be used */ + if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)) + return; + mutex_lock(&dev_priv->dpio_lock); mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); mutex_unlock(&dev_priv->dpio_lock); @@ -6125,8 +6240,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height, PAGE_SIZE); + plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * + aligned_height); DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, @@ -7145,8 +7260,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc, aligned_height = intel_align_height(dev, crtc->base.primary->fb->height, plane_config->tiled); - plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] * - aligned_height, PAGE_SIZE); + plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] * + aligned_height); DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe, plane, crtc->base.primary->fb->width, @@ -7163,6 +7278,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; + if (!intel_display_power_enabled(dev_priv, + POWER_DOMAIN_PIPE(crtc->pipe))) + return false; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; @@ -7237,7 +7356,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - struct intel_ddi_plls *plls = &dev_priv->ddi_plls; struct intel_crtc *crtc; for_each_intel_crtc(dev, crtc) @@ -7245,14 +7363,15 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) pipe_name(crtc->pipe)); WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); - WARN(plls->spll_refcount, "SPLL enabled\n"); - WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); - WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); + WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); + WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); + WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU 
PWM1 enabled\n"); - WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, - "CPU PWM2 enabled\n"); + if (IS_HASWELL(dev)) + WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + "CPU PWM2 enabled\n"); WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, "PCH PWM1 enabled\n"); WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, @@ -7265,7 +7384,17 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) * gen-specific and since we only disable LCPLL after we fully disable * the interrupts, the check below should be enough. */ - WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n"); + WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); +} + +static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + + if (IS_HASWELL(dev)) + return I915_READ(D_COMP_HSW); + else + return I915_READ(D_COMP_BDW); } static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) @@ -7276,12 +7405,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) mutex_lock(&dev_priv->rps.hw_lock); if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) - DRM_ERROR("Failed to disable D_COMP\n"); + DRM_ERROR("Failed to write to D_COMP\n"); mutex_unlock(&dev_priv->rps.hw_lock); } else { - I915_WRITE(D_COMP, val); + I915_WRITE(D_COMP_BDW, val); + POSTING_READ(D_COMP_BDW); } - POSTING_READ(D_COMP); } /* @@ -7319,12 +7448,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) DRM_ERROR("LCPLL still locked\n"); - val = I915_READ(D_COMP); + val = hsw_read_dcomp(dev_priv); val |= D_COMP_COMP_DISABLE; hsw_write_dcomp(dev_priv, val); ndelay(100); - if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) + if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, + 1)) DRM_ERROR("D_COMP RCOMP still in progress\n"); if (allow_power_down) { @@ -7373,7 +7503,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) POSTING_READ(LCPLL_CTL); } - val = I915_READ(D_COMP); + val = hsw_read_dcomp(dev_priv); val |= D_COMP_COMP_FORCE; val &= ~D_COMP_COMP_DISABLE; hsw_write_dcomp(dev_priv, val); @@ -7479,13 +7609,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, if (!intel_ddi_pll_select(intel_crtc)) return -EINVAL; - intel_ddi_pll_enable(intel_crtc); intel_crtc->lowfreq_avail = false; return 0; } +static void haswell_get_ddi_port_state(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_shared_dpll *pll; + enum port port; + uint32_t tmp; + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); + + port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; + + pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); + + switch (pipe_config->ddi_pll_sel) { + case PORT_CLK_SEL_WRPLL1: + pipe_config->shared_dpll = DPLL_ID_WRPLL1; + break; + case PORT_CLK_SEL_WRPLL2: + pipe_config->shared_dpll = DPLL_ID_WRPLL2; + break; + } + + if (pipe_config->shared_dpll >= 0) { + pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; + + WARN_ON(!pll->get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); + } + + /* + * Haswell has only FDI/PCH transcoder A. It is which is connected to + * DDI E. So just check whether this pipe is wired to DDI E and whether + * the PCH transcoder is on. 
+ */ + if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { + pipe_config->has_pch_encoder = true; + + tmp = I915_READ(FDI_RX_CTL(PIPE_A)); + pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ironlake_get_fdi_m_n_config(crtc, pipe_config); + } +} + static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { @@ -7531,22 +7707,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) return false; - /* - * Haswell has only FDI/PCH transcoder A. It is which is connected to - * DDI E. So just check whether this pipe is wired to DDI E and whether - * the PCH transcoder is on. - */ - tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); - if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) && - I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { - pipe_config->has_pch_encoder = true; - - tmp = I915_READ(FDI_RX_CTL(PIPE_A)); - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> - FDI_DP_PORT_WIDTH_SHIFT) + 1; - - ironlake_get_fdi_m_n_config(crtc, pipe_config); - } + haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); @@ -7991,8 +8152,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int x = intel_crtc->cursor_x; - int y = intel_crtc->cursor_y; + int x = crtc->cursor_x; + int y = crtc->cursor_y; u32 base = 0, pos = 0; if (on) @@ -8036,21 +8197,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, intel_crtc->cursor_base = base; } -static int intel_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file, - uint32_t handle, - uint32_t width, uint32_t height) +/* + * intel_crtc_cursor_set_obj - Set cursor to specified GEM object + * + * Note that the object's reference will be consumed if the update fails. If + * the update succeeds, the reference of the old object (if any) will be + * consumed. + */ +static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, + struct drm_i915_gem_object *obj, + uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj; + enum pipe pipe = intel_crtc->pipe; unsigned old_width; uint32_t addr; int ret; /* if we want to turn off the cursor ignore width and height */ - if (!handle) { + if (!obj) { DRM_DEBUG_KMS("cursor off\n"); addr = 0; obj = NULL; @@ -8066,12 +8233,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); - if (&obj->base == NULL) - return -ENOENT; - if (obj->base.size < width * height * 4) { - DRM_DEBUG_KMS("buffer is to small\n"); + DRM_DEBUG_KMS("buffer is too small\n"); ret = -ENOMEM; goto fail; } @@ -8087,6 +8250,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, goto fail_locked; } + /* + * Global gtt pte registers are special registers which actually + * forward writes to a chunk of system memory. Which means that + * there is no risk that the register values disappear as soon + * as we call intel_runtime_pm_put(), so it is correct to wrap + * only the pin/unpin/fence and not more. + */ + intel_runtime_pm_get(dev_priv); + /* Note that the w/a also requires 2 PTE of padding following * the bo. 
We currently fill all unused PTE with the shadow * page and so we should always have valid PTE following the @@ -8099,16 +8271,20 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); if (ret) { DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); + intel_runtime_pm_put(dev_priv); goto fail_locked; } ret = i915_gem_object_put_fence(obj); if (ret) { DRM_DEBUG_KMS("failed to release fence for cursor"); + intel_runtime_pm_put(dev_priv); goto fail_unpin; } addr = i915_gem_obj_ggtt_offset(obj); + + intel_runtime_pm_put(dev_priv); } else { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_object_attach_phys(obj, align); @@ -8126,9 +8302,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (intel_crtc->cursor_bo) { if (!INTEL_INFO(dev)->cursor_needs_physical) i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); - drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } + i915_gem_track_fb(intel_crtc->cursor_bo, obj, + INTEL_FRONTBUFFER_CURSOR(pipe)); mutex_unlock(&dev->struct_mutex); old_width = intel_crtc->cursor_width; @@ -8144,6 +8321,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); } + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe)); + return 0; fail_unpin: i915_gem_object_unpin_from_display_plane(obj); @@ -8154,19 +8333,6 @@ fail: return ret; } -static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX); - intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX); - - if (intel_crtc->active) - intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); - - return 0; -} - static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size) { @@ -8242,7 +8408,7 @@ static u32 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) { u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); - return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); + return PAGE_ALIGN(pitch * mode->vdisplay); } static struct drm_framebuffer * @@ -8319,8 +8485,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, connector->base.id, connector->name, encoder->base.id, encoder->name); - drm_modeset_acquire_init(ctx, 0); - retry: ret = drm_modeset_lock(&config->connection_mutex, ctx); if (ret) @@ -8359,10 +8523,14 @@ retry: i++; if (!(encoder->possible_crtcs & (1 << i))) continue; - if (!possible_crtc->enabled) { - crtc = possible_crtc; - break; - } + if (possible_crtc->enabled) + continue; + /* This can occur when applying the pipe A quirk on resume. 
*/ + if (to_intel_crtc(possible_crtc)->new_enabled) + continue; + + crtc = possible_crtc; + break; } /* @@ -8431,15 +8599,11 @@ fail_unlock: goto retry; } - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); - return false; } void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx) + struct intel_load_detect_pipe *old) { struct intel_encoder *intel_encoder = intel_attached_encoder(connector); @@ -8463,17 +8627,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, drm_framebuffer_unreference(old->release_fb); } - goto unlock; return; } /* Switch crtc and encoder back off if necessary */ if (old->dpms_mode != DRM_MODE_DPMS_ON) connector->funcs->dpms(connector, old->dpms_mode); - -unlock: - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); } static int i9xx_pll_refclk(struct drm_device *dev, @@ -8667,16 +8826,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -static void intel_increase_pllclock(struct drm_crtc *crtc) +static void intel_increase_pllclock(struct drm_device *dev, + enum pipe pipe) { - struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; int dpll_reg = DPLL(pipe); int dpll; - if (HAS_PCH_SPLIT(dev)) + if (!HAS_GMCH_DISPLAY(dev)) return; if (!dev_priv->lvds_downclock_avail) @@ -8704,7 +8861,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (HAS_PCH_SPLIT(dev)) + if (!HAS_GMCH_DISPLAY(dev)) return; if (!dev_priv->lvds_downclock_avail) @@ -8773,28 +8930,179 @@ out: intel_runtime_pm_put(dev_priv); } -void intel_mark_fb_busy(struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring) + +/** + * intel_mark_fb_busy - mark given planes as busy + * @dev: DRM device + * @frontbuffer_bits: bits for the affected planes + * @ring: optional ring for asynchronous commands + * + * This function gets called every time the screen contents change. It can be + * used to keep e.g. the update rate at the nominal refresh rate with DRRS. + */ +static void intel_mark_fb_busy(struct drm_device *dev, + unsigned frontbuffer_bits, + struct intel_engine_cs *ring) { - struct drm_device *dev = obj->base.dev; - struct drm_crtc *crtc; + enum pipe pipe; if (!i915.powersave) return; - for_each_crtc(dev, crtc) { - if (!crtc->primary->fb) - continue; - - if (to_intel_framebuffer(crtc->primary->fb)->obj != obj) + for_each_pipe(pipe) { + if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) continue; - intel_increase_pllclock(crtc); + intel_increase_pllclock(dev, pipe); if (ring && intel_fbc_enabled(dev)) ring->fbc_dirty = true; } } +/** + * intel_fb_obj_invalidate - invalidate frontbuffer object + * @obj: GEM object to invalidate + * @ring: set for asynchronous rendering + * + * This function gets called every time rendering on the given object starts and + * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must + * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed + * until the rendering completes or a flip on this frontbuffer plane is + * scheduled. 
+ */ +void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (!obj->frontbuffer_bits) + return; + + if (ring) { + mutex_lock(&dev_priv->fb_tracking.lock); + dev_priv->fb_tracking.busy_bits + |= obj->frontbuffer_bits; + dev_priv->fb_tracking.flip_bits + &= ~obj->frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + } + + intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring); + + intel_edp_psr_invalidate(dev, obj->frontbuffer_bits); +} + +/** + * intel_frontbuffer_flush - flush frontbuffer + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes has + * completed and frontbuffer caching can be started again. Flushes will get + * delayed if they're blocked by some outstanding asynchronous rendering. + * + * Can be called without any locks held. + */ +void intel_frontbuffer_flush(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Delay flushing when rings are still busy.*/ + mutex_lock(&dev_priv->fb_tracking.lock); + frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + + intel_mark_fb_busy(dev, frontbuffer_bits, NULL); + + intel_edp_psr_flush(dev, frontbuffer_bits); +} + +/** + * intel_fb_obj_flush - flush frontbuffer object + * @obj: GEM object to flush + * @retire: set when retiring asynchronous rendering + * + * This function gets called every time rendering on the given object has + * completed and frontbuffer caching can be started again. If @retire is true + * then any delayed flushes will be unblocked. + */ +void intel_fb_obj_flush(struct drm_i915_gem_object *obj, + bool retire) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned frontbuffer_bits; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (!obj->frontbuffer_bits) + return; + + frontbuffer_bits = obj->frontbuffer_bits; + + if (retire) { + mutex_lock(&dev_priv->fb_tracking.lock); + /* Filter out new bits since rendering started. */ + frontbuffer_bits &= dev_priv->fb_tracking.busy_bits; + + dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + } + + intel_frontbuffer_flush(dev, frontbuffer_bits); +} + +/** + * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after scheduling a flip on @obj. The actual + * frontbuffer flushing will be delayed until completion is signalled with + * intel_frontbuffer_flip_complete. If an invalidate happens in between, this + * flush will be cancelled. + * + * Can be called without any locks held.
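The invalidate/flush/flip helpers in this hunk coordinate through the busy_bits and flip_bits masks. The following single-threaded sketch (hypothetical struct, no locking, not kernel code) models those transitions: an invalidate marks planes busy and cancels a pending flip flush, retirement only flushes bits that were actually busy, and a completed flip only flushes bits that were not cancelled in between.

#include <assert.h>

/* Toy model of the frontbuffer tracking masks (no locking, not kernel code). */
struct fb_tracking {
        unsigned busy_bits;     /* planes with outstanding GPU rendering */
        unsigned flip_bits;     /* planes with a flip queued but not completed */
};

static void invalidate(struct fb_tracking *t, unsigned bits)
{
        t->busy_bits |= bits;   /* rendering started */
        t->flip_bits &= ~bits;  /* cancel any pending flip flush */
}

static unsigned flush_on_retire(struct fb_tracking *t, unsigned bits)
{
        bits &= t->busy_bits;   /* only flush what was actually busy */
        t->busy_bits &= ~bits;
        return bits;            /* caller would flush these planes */
}

static void flip_prepare(struct fb_tracking *t, unsigned bits)
{
        t->flip_bits |= bits;
}

static unsigned flip_complete(struct fb_tracking *t, unsigned bits)
{
        bits &= t->flip_bits;   /* drop flips cancelled by an invalidate */
        t->flip_bits &= ~bits;
        return bits;
}

int main(void)
{
        struct fb_tracking t = { 0, 0 };

        flip_prepare(&t, 0x1);
        invalidate(&t, 0x1);                   /* rendering cancels the flip */
        assert(flip_complete(&t, 0x1) == 0x0); /* nothing left to flush */
        assert(flush_on_retire(&t, 0x1) == 0x1);
        return 0;
}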
+ */ +void intel_frontbuffer_flip_prepare(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev_priv->fb_tracking.lock); + dev_priv->fb_tracking.flip_bits + |= frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); +} + +/** + * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after the flip has been latched and will complete + * on the next vblank. It will execute the flush if it hasn't been cancelled yet. + * + * Can be called without any locks held. + */ +void intel_frontbuffer_flip_complete(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev_priv->fb_tracking.lock); + /* Mask any cancelled flips. */ + frontbuffer_bits &= dev_priv->fb_tracking.flip_bits; + dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; + mutex_unlock(&dev_priv->fb_tracking.lock); + + intel_frontbuffer_flush(dev, frontbuffer_bits); +} + static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -8812,8 +9120,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) kfree(work); } - intel_crtc_cursor_set(crtc, NULL, 0, 0, 0); - drm_crtc_cleanup(crtc); kfree(intel_crtc); @@ -8824,6 +9130,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) struct intel_unpin_work *work = container_of(__work, struct intel_unpin_work, work); struct drm_device *dev = work->crtc->dev; + enum pipe pipe = to_intel_crtc(work->crtc)->pipe; mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb_obj); @@ -8833,6 +9140,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); + intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); + BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); @@ -9202,6 +9511,150 @@ static int intel_gen7_queue_flip(struct drm_device *dev, return 0; } +static bool use_mmio_flip(struct intel_engine_cs *ring, + struct drm_i915_gem_object *obj) +{ + /* + * This is not being used for older platforms, because + * non-availability of flip done interrupt forces us to use + * CS flips. Older platforms derive flip done using some clever + * tricks involving the flip_pending status bits and vblank irqs. + * So using MMIO flips there would disrupt this mechanism.
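The MMIO-flip code that follows defers the actual flip until the ring's current seqno has passed the framebuffer's last write seqno. Sequence-number comparisons of this kind are normally written to survive 32-bit wraparound; a standalone sketch of such a wrap-safe test is below (illustrative helper, assumed to mirror the driver's comparison rather than copied from it).

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Wrap-safe "a has passed b" test for monotonically increasing 32-bit
 * sequence numbers: the signed difference stays non-negative as long as the
 * two values are less than 2^31 apart, even across a wraparound.
 */
static bool seqno_passed(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) >= 0;
}

int main(void)
{
        assert(seqno_passed(100, 90));
        assert(!seqno_passed(90, 100));
        /* Works across the 32-bit wrap as well. */
        assert(seqno_passed(5, UINT32_MAX - 5));
        return 0;
}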
+ */ + + if (ring == NULL) + return true; + + if (INTEL_INFO(ring->dev)->gen < 5) + return false; + + if (i915.use_mmio_flip < 0) + return false; + else if (i915.use_mmio_flip > 0) + return true; + else + return ring != obj->ring; +} + +static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_framebuffer *intel_fb = + to_intel_framebuffer(intel_crtc->base.primary->fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + u32 dspcntr; + u32 reg; + + intel_mark_page_flip_active(intel_crtc); + + reg = DSPCNTR(intel_crtc->plane); + dspcntr = I915_READ(reg); + + if (INTEL_INFO(dev)->gen >= 4) { + if (obj->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; + else + dspcntr &= ~DISPPLANE_TILED; + } + I915_WRITE(reg, dspcntr); + + I915_WRITE(DSPSURF(intel_crtc->plane), + intel_crtc->unpin_work->gtt_offset); + POSTING_READ(DSPSURF(intel_crtc->plane)); +} + +static int intel_postpone_flip(struct drm_i915_gem_object *obj) +{ + struct intel_engine_cs *ring; + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + if (!obj->last_write_seqno) + return 0; + + ring = obj->ring; + + if (i915_seqno_passed(ring->get_seqno(ring, true), + obj->last_write_seqno)) + return 0; + + ret = i915_gem_check_olr(ring, obj->last_write_seqno); + if (ret) + return ret; + + if (WARN_ON(!ring->irq_get(ring))) + return 0; + + return 1; +} + +void intel_notify_mmio_flip(struct intel_engine_cs *ring) +{ + struct drm_i915_private *dev_priv = to_i915(ring->dev); + struct intel_crtc *intel_crtc; + unsigned long irq_flags; + u32 seqno; + + seqno = ring->get_seqno(ring, false); + + spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); + for_each_intel_crtc(ring->dev, intel_crtc) { + struct intel_mmio_flip *mmio_flip; + + mmio_flip = &intel_crtc->mmio_flip; + if (mmio_flip->seqno == 0) + continue; + + if (ring->id != mmio_flip->ring_id) + continue; + + if (i915_seqno_passed(seqno, mmio_flip->seqno)) { + intel_do_mmio_flip(intel_crtc); + mmio_flip->seqno = 0; + ring->irq_put(ring); + } + } + spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); +} + +static int intel_queue_mmio_flip(struct drm_device *dev, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring, + uint32_t flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + unsigned long irq_flags; + int ret; + + if (WARN_ON(intel_crtc->mmio_flip.seqno)) + return -EBUSY; + + ret = intel_postpone_flip(obj); + if (ret < 0) + return ret; + if (ret == 0) { + intel_do_mmio_flip(intel_crtc); + return 0; + } + + spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); + intel_crtc->mmio_flip.seqno = obj->last_write_seqno; + intel_crtc->mmio_flip.ring_id = obj->ring->id; + spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); + + /* + * Double check to catch cases where irq fired before + * mmio flip data was ready + */ + intel_notify_mmio_flip(obj->ring); + return 0; +} + static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -9220,13 +9673,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *old_fb = crtc->primary->fb; - struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; + struct 
drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; struct intel_unpin_work *work; struct intel_engine_cs *ring; unsigned long flags; int ret; + /* + * drm_mode_page_flip_ioctl() should already catch this, but double + * check to be safe. In the future we may enable pageflipping from + * a disabled primary plane. + */ + if (WARN_ON(intel_fb_obj(old_fb) == NULL)) + return -EBUSY; + /* Can't change pixel format via MI display flips. */ if (fb->pixel_format != crtc->primary->fb->pixel_format) return -EINVAL; @@ -9249,7 +9711,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; - work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; + work->old_fb_obj = intel_fb_obj(old_fb); INIT_WORK(&work->work, intel_unpin_work_fn); ret = drm_crtc_vblank_get(crtc); @@ -9290,10 +9752,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) - work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1; + work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1; if (IS_VALLEYVIEW(dev)) { ring = &dev_priv->ring[BCS]; + if (obj->tiling_mode != work->old_fb_obj->tiling_mode) + /* vlv: DISPLAY_FLIP fails to change tiling */ + ring = NULL; + } else if (IS_IVYBRIDGE(dev)) { + ring = &dev_priv->ring[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { ring = obj->ring; if (ring == NULL || ring->id != RCS) @@ -9309,12 +9776,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset = i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; - ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags); + if (use_mmio_flip(ring, obj)) + ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, + page_flip_flags); + else + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, + page_flip_flags); if (ret) goto cleanup_unpin; + i915_gem_track_fb(work->old_fb_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(pipe)); + intel_disable_fbc(dev); - intel_mark_fb_busy(obj, NULL); + intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); mutex_unlock(&dev->struct_mutex); trace_i915_flip_request(intel_crtc->plane, obj); @@ -9344,7 +9819,7 @@ out_hang: intel_crtc_wait_for_pending_flips(crtc); ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); if (ret == 0 && event) - drm_send_vblank_event(dev, intel_crtc->pipe, event); + drm_send_vblank_event(dev, pipe, event); } return ret; } @@ -10017,11 +10492,14 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(double_wide); + PIPE_CONF_CHECK_X(ddi_pll_sel); + PIPE_CONF_CHECK_I(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); + PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) PIPE_CONF_CHECK_I(pipe_bpp); @@ -10083,6 +10561,14 @@ check_encoder_state(struct drm_device *dev) if (connector->base.dpms != DRM_MODE_DPMS_OFF) active = true; } + /* + * for MST connectors if we unplug the connector is gone + * away but the encoder is still connected to a crtc + * until a modeset happens in response to the hotplug. 
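The PIPE_CONF_CHECK_I/PIPE_CONF_CHECK_X additions above extend the field-by-field comparison of the computed pipe config against the state read back from hardware. A small standalone sketch of that macro pattern, using a hypothetical trimmed-down config struct, is shown here.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, heavily trimmed pipe configuration. */
struct toy_pipe_config {
        int pipe_bpp;
        unsigned ddi_pll_sel;
};

/* Compare one field in decimal or hex and report any mismatch. */
#define CHECK_I(name) do { \
        if (cur->name != hw->name) { \
                printf("mismatch in " #name ": %d vs %d\n", cur->name, hw->name); \
                ok = false; \
        } \
} while (0)

#define CHECK_X(name) do { \
        if (cur->name != hw->name) { \
                printf("mismatch in " #name ": 0x%x vs 0x%x\n", cur->name, hw->name); \
                ok = false; \
        } \
} while (0)

static bool compare(const struct toy_pipe_config *cur,
                    const struct toy_pipe_config *hw)
{
        bool ok = true;

        CHECK_I(pipe_bpp);
        CHECK_X(ddi_pll_sel);
        return ok;
}

int main(void)
{
        struct toy_pipe_config sw = { .pipe_bpp = 24, .ddi_pll_sel = 0x20000000 };
        struct toy_pipe_config hw = { .pipe_bpp = 18, .ddi_pll_sel = 0x20000000 };

        /* Reports the pipe_bpp mismatch; exit status reflects the result. */
        return compare(&sw, &hw) ? 0 : 1;
}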
+ */ + if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST) + continue; + WARN(!!encoder->base.crtc != enabled, "encoder's enabled state mismatch " "(expected %i, found %i)\n", @@ -10378,20 +10864,23 @@ static int __intel_set_mode(struct drm_crtc *crtc, * on the DPLL. */ for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { - struct drm_framebuffer *old_fb; + struct drm_framebuffer *old_fb = crtc->primary->fb; + struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, - to_intel_framebuffer(fb)->obj, + obj, NULL); if (ret != 0) { DRM_ERROR("pin & fence failed\n"); mutex_unlock(&dev->struct_mutex); goto done; } - old_fb = crtc->primary->fb; if (old_fb) - intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); + intel_unpin_fb_obj(old_obj); + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); mutex_unlock(&dev->struct_mutex); crtc->primary->fb = fb; @@ -10563,12 +11052,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, if (is_crtc_connector_off(set)) { config->mode_changed = true; } else if (set->crtc->primary->fb != set->fb) { - /* If we have no fb then treat it as a full mode set */ + /* + * If we have no fb, we can only flip as long as the crtc is + * active, otherwise we need a full mode set. The crtc may + * be active if we've only disabled the primary plane, or + * in fastboot situations. + */ if (set->crtc->primary->fb == NULL) { struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); - if (intel_crtc->active && i915.fastboot) { + if (intel_crtc->active) { DRM_DEBUG_KMS("crtc has no fb, will flip\n"); config->fb_changed = true; } else { @@ -10620,7 +11114,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, * for them. */ for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == &connector->base) { - connector->new_encoder = connector->encoder; + connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe); break; } } @@ -10666,7 +11160,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, new_crtc)) { return -EINVAL; } - connector->encoder->new_crtc = to_intel_crtc(new_crtc); + connector->new_encoder->new_crtc = to_intel_crtc(new_crtc); DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", connector->base.base.id, @@ -10700,7 +11194,12 @@ intel_modeset_stage_output_state(struct drm_device *dev, } } /* Now we've also updated encoder->new_crtc for all encoders. */ - + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->new_encoder) + if (connector->new_encoder != connector->encoder) + connector->encoder = connector->new_encoder; + } for_each_intel_crtc(dev, crtc) { crtc->new_enabled = false; @@ -10806,10 +11305,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set) ret = intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb); } else if (config->fb_changed) { + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); + intel_crtc_wait_for_pending_flips(set->crtc); ret = intel_pipe_set_base(set->crtc, set->x, set->y, set->fb); + + /* + * We need to make sure the primary plane is re-enabled if it + * has previously been turned off. 
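Several hunks in this area hand a plane's frontbuffer bits from the old backing object to the new one via i915_gem_track_fb(old, new, bits). A toy model of that handoff (hypothetical struct, no locking) is sketched below.

#include <assert.h>
#include <stddef.h>

/* Toy GEM object carrying only the frontbuffer bits it currently backs. */
struct toy_obj {
        unsigned frontbuffer_bits;
};

/* Move the given plane bits from @old (if any) to @new (if any). */
static void track_fb(struct toy_obj *old, struct toy_obj *new,
                     unsigned frontbuffer_bits)
{
        if (old)
                old->frontbuffer_bits &= ~frontbuffer_bits;
        if (new)
                new->frontbuffer_bits |= frontbuffer_bits;
}

int main(void)
{
        struct toy_obj old = { .frontbuffer_bits = 0x1 };
        struct toy_obj new = { .frontbuffer_bits = 0 };

        track_fb(&old, &new, 0x1);      /* page flip: plane now scans out @new */
        assert(old.frontbuffer_bits == 0);
        assert(new.frontbuffer_bits == 0x1);

        track_fb(&new, NULL, 0x1);      /* plane disabled: nothing backs it */
        assert(new.frontbuffer_bits == 0);
        return 0;
}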
+ */ + if (!intel_crtc->primary_enabled && ret == 0) { + WARN_ON(!intel_crtc->active); + intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, + intel_crtc->pipe); + } + /* * In the fastboot case this may be our only check of the * state after boot. It would be better to only do it on @@ -10850,26 +11363,21 @@ out_config: } static const struct drm_crtc_funcs intel_crtc_funcs = { - .cursor_set = intel_crtc_cursor_set, - .cursor_move = intel_crtc_cursor_move, .gamma_set = intel_crtc_gamma_set, .set_config = intel_crtc_set_config, .destroy = intel_crtc_destroy, .page_flip = intel_crtc_page_flip, }; -static void intel_cpu_pll_init(struct drm_device *dev) -{ - if (HAS_DDI(dev)) - intel_ddi_pll_init(dev); -} - static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { uint32_t val; + if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + val = I915_READ(PCH_DPLL(pll->id)); hw_state->dpll = val; hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); @@ -10951,7 +11459,9 @@ static void intel_shared_dpll_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + if (HAS_DDI(dev)) + intel_ddi_pll_init(dev); + else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ibx_pch_dpll_init(dev); else dev_priv->num_shared_dpll = 0; @@ -10959,17 +11469,328 @@ static void intel_shared_dpll_init(struct drm_device *dev) BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); } +static int +intel_primary_plane_disable(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_plane *intel_plane = to_intel_plane(plane); + struct intel_crtc *intel_crtc; + + if (!plane->fb) + return 0; + + BUG_ON(!plane->crtc); + + intel_crtc = to_intel_crtc(plane->crtc); + + /* + * Even though we checked plane->fb above, it's still possible that + * the primary plane has been implicitly disabled because the crtc + * coordinates given weren't visible, or because we detected + * that it was 100% covered by a sprite plane. Or, the CRTC may be + * off and we've set a fb, but haven't actually turned on the CRTC yet. + * In either case, we need to unpin the FB and let the fb pointer get + * updated, but otherwise we don't need to touch the hardware. 
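The primary-plane update path below builds integer destination and clip rectangles plus a 16.16 fixed-point source rectangle and lets drm_plane_helper_check_update() decide whether anything remains visible. The core of such a check reduces to a rectangle intersection, roughly as in this standalone sketch (simplified, no scaling, fixed point shown only as a conversion).

#include <assert.h>
#include <stdbool.h>

struct rect { int x1, y1, x2, y2; };

/* Clip @r against @clip and report whether anything is left visible. */
static bool clip_rect(struct rect *r, const struct rect *clip)
{
        if (r->x1 < clip->x1) r->x1 = clip->x1;
        if (r->y1 < clip->y1) r->y1 = clip->y1;
        if (r->x2 > clip->x2) r->x2 = clip->x2;
        if (r->y2 > clip->y2) r->y2 = clip->y2;
        return r->x1 < r->x2 && r->y1 < r->y2;
}

int main(void)
{
        /* Pipe source size acts as the clip window when the CRTC is active. */
        const struct rect clip = { 0, 0, 1920, 1080 };
        struct rect dst = { 1800, 1000, 2100, 1300 };   /* partly off screen */
        struct rect off = { 2000, 0, 2100, 100 };       /* fully off screen */

        assert(clip_rect(&dst, &clip));         /* still partially visible */
        assert(!clip_rect(&off, &clip));        /* clipped away entirely */

        /* Source coordinates are 16.16 fixed point: integer pixels << 16. */
        assert((5 << 16) == 0x50000);
        return 0;
}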
+ */ + if (!intel_crtc->primary_enabled) + goto disable_unpin; + + intel_crtc_wait_for_pending_flips(plane->crtc); + intel_disable_primary_hw_plane(dev_priv, intel_plane->plane, + intel_plane->pipe); +disable_unpin: + mutex_lock(&dev->struct_mutex); + i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); + intel_unpin_fb_obj(intel_fb_obj(plane->fb)); + mutex_unlock(&dev->struct_mutex); + plane->fb = NULL; + + return 0; +} + +static int +intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); + struct drm_rect dest = { + /* integer pixels */ + .x1 = crtc_x, + .y1 = crtc_y, + .x2 = crtc_x + crtc_w, + .y2 = crtc_y + crtc_h, + }; + struct drm_rect src = { + /* 16.16 fixed point */ + .x1 = src_x, + .y1 = src_y, + .x2 = src_x + src_w, + .y2 = src_y + src_h, + }; + const struct drm_rect clip = { + /* integer pixels */ + .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, + .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, + }; + bool visible; + int ret; + + ret = drm_plane_helper_check_update(plane, crtc, fb, + &src, &dest, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true, &visible); + + if (ret) + return ret; + + /* + * If the CRTC isn't enabled, we're just pinning the framebuffer, + * updating the fb pointer, and returning without touching the + * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to + * turn on the display with all planes setup as desired. + */ + if (!crtc->enabled) { + mutex_lock(&dev->struct_mutex); + + /* + * If we already called setplane while the crtc was disabled, + * we may have an fb pinned; unpin it. + */ + if (plane->fb) + intel_unpin_fb_obj(old_obj); + + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); + + /* Pin and return without programming hardware */ + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + mutex_unlock(&dev->struct_mutex); + + return ret; + } + + intel_crtc_wait_for_pending_flips(crtc); + + /* + * If clipping results in a non-visible primary plane, we'll disable + * the primary plane. Note that this is a bit different than what + * happens if userspace explicitly disables the plane by passing fb=0 + * because plane->fb still gets set and pinned. + */ + if (!visible) { + mutex_lock(&dev->struct_mutex); + + /* + * Try to pin the new fb first so that we can bail out if we + * fail. 
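The comment above pins the new framebuffer before releasing the old one so the update can bail out without losing the current scanout buffer. That acquire-new-then-release-old ordering is sketched below with hypothetical pin/unpin helpers standing in for the GEM calls.

#include <stdio.h>

static int pin(const char *name)
{
        printf("pin %s\n", name);
        return 0;               /* 0 on success, negative errno on failure */
}

static void unpin(const char *name)
{
        printf("unpin %s\n", name);
}

/* Swap the scanout buffer: acquire the new one first so failure is safe. */
static int swap_fb(const char *old_fb, const char *new_fb)
{
        int ret = pin(new_fb);

        if (ret)
                return ret;     /* old_fb is still pinned and usable */
        if (old_fb)
                unpin(old_fb);
        return 0;
}

int main(void)
{
        return swap_fb("old framebuffer", "new framebuffer");
}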
+ */ + if (plane->fb != fb) { + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ret; + } + } + + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); + + if (intel_crtc->primary_enabled) + intel_disable_primary_hw_plane(dev_priv, + intel_plane->plane, + intel_plane->pipe); + + + if (plane->fb != fb) + if (plane->fb) + intel_unpin_fb_obj(old_obj); + + mutex_unlock(&dev->struct_mutex); + + return 0; + } + + ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); + if (ret) + return ret; + + if (!intel_crtc->primary_enabled) + intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, + intel_crtc->pipe); + + return 0; +} + +/* Common destruction function for both primary and cursor planes */ +static void intel_plane_destroy(struct drm_plane *plane) +{ + struct intel_plane *intel_plane = to_intel_plane(plane); + drm_plane_cleanup(plane); + kfree(intel_plane); +} + +static const struct drm_plane_funcs intel_primary_plane_funcs = { + .update_plane = intel_primary_plane_setplane, + .disable_plane = intel_primary_plane_disable, + .destroy = intel_plane_destroy, +}; + +static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, + int pipe) +{ + struct intel_plane *primary; + const uint32_t *intel_primary_formats; + int num_formats; + + primary = kzalloc(sizeof(*primary), GFP_KERNEL); + if (primary == NULL) + return NULL; + + primary->can_scale = false; + primary->max_downscale = 1; + primary->pipe = pipe; + primary->plane = pipe; + if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) + primary->plane = !pipe; + + if (INTEL_INFO(dev)->gen <= 3) { + intel_primary_formats = intel_primary_formats_gen2; + num_formats = ARRAY_SIZE(intel_primary_formats_gen2); + } else { + intel_primary_formats = intel_primary_formats_gen4; + num_formats = ARRAY_SIZE(intel_primary_formats_gen4); + } + + drm_universal_plane_init(dev, &primary->base, 0, + &intel_primary_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY); + return &primary->base; +} + +static int +intel_cursor_plane_disable(struct drm_plane *plane) +{ + if (!plane->fb) + return 0; + + BUG_ON(!plane->crtc); + + return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0); +} + +static int +intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_rect dest = { + /* integer pixels */ + .x1 = crtc_x, + .y1 = crtc_y, + .x2 = crtc_x + crtc_w, + .y2 = crtc_y + crtc_h, + }; + struct drm_rect src = { + /* 16.16 fixed point */ + .x1 = src_x, + .y1 = src_y, + .x2 = src_x + src_w, + .y2 = src_y + src_h, + }; + const struct drm_rect clip = { + /* integer pixels */ + .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, + .y2 = intel_crtc->active ? 
intel_crtc->config.pipe_src_h : 0, + }; + bool visible; + int ret; + + ret = drm_plane_helper_check_update(plane, crtc, fb, + &src, &dest, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true, &visible); + if (ret) + return ret; + + crtc->cursor_x = crtc_x; + crtc->cursor_y = crtc_y; + if (fb != crtc->cursor->fb) { + return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); + } else { + intel_crtc_update_cursor(crtc, visible); + return 0; + } +} +static const struct drm_plane_funcs intel_cursor_plane_funcs = { + .update_plane = intel_cursor_plane_update, + .disable_plane = intel_cursor_plane_disable, + .destroy = intel_plane_destroy, +}; + +static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, + int pipe) +{ + struct intel_plane *cursor; + + cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); + if (cursor == NULL) + return NULL; + + cursor->can_scale = false; + cursor->max_downscale = 1; + cursor->pipe = pipe; + cursor->plane = pipe; + + drm_universal_plane_init(dev, &cursor->base, 0, + &intel_cursor_plane_funcs, + intel_cursor_formats, + ARRAY_SIZE(intel_cursor_formats), + DRM_PLANE_TYPE_CURSOR); + return &cursor->base; +} + static void intel_crtc_init(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc; - int i; + struct drm_plane *primary = NULL; + struct drm_plane *cursor = NULL; + int i, ret; intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); if (intel_crtc == NULL) return; - drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); + primary = intel_primary_plane_create(dev, pipe); + if (!primary) + goto fail; + + cursor = intel_cursor_plane_create(dev, pipe); + if (!cursor) + goto fail; + + ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, + cursor, &intel_crtc_funcs); + if (ret) + goto fail; drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); for (i = 0; i < 256; i++) { @@ -10980,7 +11801,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) /* * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port - * is hooked to plane B. Hence we want plane A feeding pipe B. + * is hooked to pipe B. Hence we want plane A feeding pipe B. 
*/ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; @@ -11002,6 +11823,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); + return; + +fail: + if (primary) + drm_plane_cleanup(primary); + if (cursor) + drm_plane_cleanup(cursor); + kfree(intel_crtc); } enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) @@ -11021,21 +11850,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; - struct drm_mode_object *drmmode_obj; + struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; - drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, - DRM_MODE_OBJECT_CRTC); + drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); - if (!drmmode_obj) { + if (!drmmode_crtc) { DRM_ERROR("no such CRTC id\n"); return -ENOENT; } - crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + crtc = to_intel_crtc(drmmode_crtc); pipe_from_crtc_id->pipe = crtc->pipe; return 0; @@ -11236,6 +12064,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_TV(dev)) intel_tv_init(dev); + intel_edp_psr_init(dev); + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = @@ -11249,11 +12079,14 @@ static void intel_setup_outputs(struct drm_device *dev) static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { + struct drm_device *dev = fb->dev; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); + mutex_lock(&dev->struct_mutex); WARN_ON(!intel_fb->obj->framebuffer_references--); - drm_gem_object_unreference_unlocked(&intel_fb->obj->base); + drm_gem_object_unreference(&intel_fb->obj->base); + mutex_unlock(&dev->struct_mutex); kfree(intel_fb); } @@ -11438,7 +12271,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; - dev_priv->display.off = haswell_crtc_off; + dev_priv->display.off = ironlake_crtc_off; dev_priv->display.update_primary_plane = ironlake_update_primary_plane; } else if (HAS_PCH_SPLIT(dev)) { @@ -11671,6 +12504,9 @@ static struct intel_quirk intel_quirks[] = { /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, + /* Acer C720 Chromebook (Core i3 4005U) */ + { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, + /* Toshiba CB35 Chromebook (Celeron 2955U) */ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, @@ -11722,6 +12558,9 @@ void intel_modeset_init_hw(struct drm_device *dev) { intel_prepare_ddi(dev); + if (IS_VALLEYVIEW(dev)) + vlv_update_cdclk(dev); + intel_init_clock_gating(dev); intel_reset_dpio(dev); @@ -11798,7 +12637,6 @@ void intel_modeset_init(struct drm_device *dev) intel_init_dpio(dev); intel_reset_dpio(dev); - intel_cpu_pll_init(dev); intel_shared_dpll_init(dev); /* Just disable it once at startup */ @@ -11840,7 +12678,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) struct intel_connector *connector; struct drm_connector *crt = NULL; struct intel_load_detect_pipe load_detect_temp; - struct drm_modeset_acquire_ctx ctx; + struct 
drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; /* We can't just switch on the pipe A, we need to set things up with a * proper mode and output configuration. As a gross hack, enable pipe A @@ -11857,10 +12695,8 @@ static void intel_enable_pipe_a(struct drm_device *dev) if (!crt) return; - if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx)) - intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx); - - + if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) + intel_release_load_detect_pipe(crt, &load_detect_temp); } static bool @@ -12024,6 +12860,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) encoder->base.base.id, encoder->base.name); encoder->disable(encoder); + if (encoder->post_disable) + encoder->post_disable(encoder); } encoder->base.crtc = NULL; encoder->connectors_active = false; @@ -12108,10 +12946,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active ? "enabled" : "disabled"); } - /* FIXME: Smash this into the new shared dpll infrastructure. */ - if (HAS_DDI(dev)) - intel_ddi_setup_hw_pll_state(dev); - for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; @@ -12125,6 +12959,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", pll->name, pll->refcount, pll->on); + + if (pll->refcount) + intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); } list_for_each_entry(encoder, &dev->mode_config.encoder_list, @@ -12242,7 +13079,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, void intel_modeset_gem_init(struct drm_device *dev) { struct drm_crtc *c; - struct intel_framebuffer *fb; + struct drm_i915_gem_object *obj; mutex_lock(&dev->struct_mutex); intel_init_gt_powersave(dev); @@ -12259,11 +13096,11 @@ void intel_modeset_gem_init(struct drm_device *dev) */ mutex_lock(&dev->struct_mutex); for_each_crtc(dev, c) { - if (!c->primary->fb) + obj = intel_fb_obj(c->primary->fb); + if (obj == NULL) continue; - fb = to_intel_framebuffer(c->primary->fb); - if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) { + if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) { DRM_ERROR("failed to pin boot fb on pipe %d\n", to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); @@ -12278,13 +13115,12 @@ void intel_connector_unregister(struct intel_connector *intel_connector) struct drm_connector *connector = &intel_connector->base; intel_panel_destroy_backlight(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); } void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; struct drm_connector *connector; /* @@ -12293,7 +13129,9 @@ void intel_modeset_cleanup(struct drm_device *dev) * experience fancy races otherwise. */ drm_irq_uninstall(dev); - cancel_work_sync(&dev_priv->hotplug_work); + intel_hpd_cancel_work(dev_priv); + dev_priv->pm._irqs_disabled = true; + /* * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. 
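The readout code above takes a POWER_DOMAIN_PLLS reference for every shared DPLL that is still in use, and the surrounding hunks pair intel_display_power_get()/intel_runtime_pm_put() style calls with the resources they protect. The bookkeeping behind such calls is plain reference counting, as in this small sketch with a hypothetical domain struct.

#include <assert.h>

/* Toy power domain: hardware stays powered while the count is non-zero. */
struct toy_power_domain {
        int refcount;
        int powered;
};

static void power_get(struct toy_power_domain *d)
{
        if (d->refcount++ == 0)
                d->powered = 1;         /* first user powers the well up */
}

static void power_put(struct toy_power_domain *d)
{
        assert(d->refcount > 0);
        if (--d->refcount == 0)
                d->powered = 0;         /* last user powers it back down */
}

int main(void)
{
        struct toy_power_domain plls = { 0, 0 };

        power_get(&plls);               /* e.g. a shared DPLL is in use */
        power_get(&plls);
        power_put(&plls);
        assert(plls.powered);           /* still referenced */
        power_put(&plls);
        assert(!plls.powered);
        return 0;
}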
@@ -12304,14 +13142,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_unregister_dsm_handler(); - for_each_crtc(dev, crtc) { - /* Skip inactive CRTCs */ - if (!crtc->primary->fb) - continue; - - intel_increase_pllclock(crtc); - } - intel_disable_fbc(dev); intel_disable_gt_powersave(dev); @@ -12479,7 +13309,7 @@ intel_display_capture_error_state(struct drm_device *dev) error->pipe[i].source = I915_READ(PIPESRC(i)); - if (!HAS_PCH_SPLIT(dev)) + if (HAS_GMCH_DISPLAY(dev)) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 8a1a4fbc06ac..fdff1d420c14 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -114,7 +114,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp); static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); -static int +int intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; @@ -773,12 +773,29 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) { struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base); - sysfs_remove_link(&intel_connector->base.kdev->kobj, - intel_dp->aux.ddc.dev.kobj.name); + if (!intel_connector->mst_port) + sysfs_remove_link(&intel_connector->base.kdev->kobj, + intel_dp->aux.ddc.dev.kobj.name); intel_connector_unregister(intel_connector); } static void +hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw) +{ + switch (link_bw) { + case DP_LINK_BW_1_62: + pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; + break; + case DP_LINK_BW_2_7: + pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; + break; + case DP_LINK_BW_5_4: + pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; + break; + } +} + +static void intel_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config, int link_bw) { @@ -789,8 +806,6 @@ intel_dp_set_clock(struct intel_encoder *encoder, if (IS_G4X(dev)) { divisor = gen4_dpll; count = ARRAY_SIZE(gen4_dpll); - } else if (IS_HASWELL(dev)) { - /* Haswell has special-purpose DP DDI clocks. */ } else if (HAS_PCH_SPLIT(dev)) { divisor = pch_dpll; count = ARRAY_SIZE(pch_dpll); @@ -961,7 +976,10 @@ found: &pipe_config->dp_m2_n2); } - intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); + if (HAS_DDI(dev)) + hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); + else + intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); return true; } @@ -1267,6 +1285,19 @@ static void edp_panel_vdd_work(struct work_struct *__work) drm_modeset_unlock(&dev->mode_config.connection_mutex); } +static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) +{ + unsigned long delay; + + /* + * Queue the timer to fire a long time from now (relative to the power + * down delay) to keep the panel power up across a sequence of + * operations. 
+ */ + delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); + schedule_delayed_work(&intel_dp->panel_vdd_work, delay); +} + static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { if (!is_edp(intel_dp)) @@ -1276,17 +1307,10 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) intel_dp->want_panel_vdd = false; - if (sync) { + if (sync) edp_panel_vdd_off_sync(intel_dp); - } else { - /* - * Queue the timer to fire a long - * time from now (relative to the power down delay) - * to keep the panel power up across a sequence of operations - */ - schedule_delayed_work(&intel_dp->panel_vdd_work, - msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); - } + else + edp_panel_vdd_schedule_off(intel_dp); } void intel_edp_panel_on(struct intel_dp *intel_dp) @@ -1349,8 +1373,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp) DRM_DEBUG_KMS("Turn eDP power off\n"); - edp_wait_backlight_off(intel_dp); - WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); pp = ironlake_get_pp_control(intel_dp); @@ -1386,6 +1408,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) return; DRM_DEBUG_KMS("\n"); + + intel_panel_enable_backlight(intel_dp->attached_connector); + /* * If we enable the backlight right away following a panel power * on, we may see slight flicker as the panel syncs with the eDP @@ -1400,8 +1425,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - - intel_panel_enable_backlight(intel_dp->attached_connector); } void intel_edp_backlight_off(struct intel_dp *intel_dp) @@ -1414,8 +1437,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp) if (!is_edp(intel_dp)) return; - intel_panel_disable_backlight(intel_dp->attached_connector); - DRM_DEBUG_KMS("\n"); pp = ironlake_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; @@ -1425,6 +1446,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); intel_dp->last_backlight_off = jiffies; + + edp_wait_backlight_off(intel_dp); + + intel_panel_disable_backlight(intel_dp->attached_connector); } static void ironlake_edp_pll_on(struct intel_dp *intel_dp) @@ -1606,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->adjusted_mode.flags |= flags; + if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && + tmp & DP_COLOR_RANGE_16_235) + pipe_config->limited_color_range = true; + pipe_config->has_dp_encoder = true; intel_dp_get_m_n(crtc, pipe_config); @@ -1646,11 +1675,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } } -static bool is_edp_psr(struct drm_device *dev) +static bool is_edp_psr(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dev->dev_private; - - return dev_priv->psr.sink_support; + return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; } static bool intel_edp_is_psr_enabled(struct drm_device *dev) @@ -1698,9 +1725,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; struct edp_vsc_psr psr_vsc; - if (intel_dp->psr_setup_done) - return; - /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ memset(&psr_vsc, 0, sizeof(psr_vsc)); psr_vsc.sdp_header.HB0 = 0; @@ -1712,22 +1736,25 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp) /* Avoid continuous PSR exit by masking memup and hpd */ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); - - 
intel_dp->psr_setup_done = true; } static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t aux_clock_divider; int precharge = 0x3; int msg_size = 5; /* Header(4) + Message(1) */ + bool only_standby = false; aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); + if (IS_BROADWELL(dev) && dig_port->port != PORT_A) + only_standby = true; + /* Enable PSR in sink */ - if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE); else @@ -1746,18 +1773,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t max_sleep_time = 0x1f; uint32_t idle_frames = 1; uint32_t val = 0x0; const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; + bool only_standby = false; + + if (IS_BROADWELL(dev) && dig_port->port != PORT_A) + only_standby = true; - if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) { val |= EDP_PSR_LINK_STANDBY; val |= EDP_PSR_TP2_TP3_TIME_0us; val |= EDP_PSR_TP1_TIME_0us; val |= EDP_PSR_SKIP_AUX_EXIT; + val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0; } else val |= EDP_PSR_LINK_DISABLE; @@ -1775,18 +1808,14 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; - struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; - dev_priv->psr.source_ok = false; + lockdep_assert_held(&dev_priv->psr.lock); + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); - if (!HAS_PSR(dev)) { - DRM_DEBUG_KMS("PSR not supported on this platform\n"); - return false; - } + dev_priv->psr.source_ok = false; - if ((intel_encoder->type != INTEL_OUTPUT_EDP) || - (dig_port->port != PORT_A)) { + if (IS_HASWELL(dev) && dig_port->port != PORT_A) { DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); return false; } @@ -1796,29 +1825,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } - crtc = dig_port->base.base.crtc; - if (crtc == NULL) { - DRM_DEBUG_KMS("crtc not active for PSR\n"); - return false; - } - - intel_crtc = to_intel_crtc(crtc); - if (!intel_crtc_active(crtc)) { - DRM_DEBUG_KMS("crtc not active for PSR\n"); - return false; - } - - obj = to_intel_framebuffer(crtc->primary->fb)->obj; - if (obj->tiling_mode != I915_TILING_X || - obj->fence_reg == I915_FENCE_REG_NONE) { - DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); - return false; - } - - if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { - DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); - return false; - } + /* Below limitations aren't valid for 
Broadwell */ + if (IS_BROADWELL(dev)) + goto out; if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & S3D_ENABLE) { @@ -1831,35 +1840,60 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } + out: dev_priv->psr.source_ok = true; return true; } static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - - if (!intel_edp_psr_match_conditions(intel_dp) || - intel_edp_is_psr_enabled(dev)) - return; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; - /* Setup PSR once */ - intel_edp_psr_setup(intel_dp); + WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); + WARN_ON(dev_priv->psr.active); + lockdep_assert_held(&dev_priv->psr.lock); /* Enable PSR on the panel */ intel_edp_psr_enable_sink(intel_dp); /* Enable PSR on the host */ intel_edp_psr_enable_source(intel_dp); + + dev_priv->psr.active = true; } void intel_edp_psr_enable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; - if (intel_edp_psr_match_conditions(intel_dp) && - !intel_edp_is_psr_enabled(dev)) - intel_edp_psr_do_enable(intel_dp); + if (!HAS_PSR(dev)) { + DRM_DEBUG_KMS("PSR not supported on this platform\n"); + return; + } + + if (!is_edp_psr(intel_dp)) { + DRM_DEBUG_KMS("PSR not supported by this panel\n"); + return; + } + + mutex_lock(&dev_priv->psr.lock); + if (dev_priv->psr.enabled) { + DRM_DEBUG_KMS("PSR already in use\n"); + mutex_unlock(&dev_priv->psr.lock); + return; + } + + dev_priv->psr.busy_frontbuffer_bits = 0; + + /* Setup PSR once */ + intel_edp_psr_setup(intel_dp); + + if (intel_edp_psr_match_conditions(intel_dp)) + dev_priv->psr.enabled = intel_dp; + mutex_unlock(&dev_priv->psr.lock); } void intel_edp_psr_disable(struct intel_dp *intel_dp) @@ -1867,36 +1901,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - if (!intel_edp_is_psr_enabled(dev)) + mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.enabled) { + mutex_unlock(&dev_priv->psr.lock); return; + } - I915_WRITE(EDP_PSR_CTL(dev), - I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); + if (dev_priv->psr.active) { + I915_WRITE(EDP_PSR_CTL(dev), + I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); - /* Wait till PSR is idle */ - if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & - EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) - DRM_ERROR("Timed out waiting for PSR Idle State\n"); + /* Wait till PSR is idle */ + if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & + EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) + DRM_ERROR("Timed out waiting for PSR Idle State\n"); + + dev_priv->psr.active = false; + } else { + WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); + } + + dev_priv->psr.enabled = NULL; + mutex_unlock(&dev_priv->psr.lock); + + cancel_delayed_work_sync(&dev_priv->psr.work); } -void intel_edp_psr_update(struct drm_device *dev) +static void intel_edp_psr_work(struct work_struct *work) { - struct intel_encoder *encoder; - struct intel_dp *intel_dp = NULL; + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), psr.work.work); + struct intel_dp *intel_dp = dev_priv->psr.enabled; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) - if (encoder->type == INTEL_OUTPUT_EDP) { 
- intel_dp = enc_to_intel_dp(&encoder->base); + mutex_lock(&dev_priv->psr.lock); + intel_dp = dev_priv->psr.enabled; - if (!is_edp_psr(dev)) - return; + if (!intel_dp) + goto unlock; + + /* + * The delayed work can race with an invalidate hence we need to + * recheck. Since psr_flush first clears this and then reschedules we + * won't ever miss a flush when bailing out here. + */ + if (dev_priv->psr.busy_frontbuffer_bits) + goto unlock; + + intel_edp_psr_do_enable(intel_dp); +unlock: + mutex_unlock(&dev_priv->psr.lock); +} + +static void intel_edp_psr_do_exit(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->psr.active) { + u32 val = I915_READ(EDP_PSR_CTL(dev)); + + WARN_ON(!(val & EDP_PSR_ENABLE)); + + I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); + + dev_priv->psr.active = false; + } - if (!intel_edp_psr_match_conditions(intel_dp)) - intel_edp_psr_disable(intel_dp); - else - if (!intel_edp_is_psr_enabled(dev)) - intel_edp_psr_do_enable(intel_dp); - } +} + +void intel_edp_psr_invalidate(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + enum pipe pipe; + + mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.enabled) { + mutex_unlock(&dev_priv->psr.lock); + return; + } + + crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + intel_edp_psr_do_exit(dev); + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + + dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; + mutex_unlock(&dev_priv->psr.lock); +} + +void intel_edp_psr_flush(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + enum pipe pipe; + + mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.enabled) { + mutex_unlock(&dev_priv->psr.lock); + return; + } + + crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; + + /* + * On Haswell sprite plane updates don't result in a psr invalidating + * signal in the hardware. Which means we need to manually fake this in + * software for all flushes, not just when we've seen a preceding + * invalidation through frontbuffer rendering. 
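The PSR invalidate/flush pair above exits PSR as soon as a tracked plane is touched and only schedules the delayed re-enable once no frontbuffer bits remain busy. A single-threaded model of that bookkeeping (hypothetical state, no locking or delayed work) is sketched here.

#include <assert.h>
#include <stdbool.h>

/* Toy PSR state: no locking, no delayed work, purely illustrative. */
struct toy_psr {
        bool active;
        unsigned busy_frontbuffer_bits;
};

static void psr_invalidate(struct toy_psr *psr, unsigned bits)
{
        psr->active = false;                    /* exit PSR immediately */
        psr->busy_frontbuffer_bits |= bits;
}

/* Returns true when it is safe to schedule the PSR re-enable. */
static bool psr_flush(struct toy_psr *psr, unsigned bits)
{
        psr->busy_frontbuffer_bits &= ~bits;
        return !psr->active && !psr->busy_frontbuffer_bits;
}

int main(void)
{
        struct toy_psr psr = { .active = true, .busy_frontbuffer_bits = 0 };

        psr_invalidate(&psr, 0x1 | 0x2);        /* CPU rendering started */
        assert(!psr_flush(&psr, 0x1));          /* plane 0x2 still busy */
        assert(psr_flush(&psr, 0x2));           /* idle: re-enable may be scheduled */
        return 0;
}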
+ */ + if (IS_HASWELL(dev) && + (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe))) + intel_edp_psr_do_exit(dev); + + if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) + schedule_delayed_work(&dev_priv->psr.work, + msecs_to_jiffies(100)); + mutex_unlock(&dev_priv->psr.lock); +} + +void intel_edp_psr_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work); + mutex_init(&dev_priv->psr.lock); } static void intel_disable_dp(struct intel_encoder *encoder) @@ -2152,6 +2286,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) vlv_wait_port_ready(dev_priv, dport); } +static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + u32 val; + + mutex_lock(&dev_priv->dpio_lock); + + /* program left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA1_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA1_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA2_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA2_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + /* program clock channel usage */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + + /* + * This a a bit weird since generally CL + * matches the pipe, but here we need to + * pick the CL based on the port. + */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + if (pipe != PIPE_B) + val &= ~CHV_CMN_USEDCLKCHANNEL; + else + val |= CHV_CMN_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + + mutex_unlock(&dev_priv->dpio_lock); +} + /* * Native read with retry for link status and receiver capability reads for * cases where the sink may still be asleep. @@ -2189,18 +2387,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; } -/* - * These are source-specific values; current Intel hardware supports - * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB - */ - +/* These are source-specific values. 
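The DPIO programming in chv_dp_pre_pll_enable() above repeatedly reads a register, clears a field, sets the new value and writes it back. That read-modify-write shape is shown in a tiny standalone sketch below, using a simulated register rather than a real MMIO access.

#include <assert.h>
#include <stdint.h>

/* Simulated 32-bit register; real code would go through MMIO accessors. */
static uint32_t fake_reg = 0xffff0000;

static uint32_t reg_read(void)    { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* Clear @mask, then set @bits, leaving all other fields untouched. */
static void reg_rmw(uint32_t mask, uint32_t bits)
{
        uint32_t val = reg_read();

        val &= ~mask;
        val |= bits;
        reg_write(val);
}

int main(void)
{
        reg_rmw(0x0000ff00, 0x00002a00);
        assert(fake_reg == 0xffff2a00);
        return 0;
}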
*/ static uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev)) + if (IS_VALLEYVIEW(dev)) return DP_TRAIN_VOLTAGE_SWING_1200; else if (IS_GEN7(dev) && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_800; @@ -2216,18 +2410,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) struct drm_device *dev = intel_dp_to_dev(intel_dp); enum port port = dp_to_dig_port(intel_dp)->port; - if (IS_BROADWELL(dev)) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_400: - case DP_TRAIN_VOLTAGE_SWING_600: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_800: - return DP_TRAIN_PRE_EMPHASIS_3_5; - case DP_TRAIN_VOLTAGE_SWING_1200: - default: - return DP_TRAIN_PRE_EMPHASIS_0; - } - } else if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: return DP_TRAIN_PRE_EMPHASIS_9_5; @@ -2699,41 +2882,6 @@ intel_hsw_signal_levels(uint8_t train_set) } } -static uint32_t -intel_bdw_signal_levels(uint8_t train_set) -{ - int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); - switch (signal_levels) { - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */ - case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */ - - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */ - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */ - case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: - return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */ - - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */ - case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: - return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */ - - case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: - return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */ - - default: - DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" - "0x%x\n", signal_levels); - return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ - } -} - /* Properly updates "DP" with the correct signal levels. 
*/ static void intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) @@ -2744,10 +2892,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) uint32_t signal_levels, mask; uint8_t train_set = intel_dp->train_set[0]; - if (IS_BROADWELL(dev)) { - signal_levels = intel_bdw_signal_levels(train_set); - mask = DDI_BUF_EMP_MASK; - } else if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { signal_levels = intel_hsw_signal_levels(train_set); mask = DDI_BUF_EMP_MASK; } else if (IS_CHERRYVIEW(dev)) { @@ -3246,6 +3391,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) edp_panel_vdd_off(intel_dp, false); } +static bool +intel_dp_probe_mst(struct intel_dp *intel_dp) +{ + u8 buf[1]; + + if (!intel_dp->can_mst) + return false; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) + return false; + + _edp_panel_vdd_on(intel_dp); + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) { + if (buf[0] & DP_MST_CAP) { + DRM_DEBUG_KMS("Sink is MST capable\n"); + intel_dp->is_mst = true; + } else { + DRM_DEBUG_KMS("Sink is not MST capable\n"); + intel_dp->is_mst = false; + } + } + edp_panel_vdd_off(intel_dp, false); + + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + return intel_dp->is_mst; +} + int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -3283,6 +3455,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) sink_irq_vector, 1) == 1; } +static bool +intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) +{ + int ret; + + ret = intel_dp_dpcd_read_wake(&intel_dp->aux, + DP_SINK_COUNT_ESI, + sink_irq_vector, 14); + if (ret != 14) + return false; + + return true; +} + static void intel_dp_handle_test_request(struct intel_dp *intel_dp) { @@ -3290,6 +3476,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK); } +static int +intel_dp_check_mst_status(struct intel_dp *intel_dp) +{ + bool bret; + + if (intel_dp->is_mst) { + u8 esi[16] = { 0 }; + int ret = 0; + int retry; + bool handled; + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); +go_again: + if (bret == true) { + + /* check link status - esi[10] = 0x200c */ + if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { + DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]); + ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); + + if (handled) { + for (retry = 0; retry < 3; retry++) { + int wret; + wret = drm_dp_dpcd_write(&intel_dp->aux, + DP_SINK_COUNT_ESI+1, + &esi[1], 3); + if (wret == 3) { + break; + } + } + + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); + if (bret == true) { + DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]); + goto go_again; + } + } else + ret = 0; + + return ret; + } else { + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + /* send a hotplug event */ + drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev); + } + } + return -EINVAL; +} + /* * According to DP spec * 5.1.2: @@ -3298,21 +3541,25 @@ 
intel_dp_handle_test_request(struct intel_dp *intel_dp) * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. Check link status on receipt of hot-plug interrupt */ - void intel_dp_check_link_status(struct intel_dp *intel_dp) { + struct drm_device *dev = intel_dp_to_dev(intel_dp); struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; u8 sink_irq_vector; u8 link_status[DP_LINK_STATUS_SIZE]; - /* FIXME: This access isn't protected by any locks. */ + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + if (!intel_encoder->connectors_active) return; if (WARN_ON(!intel_encoder->base.crtc)) return; + if (!to_intel_crtc(intel_encoder->base.crtc)->active) + return; + /* Try to read receiver status if the link appears to be up */ if (!intel_dp_get_link_status(intel_dp, link_status)) { return; @@ -3418,24 +3665,12 @@ ironlake_dp_detect(struct intel_dp *intel_dp) return intel_dp_detect_dpcd(intel_dp); } -static enum drm_connector_status -g4x_dp_detect(struct intel_dp *intel_dp) +static int g4x_digital_port_connected(struct drm_device *dev, + struct intel_digital_port *intel_dig_port) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); uint32_t bit; - /* Can't disconnect eDP, but you can close the lid... */ - if (is_edp(intel_dp)) { - enum drm_connector_status status; - - status = intel_panel_detect(dev); - if (status == connector_status_unknown) - status = connector_status_connected; - return status; - } - if (IS_VALLEYVIEW(dev)) { switch (intel_dig_port->port) { case PORT_B: @@ -3448,7 +3683,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; break; default: - return connector_status_unknown; + return -EINVAL; } } else { switch (intel_dig_port->port) { @@ -3462,11 +3697,36 @@ g4x_dp_detect(struct intel_dp *intel_dp) bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; break; default: - return connector_status_unknown; + return -EINVAL; } } if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) + return 0; + return 1; +} + +static enum drm_connector_status +g4x_dp_detect(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + int ret; + + /* Can't disconnect eDP, but you can close the lid... 
*/ + if (is_edp(intel_dp)) { + enum drm_connector_status status; + + status = intel_panel_detect(dev); + if (status == connector_status_unknown) + status = connector_status_connected; + return status; + } + + ret = g4x_digital_port_connected(dev, intel_dig_port); + if (ret == -EINVAL) + return connector_status_unknown; + else if (ret == 0) return connector_status_disconnected; return intel_dp_detect_dpcd(intel_dp); @@ -3518,8 +3778,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; enum intel_display_power_domain power_domain; struct edid *edid = NULL; - - intel_runtime_pm_get(dev_priv); + bool ret; power_domain = intel_display_port_power_domain(intel_encoder); intel_display_power_get(dev_priv, power_domain); @@ -3527,6 +3786,14 @@ intel_dp_detect(struct drm_connector *connector, bool force) DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + if (intel_dp->is_mst) { + /* MST devices are disconnected from a monitor POV */ + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + status = connector_status_disconnected; + goto out; + } + intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) @@ -3539,6 +3806,16 @@ intel_dp_detect(struct drm_connector *connector, bool force) intel_dp_probe_oui(intel_dp); + ret = intel_dp_probe_mst(intel_dp); + if (ret) { + /* if we are in MST mode then this connector + won't appear connected or have anything with EDID on it */ + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + status = connector_status_disconnected; + goto out; + } + if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); } else { @@ -3555,9 +3832,6 @@ intel_dp_detect(struct drm_connector *connector, bool force) out: intel_display_power_put(dev_priv, power_domain); - - intel_runtime_pm_put(dev_priv); - return status; } @@ -3734,6 +4008,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) struct drm_device *dev = intel_dp_to_dev(intel_dp); drm_dp_aux_unregister(&intel_dp->aux); + intel_dp_mst_encoder_cleanup(intel_dig_port); drm_encoder_cleanup(encoder); if (is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); @@ -3748,6 +4023,21 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } +static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + + if (!is_edp(intel_dp)) + return; + + edp_panel_vdd_off_sync(intel_dp); +} + +static void intel_dp_encoder_reset(struct drm_encoder *encoder) +{ + intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); +} + static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_dp_detect, @@ -3763,15 +4053,83 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { + .reset = intel_dp_encoder_reset, .destroy = intel_dp_encoder_destroy, }; -static void +void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + return; +} - intel_dp_check_link_status(intel_dp); +bool +intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = 
intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; + bool ret = true; + + if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) + intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; + + DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, + long_hpd ? "long" : "short"); + + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + + if (long_hpd) { + + if (HAS_PCH_SPLIT(dev)) { + if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) + goto mst_fail; + } else { + if (g4x_digital_port_connected(dev, intel_dig_port) != 1) + goto mst_fail; + } + + if (!intel_dp_get_dpcd(intel_dp)) { + goto mst_fail; + } + + intel_dp_probe_oui(intel_dp); + + if (!intel_dp_probe_mst(intel_dp)) + goto mst_fail; + + } else { + if (intel_dp->is_mst) { + if (intel_dp_check_mst_status(intel_dp) == -EINVAL) + goto mst_fail; + } + + if (!intel_dp->is_mst) { + /* + * we'll check the link status via the normal hot plug path later - + * but for short hpds we should check it now + */ + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + intel_dp_check_link_status(intel_dp); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } + } + ret = false; + goto put_power; +mst_fail: + /* if we were in MST mode, and device is not there get out of MST mode */ + if (intel_dp->is_mst) { + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); + } +put_power: + intel_display_power_put(dev_priv, power_domain); + + return ret; } /* Return which DP Port should be selected for Transcoder DP control */ @@ -3822,7 +4180,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port) return false; } -static void +void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -4035,6 +4393,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) return; } + /* + * FIXME: This needs proper synchronization with psr state. But really + * hard to tell without seeing the user of this function of this code. + * Check locking and ordering once that lands. + */ if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) { DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n"); return; @@ -4138,6 +4501,32 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port, return downclock_mode; } +void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp *intel_dp; + enum intel_display_power_domain power_domain; + + if (intel_encoder->type != INTEL_OUTPUT_EDP) + return; + + intel_dp = enc_to_intel_dp(&intel_encoder->base); + if (!edp_have_panel_vdd(intel_dp)) + return; + /* + * The VDD bit needs a power domain reference, so if the bit is + * already enabled when we boot or resume, grab this reference and + * schedule a vdd off, so we don't hold on to the reference + * indefinitely. 
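The intel_dp_hpd_pulse() handler added above splits hot-plug servicing by pulse length: a long pulse means the physical connection may have changed (re-check live status, re-read DPCD, re-probe OUI and MST), while a short pulse is an interrupt from an already-connected sink (service the MST ESI vector, or take connection_mutex and re-check the SST link, matching the new WARN_ON in intel_dp_check_link_status()). A condensed, self-contained sketch of that decision, with a hypothetical enum standing in for the driver's internal control flow:

#include <linux/types.h>

enum dp_hpd_action {
	DP_HPD_REPROBE_LINK,	/* long pulse: port may have (dis)connected */
	DP_HPD_SERVICE_MST,	/* short pulse, MST sink: handle ESI/sideband */
	DP_HPD_CHECK_LINK,	/* short pulse, SST sink: verify/retrain link */
};

/* Illustrative only: mirrors the long/short split in intel_dp_hpd_pulse(). */
static enum dp_hpd_action classify_dp_hpd(bool long_hpd, bool is_mst)
{
	if (long_hpd)
		return DP_HPD_REPROBE_LINK;

	return is_mst ? DP_HPD_SERVICE_MST : DP_HPD_CHECK_LINK;
}

If any step of the long-pulse path fails while the port was in MST mode, the patch drops back to SST via the mst_fail label before releasing the power domain reference.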
+ */ + DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + + edp_panel_vdd_schedule_off(intel_dp); +} + static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector, struct edp_power_seq *power_seq) @@ -4158,13 +4547,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; - /* The VDD bit needs a power domain reference, so if the bit is already - * enabled when we boot, grab this reference. */ - if (edp_have_panel_vdd(intel_dp)) { - enum intel_display_power_domain power_domain; - power_domain = intel_display_port_power_domain(intel_encoder); - intel_display_power_get(dev_priv, power_domain); - } + intel_edp_panel_vdd_sanitize(intel_encoder); /* Cache DPCD and EDID for edp. */ intel_edp_panel_vdd_on(intel_dp); @@ -4288,7 +4671,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, edp_panel_vdd_work); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); if (HAS_DDI(dev)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; @@ -4321,7 +4704,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp_aux_init(intel_dp, intel_connector); - intel_dp->psr_setup_done = false; + /* init MST on ports that can support it */ + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + if (port == PORT_B || port == PORT_C || port == PORT_D) { + intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); + } + } if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { drm_dp_aux_unregister(&intel_dp->aux); @@ -4331,7 +4719,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, edp_panel_vdd_off_sync(intel_dp); drm_modeset_unlock(&dev->mode_config.connection_mutex); } - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); return false; } @@ -4353,6 +4741,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, void intel_dp_init(struct drm_device *dev, int output_reg, enum port port) { + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; @@ -4378,7 +4767,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->disable = intel_disable_dp; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; + intel_encoder->suspend = intel_dp_encoder_suspend; if (IS_CHERRYVIEW(dev)) { + intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; intel_encoder->pre_enable = chv_pre_enable_dp; intel_encoder->enable = vlv_enable_dp; intel_encoder->post_disable = chv_post_disable_dp; @@ -4408,9 +4799,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->cloneable = 0; intel_encoder->hot_plug = intel_dp_hot_plug; + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dev_priv->hpd_irq_port[port] = intel_dig_port; + if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { drm_encoder_cleanup(encoder); kfree(intel_dig_port); kfree(intel_connector); } } + +void intel_dp_mst_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* disable MST */ + for (i = 0; i < I915_MAX_PORTS; i++) { + struct 
intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port) + continue; + + if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { + if (!intel_dig_port->dp.can_mst) + continue; + if (intel_dig_port->dp.is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); + } + } +} + +void intel_dp_mst_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < I915_MAX_PORTS; i++) { + struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port) + continue; + if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { + int ret; + + if (!intel_dig_port->dp.can_mst) + continue; + + ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); + if (ret != 0) { + intel_dp_check_mst_status(&intel_dig_port->dp); + } + } + } +} diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c new file mode 100644 index 000000000000..d9a7a7865f66 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -0,0 +1,548 @@ +/* + * Copyright © 2008 Intel Corporation + * 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <drm/drmP.h> +#include "i915_drv.h" +#include "intel_drv.h" +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> + +static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = encoder->base.dev; + int bpp; + int lane_count, slots; + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; + struct intel_connector *found = NULL, *intel_connector; + int mst_pbn; + + pipe_config->dp_encoder_is_mst = true; + pipe_config->has_pch_encoder = false; + pipe_config->has_dp_encoder = true; + bpp = 24; + /* + * for MST we always configure max link bw - the spec doesn't + * seem to suggest we should do otherwise. 
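intel_dp_mst_compute_config() above sizes the stream with drm_dp_calc_pbn_mode() and then asks the topology manager for VCPI slots, storing the slot count in dp_m_n.tu. Roughly, one PBN is a 54/64 MB/s unit of link bandwidth padded by the spec's ~0.6% margin, so the value can be approximated as in the sketch below (approx_pbn is an illustrative helper, not the DRM implementation). For a 148.5 MHz, 24 bpp mode this works out to about 532 PBN.

#include <linux/kernel.h>	/* DIV_ROUND_UP_ULL */
#include <linux/types.h>

/*
 * Approximate PBN for a mode: bytes/s = clock * bpp / 8, expressed in
 * 54/64 MB/s PBN units with a 1.006 margin. Illustrative sketch only.
 */
static u32 approx_pbn(u32 clock_khz, u32 bpp)
{
	return DIV_ROUND_UP_ULL((u64)clock_khz * bpp * 64 * 1006,
				8ULL * 54 * 1000 * 1000);
}

Worked through: 148500 * 24 * 64 * 1006 / 432000000 ≈ 531.2, which rounds up to 532.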
+ */ + lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + intel_dp->link_bw = intel_dp_max_link_bw(intel_dp); + intel_dp->lane_count = lane_count; + + pipe_config->pipe_bpp = 24; + pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + + list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + if (intel_connector->new_encoder == encoder) { + found = intel_connector; + break; + } + } + + if (!found) { + DRM_ERROR("can't find connector\n"); + return false; + } + + mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp); + + pipe_config->pbn = mst_pbn; + slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn); + + intel_link_compute_m_n(bpp, lane_count, + adjusted_mode->crtc_clock, + pipe_config->port_clock, + &pipe_config->dp_m_n); + + pipe_config->dp_m_n.tu = slots; + return true; + +} + +static void intel_mst_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + int ret; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + if (ret) { + DRM_ERROR("failed to update payload %d\n", ret); + } +} + +static void intel_mst_post_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + /* this can fail */ + drm_dp_check_act_status(&intel_dp->mst_mgr); + /* and this can also fail */ + drm_dp_update_payload_part2(&intel_dp->mst_mgr); + + drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port); + + intel_dp->active_mst_links--; + intel_mst->port = NULL; + if (intel_dp->active_mst_links == 0) { + intel_dig_port->base.post_disable(&intel_dig_port->base); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + } +} + +static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + int ret; + uint32_t temp; + struct intel_connector *found = NULL, *intel_connector; + int slots; + struct drm_crtc *crtc = encoder->base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + if (intel_connector->new_encoder == encoder) { + found = intel_connector; + break; + } + } + + if (!found) { + DRM_ERROR("can't find connector\n"); + return; + } + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + intel_mst->port = found->port; + + if (intel_dp->active_mst_links == 0) { + enum port port = intel_ddi_get_encoder_port(encoder); + + I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel); + + intel_ddi_init_dp_buf_reg(&intel_dig_port->base); + + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + + + intel_dp_start_link_train(intel_dp); + intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, + 
intel_mst->port, intel_crtc->config.pbn, &slots); + if (ret == false) { + DRM_ERROR("failed to allocate vcpi\n"); + return; + } + + + intel_dp->active_mst_links++; + temp = I915_READ(DP_TP_STATUS(port)); + I915_WRITE(DP_TP_STATUS(port), temp); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); +} + +static void intel_mst_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + int ret; + + DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + + if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT), + 1)) + DRM_ERROR("Timed out waiting for ACT sent\n"); + + ret = drm_dp_check_act_status(&intel_dp->mst_mgr); + + ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); +} + +static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + *pipe = intel_mst->pipe; + if (intel_mst->port) + return true; + return false; +} + +static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum transcoder cpu_transcoder = crtc->config.cpu_transcoder; + u32 temp, flags = 0; + + pipe_config->has_dp_encoder = true; + + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (temp & TRANS_DDI_PHSYNC) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + if (temp & TRANS_DDI_PVSYNC) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + + switch (temp & TRANS_DDI_BPC_MASK) { + case TRANS_DDI_BPC_6: + pipe_config->pipe_bpp = 18; + break; + case TRANS_DDI_BPC_8: + pipe_config->pipe_bpp = 24; + break; + case TRANS_DDI_BPC_10: + pipe_config->pipe_bpp = 30; + break; + case TRANS_DDI_BPC_12: + pipe_config->pipe_bpp = 36; + break; + default: + break; + } + pipe_config->adjusted_mode.flags |= flags; + intel_dp_get_m_n(crtc, pipe_config); + + intel_ddi_clock_get(&intel_dig_port->base, pipe_config); +} + +static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct edid *edid; + int ret; + + edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); + if (!edid) + return 0; + + ret = intel_connector_update_modes(connector, edid); + kfree(edid); + + return ret; +} + +static enum drm_connector_status +intel_mst_port_dp_detect(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); +} + +static enum drm_connector_status +intel_dp_mst_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status; + status = intel_mst_port_dp_detect(connector); + return status; +} + +static int 
+intel_dp_mst_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void +intel_dp_mst_connector_destroy(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (!IS_ERR_OR_NULL(intel_connector->edid)) + kfree(intel_connector->edid); + + drm_connector_cleanup(connector); + kfree(connector); +} + +static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { + .dpms = intel_connector_dpms, + .detect = intel_dp_mst_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_dp_mst_set_property, + .destroy = intel_dp_mst_connector_destroy, +}; + +static int intel_dp_mst_get_modes(struct drm_connector *connector) +{ + return intel_dp_mst_get_ddc_modes(connector); +} + +static enum drm_mode_status +intel_dp_mst_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* TODO - validate mode against available PBN for link */ + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + + return MODE_OK; +} + +static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + return &intel_dp->mst_encoders[0]->base.base; +} + +static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { + .get_modes = intel_dp_mst_get_modes, + .mode_valid = intel_dp_mst_mode_valid, + .best_encoder = intel_mst_best_encoder, +}; + +static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + + drm_encoder_cleanup(encoder); + kfree(intel_mst); +} + +static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = { + .destroy = intel_dp_mst_encoder_destroy, +}; + +static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) +{ + if (connector->encoder) { + enum pipe pipe; + if (!connector->encoder->get_hw_state(connector->encoder, &pipe)) + return false; + return true; + } + return false; +} + +static void intel_connector_add_to_fbdev(struct intel_connector *connector) +{ +#ifdef CONFIG_DRM_I915_FBDEV + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base); +#endif +} + +static void intel_connector_remove_from_fbdev(struct intel_connector *connector) +{ +#ifdef CONFIG_DRM_I915_FBDEV + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base); +#endif +} + +static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_connector *intel_connector; + struct drm_connector *connector; + int i; + + intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); + if (!intel_connector) + return NULL; + + connector = &intel_connector->base; + drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); + + 
intel_connector->unregister = intel_connector_unregister; + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->mst_port = intel_dp; + intel_connector->port = port; + + for (i = PIPE_A; i <= PIPE_C; i++) { + drm_mode_connector_attach_encoder(&intel_connector->base, + &intel_dp->mst_encoders[i]->base.base); + } + intel_dp_add_properties(intel_dp, connector); + + drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_mode_connector_set_path_property(connector, pathprop); + drm_reinit_primary_mode_group(dev); + mutex_lock(&dev->mode_config.mutex); + intel_connector_add_to_fbdev(intel_connector); + mutex_unlock(&dev->mode_config.mutex); + drm_connector_register(&intel_connector->base); + return connector; +} + +static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; + /* need to nuke the connector */ + mutex_lock(&dev->mode_config.mutex); + intel_connector_dpms(connector, DRM_MODE_DPMS_OFF); + mutex_unlock(&dev->mode_config.mutex); + + intel_connector->unregister(intel_connector); + + mutex_lock(&dev->mode_config.mutex); + intel_connector_remove_from_fbdev(intel_connector); + drm_connector_cleanup(connector); + mutex_unlock(&dev->mode_config.mutex); + + drm_reinit_primary_mode_group(dev); + + kfree(intel_connector); + DRM_DEBUG_KMS("\n"); +} + +static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + + drm_kms_helper_hotplug_event(dev); +} + +static struct drm_dp_mst_topology_cbs mst_cbs = { + .add_connector = intel_dp_add_mst_connector, + .destroy_connector = intel_dp_destroy_mst_connector, + .hotplug = intel_dp_mst_hotplug, +}; + +static struct intel_dp_mst_encoder * +intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe) +{ + struct intel_dp_mst_encoder *intel_mst; + struct intel_encoder *intel_encoder; + struct drm_device *dev = intel_dig_port->base.base.dev; + + intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); + + if (!intel_mst) + return NULL; + + intel_mst->pipe = pipe; + intel_encoder = &intel_mst->base; + intel_mst->primary = intel_dig_port; + + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, + DRM_MODE_ENCODER_DPMST); + + intel_encoder->type = INTEL_OUTPUT_DP_MST; + intel_encoder->crtc_mask = 0x7; + intel_encoder->cloneable = 0; + + intel_encoder->compute_config = intel_dp_mst_compute_config; + intel_encoder->disable = intel_mst_disable_dp; + intel_encoder->post_disable = intel_mst_post_disable_dp; + intel_encoder->pre_enable = intel_mst_pre_enable_dp; + intel_encoder->enable = intel_mst_enable_dp; + intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; + intel_encoder->get_config = intel_dp_mst_enc_get_config; + + return intel_mst; + +} + +static bool +intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port) +{ + int i; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + for (i = PIPE_A; i <= PIPE_C; i++) + intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i); + return true; +} + +int +intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) +{ + struct intel_dp *intel_dp = 
&intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + int ret; + + intel_dp->can_mst = true; + intel_dp->mst_mgr.cbs = &mst_cbs; + + /* create encoders */ + intel_dp_create_fake_mst_encoders(intel_dig_port); + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id); + if (ret) { + intel_dp->can_mst = false; + return ret; + } + return 0; +} + +void +intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (!intel_dp->can_mst) + return; + + drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); + /* encoders will get killed by normal cleanup */ +} diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f67340ed2c12..b8c8bbd8e5f9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -32,7 +32,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> -#include <drm/drm_dp_helper.h> +#include <drm/drm_dp_mst_helper.h> /** * _wait_for - magic (register) wait macro @@ -100,6 +100,7 @@ #define INTEL_OUTPUT_EDP 8 #define INTEL_OUTPUT_DSI 9 #define INTEL_OUTPUT_UNKNOWN 10 +#define INTEL_OUTPUT_DP_MST 11 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -152,6 +153,12 @@ struct intel_encoder { * be set correctly before calling this function. */ void (*get_config)(struct intel_encoder *, struct intel_crtc_config *pipe_config); + /* + * Called during system suspend after all pending requests for the + * encoder are flushed (for example for DP AUX transactions) and + * device interrupts are disabled. + */ + void (*suspend)(struct intel_encoder *); int crtc_mask; enum hpd_pin hpd_pin; }; @@ -165,6 +172,7 @@ struct intel_panel { struct { bool present; u32 level; + u32 min; u32 max; bool enabled; bool combination_mode; /* gen 2/4 only */ @@ -207,6 +215,10 @@ struct intel_connector { /* since POLL and HPD connectors may use the same HPD line keep the native state of connector->polled in case hotplug storm detection changes it */ u8 polled; + + void *port; /* store this opaque as its illegal to dereference it */ + + struct intel_dp *mst_port; }; typedef struct dpll { @@ -307,6 +319,9 @@ struct intel_crtc_config { /* Selected dpll when shared or DPLL_ID_PRIVATE. */ enum intel_dpll_id shared_dpll; + /* PORT_CLK_SEL for DDI ports. */ + uint32_t ddi_pll_sel; + /* Actual register state of the dpll, for shared dpll cross-checking. */ struct intel_dpll_hw_state dpll_hw_state; @@ -338,6 +353,7 @@ struct intel_crtc_config { u32 pos; u32 size; bool enabled; + bool force_thru; } pch_pfit; /* FDI configuration, only valid if has_pch_encoder is set. 
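intel_dp_mst_encoder_init() above sizes the topology manager for this hardware: 16-byte maximum sideband (DPCD) transactions and at most 3 payload streams, one per fake MST encoder created for pipes A-C. A hedged usage sketch of the same bring-up sequence under the drm_dp_mst_topology_mgr_init() signature used in the hunk (bring_up_mst is a placeholder name, not part of the patch):

#include <drm/drm_dp_mst_helper.h>

/*
 * Sketch: install the driver's callbacks, then register the manager with
 * the AUX channel it should use for sideband messaging.
 */
static int bring_up_mst(struct drm_dp_mst_topology_mgr *mgr,
			struct device *dev, struct drm_dp_aux *aux,
			struct drm_dp_mst_topology_cbs *cbs,
			int conn_base_id)
{
	mgr->cbs = cbs;
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux,
					    16 /* max DPCD transaction bytes */,
					    3  /* payload streams, one per pipe */,
					    conn_base_id);
}

On failure the patch clears intel_dp->can_mst so the rest of the driver stays on the SST paths; cleanup is the matching drm_dp_mst_topology_mgr_destroy() shown above.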
*/ @@ -347,6 +363,9 @@ struct intel_crtc_config { bool ips_enabled; bool double_wide; + + bool dp_encoder_is_mst; + int pbn; }; struct intel_pipe_wm { @@ -358,6 +377,11 @@ struct intel_pipe_wm { bool sprites_scaled; }; +struct intel_mmio_flip { + u32 seqno; + u32 ring_id; +}; + struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -384,7 +408,6 @@ struct intel_crtc { struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; - int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; uint32_t cursor_cntl; uint32_t cursor_base; @@ -394,8 +417,6 @@ struct intel_crtc { struct intel_crtc_config *new_config; bool new_enabled; - uint32_t ddi_pll_sel; - /* reset counter value when the last flip was submitted */ unsigned int reset_counter; @@ -412,10 +433,12 @@ struct intel_crtc { wait_queue_head_t vbl_wait; int scanline_offset; + struct intel_mmio_flip mmio_flip; }; struct intel_plane_wm_parameters { uint32_t horiz_pixels; + uint32_t vert_pixels; uint8_t bytes_per_pixel; bool enabled; bool scaled; @@ -428,7 +451,6 @@ struct intel_plane { struct drm_i915_gem_object *obj; bool can_scale; int max_downscale; - u32 lut_r[1024], lut_g[1024], lut_b[1024]; int crtc_x, crtc_y; unsigned int crtc_w, crtc_h; uint32_t src_x, src_y; @@ -481,6 +503,7 @@ struct cxsr_latency { #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base) +#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL) struct intel_hdmi { u32 hdmi_reg; @@ -491,6 +514,7 @@ struct intel_hdmi { bool has_audio; enum hdmi_force_audio force_audio; bool rgb_quant_range_selectable; + enum hdmi_picture_aspect aspect_ratio; void (*write_infoframe)(struct drm_encoder *encoder, enum hdmi_infoframe_type type, const void *frame, ssize_t len); @@ -499,6 +523,7 @@ struct intel_hdmi { struct drm_display_mode *adjusted_mode); }; +struct intel_dp_mst_encoder; #define DP_MAX_DOWNSTREAM_PORTS 0x10 /** @@ -537,12 +562,20 @@ struct intel_dp { unsigned long last_power_cycle; unsigned long last_power_on; unsigned long last_backlight_off; - bool psr_setup_done; + struct notifier_block edp_notifier; bool use_tps3; + bool can_mst; /* this port supports mst */ + bool is_mst; + int active_mst_links; + /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; + /* mst connector list */ + struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; + struct drm_dp_mst_topology_mgr mst_mgr; + uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index); /* * This function returns the value we have to program the AUX_CTL @@ -566,6 +599,14 @@ struct intel_digital_port { u32 saved_port_bits; struct intel_dp dp; struct intel_hdmi hdmi; + bool (*hpd_pulse)(struct intel_digital_port *, bool); +}; + +struct intel_dp_mst_encoder { + struct intel_encoder base; + enum pipe pipe; + struct intel_digital_port *primary; + void *port; /* store this opaque as its illegal to dereference it */ }; static inline int @@ -652,6 +693,12 @@ enc_to_dig_port(struct drm_encoder *encoder) return container_of(encoder, struct intel_digital_port, base.base); } +static inline struct intel_dp_mst_encoder * +enc_to_mst(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_dp_mst_encoder, base.base); +} + static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { return &enc_to_dig_port(encoder)->dp; @@ -676,17 +723,26 @@ 
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable); -void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void intel_runtime_pm_disable_interrupts(struct drm_device *dev); void intel_runtime_pm_restore_interrupts(struct drm_device *dev); +static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) +{ + /* + * We only use drm_irq_uninstall() at unload and VT switch, so + * this is the only thing we need to check. + */ + return !dev_priv->pm._irqs_disabled; +} + int intel_get_crtc_scanline(struct intel_crtc *crtc); void i9xx_check_fifo_underruns(struct drm_device *dev); - +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); /* intel_crt.c */ void intel_crt_init(struct drm_device *dev); @@ -705,10 +761,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder); void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); -void intel_ddi_setup_hw_pll_state(struct drm_device *dev); bool intel_ddi_pll_select(struct intel_crtc *crtc); -void intel_ddi_pll_enable(struct intel_crtc *crtc); -void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); @@ -716,17 +769,46 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc); void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); +void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder); +void intel_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config); +void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); /* intel_display.c */ const char *intel_output_name(int output); bool intel_has_pending_fb_unpin(struct drm_device *dev); int intel_pch_rawclk(struct drm_device *dev); -int valleyview_cur_cdclk(struct drm_i915_private *dev_priv); void intel_mark_busy(struct drm_device *dev); -void intel_mark_fb_busy(struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring); +void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + struct intel_engine_cs *ring); +void intel_frontbuffer_flip_prepare(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_frontbuffer_flip_complete(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_frontbuffer_flush(struct drm_device *dev, + unsigned frontbuffer_bits); +/** + * 
intel_frontbuffer_flip - prepare frontbuffer flip + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called after scheduling a flip on @obj. This is for + * synchronous plane updates which will happen on the next vblank and which will + * not get delayed by pending gpu rendering. + * + * Can be called without any locks held. + */ +static inline +void intel_frontbuffer_flip(struct drm_device *dev, + unsigned frontbuffer_bits) +{ + intel_frontbuffer_flush(dev, frontbuffer_bits); +} + +void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); void intel_mark_idle(struct drm_device *dev); void intel_crtc_restore_mode(struct drm_crtc *crtc); +void intel_crtc_control(struct drm_crtc *crtc, bool enable); void intel_crtc_update_dpms(struct drm_crtc *crtc); void intel_encoder_destroy(struct drm_encoder *encoder); void intel_connector_dpms(struct drm_connector *, int mode); @@ -754,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx); + struct intel_load_detect_pipe *old); int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, struct intel_engine_cs *pipelined); @@ -767,12 +848,18 @@ __intel_framebuffer_create(struct drm_device *dev, void intel_prepare_page_flip(struct drm_device *dev, int plane); void intel_finish_page_flip(struct drm_device *dev, int pipe); void intel_finish_page_flip_plane(struct drm_device *dev, int plane); + +/* shared dpll functions */ struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); void assert_shared_dpll(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) +struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc); +void intel_put_shared_dpll(struct intel_crtc *crtc); + +/* modesetting asserts */ void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pll_enabled(d, p) assert_pll(d, p, true) @@ -805,7 +892,6 @@ void hsw_disable_ips(struct intel_crtc *crtc); void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder); -int valleyview_get_vco(struct drm_i915_private *dev_priv); void intel_mode_from_pipe_config(struct drm_display_mode *mode, struct intel_crtc_config *pipe_config); int intel_format_to_fourcc(int format); @@ -826,18 +912,34 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); bool intel_dp_is_edp(struct drm_device *dev, enum port port); +bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, + bool long_hpd); void intel_edp_backlight_on(struct intel_dp *intel_dp); void intel_edp_backlight_off(struct intel_dp *intel_dp); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); +void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder); void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp); void intel_edp_psr_enable(struct intel_dp *intel_dp); void intel_edp_psr_disable(struct 
intel_dp *intel_dp); -void intel_edp_psr_update(struct drm_device *dev); void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); - +void intel_edp_psr_invalidate(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_edp_psr_flush(struct drm_device *dev, + unsigned frontbuffer_bits); +void intel_edp_psr_init(struct drm_device *dev); + +int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd); +void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector); +void intel_dp_mst_suspend(struct drm_device *dev); +void intel_dp_mst_resume(struct drm_device *dev); +int intel_dp_max_link_bw(struct intel_dp *intel_dp); +void intel_dp_hot_plug(struct intel_encoder *intel_encoder); +/* intel_dp_mst.c */ +int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); +void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* intel_dsi.c */ -bool intel_dsi_init(struct drm_device *dev); +void intel_dsi_init(struct drm_device *dev); /* intel_dvo.c */ @@ -920,8 +1022,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc, void intel_gmch_panel_fitting(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config, int fitting_mode); -void intel_panel_set_backlight(struct intel_connector *connector, u32 level, - u32 max); +void intel_panel_set_backlight_acpi(struct intel_connector *connector, + u32 level, u32 max); int intel_panel_setup_backlight(struct drm_connector *connector); void intel_panel_enable_backlight(struct intel_connector *connector); void intel_panel_disable_backlight(struct intel_connector *connector); @@ -940,7 +1042,9 @@ int ilk_wm_max_level(const struct drm_device *dev); void intel_update_watermarks(struct drm_crtc *crtc); void intel_update_sprite_watermarks(struct drm_plane *plane, struct drm_crtc *crtc, - uint32_t sprite_width, int pixel_size, + uint32_t sprite_width, + uint32_t sprite_height, + int pixel_size, bool enabled, bool scaled); void intel_init_pm(struct drm_device *dev); void intel_pm_setup(struct drm_device *dev); @@ -963,6 +1067,7 @@ void intel_init_gt_powersave(struct drm_device *dev); void intel_cleanup_gt_powersave(struct drm_device *dev); void intel_enable_gt_powersave(struct drm_device *dev); void intel_disable_gt_powersave(struct drm_device *dev); +void intel_suspend_gt_powersave(struct drm_device *dev); void intel_reset_gt_powersave(struct drm_device *dev); void ironlake_teardown_rc6(struct drm_device *dev); void gen6_update_ring_freq(struct drm_device *dev); @@ -976,8 +1081,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv); void intel_init_runtime_pm(struct drm_i915_private *dev_priv); void intel_fini_runtime_pm(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_device *dev); -void __vlv_set_power_well(struct drm_i915_private *dev_priv, - enum punit_power_well power_well_id, bool enable); + /* intel_sdvo.c */ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 3fd082933c87..670c29a7b5dd 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -92,6 +92,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, if (fixed_mode) intel_fixed_panel_mode(fixed_mode, adjusted_mode); + /* DSI uses short packets for sync events, so clear mode flags for DSI */ + adjusted_mode->flags = 0; + if (intel_dsi->dev.dev_ops->mode_fixup) return 
intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev, mode, adjusted_mode); @@ -152,6 +155,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder) if (intel_dsi->dev.dev_ops->enable) intel_dsi->dev.dev_ops->enable(&intel_dsi->dev); + wait_for_dsi_fifo_empty(intel_dsi); + /* assert ip_tg_enable signal */ temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK; temp = temp | intel_dsi->port_bits; @@ -177,6 +182,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) tmp |= DPLL_REFA_CLK_ENABLE_VLV; I915_WRITE(DPLL(pipe), tmp); + /* update the hw state for DPLL */ + intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV | + DPLL_REFA_CLK_ENABLE_VLV; + tmp = I915_READ(DSPCLK_GATE_D); tmp |= DPOUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, tmp); @@ -192,6 +201,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) if (intel_dsi->dev.dev_ops->send_otp_cmds) intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev); + wait_for_dsi_fifo_empty(intel_dsi); + /* Enable port in pre-enable phase itself because as per hw team * recommendation, port should be enabled befor plane & pipe */ intel_dsi_enable(encoder); @@ -232,6 +243,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); if (is_vid_mode(intel_dsi)) { + wait_for_dsi_fifo_empty(intel_dsi); + /* de-assert ip_tg_enable signal */ temp = I915_READ(MIPI_PORT_CTRL(pipe)); I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE); @@ -261,6 +274,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder) * some next enable sequence send turn on packet error is observed */ if (intel_dsi->dev.dev_ops->disable) intel_dsi->dev.dev_ops->disable(&intel_dsi->dev); + + wait_for_dsi_fifo_empty(intel_dsi); } static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) @@ -351,9 +366,21 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { + u32 pclk; DRM_DEBUG_KMS("\n"); - /* XXX: read flags, set to adjusted_mode */ + /* + * DPLL_MD is not used in case of DSI, reading will get some default value + * set dpll_md = 0 + */ + pipe_config->dpll_hw_state.dpll_md = 0; + + pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); + if (!pclk) + return; + + pipe_config->adjusted_mode.crtc_clock = pclk; + pipe_config->port_clock = pclk; } static enum drm_mode_status @@ -658,7 +685,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, }; -bool intel_dsi_init(struct drm_device *dev) +void intel_dsi_init(struct drm_device *dev) { struct intel_dsi *intel_dsi; struct intel_encoder *intel_encoder; @@ -674,29 +701,29 @@ bool intel_dsi_init(struct drm_device *dev) /* There is no detection method for MIPI so rely on VBT */ if (!dev_priv->vbt.has_mipi) - return false; + return; + + if (IS_VALLEYVIEW(dev)) { + dev_priv->mipi_mmio_base = VLV_MIPI_BASE; + } else { + DRM_ERROR("Unsupported Mipi device to reg base"); + return; + } intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) - return false; + return; intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); if (!intel_connector) { kfree(intel_dsi); - return false; + return; } intel_encoder = &intel_dsi->base; encoder = &intel_encoder->base; intel_dsi->attached_connector = intel_connector; - if (IS_VALLEYVIEW(dev)) { - dev_priv->mipi_mmio_base = VLV_MIPI_BASE; - } else { - DRM_ERROR("Unsupported Mipi device to reg 
base"); - return false; - } - connector = &intel_connector->base; drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI); @@ -743,7 +770,7 @@ bool intel_dsi_init(struct drm_device *dev) intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev); if (!fixed_mode) { @@ -754,12 +781,10 @@ bool intel_dsi_init(struct drm_device *dev) fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; intel_panel_init(&intel_connector->panel, fixed_mode, NULL); - return true; + return; err: drm_encoder_cleanup(&intel_encoder->base); kfree(intel_dsi); kfree(intel_connector); - - return false; } diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 31db33d3e5cc..fd51867fd0d3 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -132,6 +132,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) extern void vlv_enable_dsi_pll(struct intel_encoder *encoder); extern void vlv_disable_dsi_pll(struct intel_encoder *encoder); +extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops; diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c index 933c86305237..7f1430ac8543 100644 --- a/drivers/gpu/drm/i915/intel_dsi_cmd.c +++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c @@ -419,3 +419,19 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs) return 0; } + +void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi) +{ + struct drm_encoder *encoder = &intel_dsi->base.base; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + enum pipe pipe = intel_crtc->pipe; + u32 mask; + + mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | + LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; + + if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100)) + DRM_ERROR("DPI FIFOs are not empty\n"); +} diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h index 9a18cbfa5460..46aa1acc00eb 100644 --- a/drivers/gpu/drm/i915/intel_dsi_cmd.h +++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h @@ -51,6 +51,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, u8 *reqdata, int reqlen, u8 *buf, int buflen); int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs); +void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi); /* XXX: questionable write helpers */ static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi, diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 21a0d348cedc..47c7584a4aa0 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data) case MIPI_DSI_DCS_LONG_WRITE: dsi_vc_dcs_write(intel_dsi, vc, data, len); break; - }; + } data += len; @@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi) intel_dsi->rst_timer_val = mipi_config->device_reset_timer; intel_dsi->init_count = mipi_config->master_init_timer; intel_dsi->bw_timer = mipi_config->dbi_bw_timer; - intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; + intel_dsi->video_frmt_cfg_bits = + mipi_config->bta_enabled ? 
DISABLE_VIDEO_BTA : 0; switch (intel_dsi->escape_clk_div) { case 0: @@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi) * * prepare count */ - ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); + ths_prepare_ns = max(mipi_config->ths_prepare, + mipi_config->tclk_prepare); prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2); /* exit zero count */ diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index ba79ec19da3b..d8bb1ea2f0da 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -298,3 +298,84 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); } + +static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) +{ + int bpp; + + switch (pixel_format) { + default: + case VID_MODE_FORMAT_RGB888: + case VID_MODE_FORMAT_RGB666_LOOSE: + bpp = 24; + break; + case VID_MODE_FORMAT_RGB666: + bpp = 18; + break; + case VID_MODE_FORMAT_RGB565: + bpp = 16; + break; + } + + WARN(bpp != pipe_bpp, + "bpp match assertion failure (expected %d, current %d)\n", + bpp, pipe_bpp); +} + +u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 dsi_clock, pclk; + u32 pll_ctl, pll_div; + u32 m = 0, p = 0; + int refclk = 25000; + int i; + + DRM_DEBUG_KMS("\n"); + + mutex_lock(&dev_priv->dpio_lock); + pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); + pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER); + mutex_unlock(&dev_priv->dpio_lock); + + /* mask out other bits and extract the P1 divisor */ + pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; + pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); + + /* mask out the other bits and extract the M1 divisor */ + pll_div &= DSI_PLL_M1_DIV_MASK; + pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; + + while (pll_ctl) { + pll_ctl = pll_ctl >> 1; + p++; + } + p--; + + if (!p) { + DRM_ERROR("wrong P1 divisor\n"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) { + if (lfsr_converts[i] == pll_div) + break; + } + + if (i == ARRAY_SIZE(lfsr_converts)) { + DRM_ERROR("wrong m_seed programmed\n"); + return 0; + } + + m = i + 62; + + dsi_clock = (m * refclk) / p; + + /* pixel_format and pipe_bpp should agree */ + assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); + + pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp); + + return pclk; +} diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index a3631c0a5c28..56b47d2ffaf7 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { + struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); + u32 tmp; + + tmp = I915_READ(intel_dvo->dev.dvo_reg); + + if (!(tmp & DVO_ENABLE)) + return false; return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); } @@ -558,7 +566,7 @@ void intel_dvo_init(struct drm_device *dev) intel_dvo->panel_wants_dither = true; } - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return; } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c 
b/drivers/gpu/drm/i915/intel_fbdev.c index 088fe9378a4c..f475414671d8 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -43,10 +43,36 @@ #include <drm/i915_drm.h> #include "i915_drv.h" +static int intel_fbdev_set_par(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct intel_fbdev *ifbdev = + container_of(fb_helper, struct intel_fbdev, helper); + int ret; + + ret = drm_fb_helper_set_par(info); + + if (ret == 0) { + /* + * FIXME: fbdev presumes that all callbacks also work from + * atomic contexts and relies on that for emergency oops + * printing. KMS totally doesn't do that and the locking here is + * by far not the only place this goes wrong. Ignore this for + * now until we solve this for real. + */ + mutex_lock(&fb_helper->dev->struct_mutex); + ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj, + true); + mutex_unlock(&fb_helper->dev->struct_mutex); + } + + return ret; +} + static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, + .fb_set_par = intel_fbdev_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, @@ -81,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, sizes->surface_depth); size = mode_cmd.pitches[0] * mode_cmd.height; - size = ALIGN(size, PAGE_SIZE); + size = PAGE_ALIGN(size); obj = i915_gem_object_create_stolen(dev, size); if (obj == NULL) obj = i915_gem_alloc_object(dev, size); @@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, } crtcs[i] = new_crtc; - DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n", + DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", connector->name, pipe_name(to_intel_crtc(encoder->crtc)->pipe), encoder->crtc->base.id, @@ -452,7 +478,7 @@ out: return true; } -static struct drm_fb_helper_funcs intel_fb_helper_funcs = { +static const struct drm_fb_helper_funcs intel_fb_helper_funcs = { .initial_config = intel_fb_initial_config, .gamma_set = intel_crtc_fb_gamma_set, .gamma_get = intel_crtc_fb_gamma_get, @@ -623,7 +649,8 @@ int intel_fbdev_init(struct drm_device *dev) if (ifbdev == NULL) return -ENOMEM; - ifbdev->helper.funcs = &intel_fb_helper_funcs; + drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); + if (!intel_fbdev_init_bios(dev, ifbdev)) ifbdev->preferred_bpp = 32; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index eee2bbec2958..5a9de21637b7 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, union hdmi_infoframe frame; int ret; + /* Set user selected PAR to incoming mode's member */ + adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, adjusted_mode); if (ret < 0) { @@ -709,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp, flags = 0; int dotclock; @@ -728,9 +732,13 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; - if (tmp 
& HDMI_MODE_SELECT_HDMI) + if (tmp & SDVO_AUDIO_ENABLE) pipe_config->has_audio = true; + if (!HAS_PCH_SPLIT(dev) && + tmp & HDMI_COLOR_RANGE_16_235) + pipe_config->limited_color_range = true; + pipe_config->adjusted_mode.flags |= flags; if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) @@ -879,7 +887,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc) struct intel_encoder *encoder; int count = 0, count_hdmi = 0; - if (!HAS_PCH_SPLIT(dev)) + if (HAS_GMCH_DISPLAY(dev)) return false; list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { @@ -1124,6 +1132,23 @@ intel_hdmi_set_property(struct drm_connector *connector, goto done; } + if (property == connector->dev->mode_config.aspect_ratio_property) { + switch (val) { + case DRM_MODE_PICTURE_ASPECT_NONE: + intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; + break; + case DRM_MODE_PICTURE_ASPECT_4_3: + intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3; + break; + case DRM_MODE_PICTURE_ASPECT_16_9: + intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9; + break; + default: + return -EINVAL; + } + goto done; + } + return -EINVAL; done: @@ -1229,6 +1254,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); } +static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + u32 val; + + mutex_lock(&dev_priv->dpio_lock); + + /* program left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA1_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA1_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA2_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA2_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + /* program clock channel usage */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + + /* + * This a a bit weird since generally CL + * matches the pipe, but here we need to + * pick the CL based on the port. 
+ */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + if (pipe != PIPE_B) + val &= ~CHV_CMN_USEDCLKCHANNEL; + else + val |= CHV_CMN_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + + mutex_unlock(&dev_priv->dpio_lock); +} + static void vlv_hdmi_post_disable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); @@ -1416,11 +1505,22 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { }; static void +intel_attach_aspect_ratio_property(struct drm_connector *connector) +{ + if (!drm_mode_create_aspect_ratio_property(connector->dev)) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.aspect_ratio_property, + DRM_MODE_PICTURE_ASPECT_NONE); +} + +static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_hdmi->color_range_auto = true; + intel_attach_aspect_ratio_property(connector); + intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; } void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, @@ -1467,7 +1567,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, if (IS_VALLEYVIEW(dev)) { intel_hdmi->write_infoframe = vlv_write_infoframe; intel_hdmi->set_infoframes = vlv_set_infoframes; - } else if (!HAS_PCH_SPLIT(dev)) { + } else if (IS_G4X(dev)) { intel_hdmi->write_infoframe = g4x_write_infoframe; intel_hdmi->set_infoframes = g4x_set_infoframes; } else if (HAS_DDI(dev)) { @@ -1490,7 +1590,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi_add_properties(intel_hdmi, connector); intel_connector_attach_encoder(intel_connector, intel_encoder); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. 
Failure to do so will result in spurious interrupts being @@ -1528,6 +1628,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) intel_encoder->get_hw_state = intel_hdmi_get_hw_state; intel_encoder->get_config = intel_hdmi_get_config; if (IS_CHERRYVIEW(dev)) { + intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable; intel_encoder->pre_enable = chv_hdmi_pre_enable; intel_encoder->enable = vlv_enable_hdmi; intel_encoder->post_disable = chv_hdmi_post_disable; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index d33b61d0dd33..b31088a551f2 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -34,11 +34,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -enum disp_clk { - CDCLK, - CZCLK -}; - struct gmbus_port { const char *name; int reg; @@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c) return container_of(i2c, struct intel_gmbus, adapter); } -static int get_disp_clk_div(struct drm_i915_private *dev_priv, - enum disp_clk clk) -{ - u32 reg_val; - int clk_ratio; - - reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO); - - if (clk == CDCLK) - clk_ratio = - ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1; - else - clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1; - - return clk_ratio; -} - -static void gmbus_set_freq(struct drm_i915_private *dev_priv) -{ - int vco, gmbus_freq = 0, cdclk_div; - - BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); - - vco = valleyview_get_vco(dev_priv); - - /* Get the CDCLK divide ratio */ - cdclk_div = get_disp_clk_div(dev_priv, CDCLK); - - /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. - */ - if (cdclk_div) - gmbus_freq = (vco << 1) / cdclk_div; - - if (WARN_ON(gmbus_freq == 0)) - return; - - I915_WRITE(GMBUSFREQ_VLV, gmbus_freq); -} - void intel_i2c_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - /* - * In BIOS-less system, program the correct gmbus frequency - * before reading edid. - */ - if (IS_VALLEYVIEW(dev)) - gmbus_set_freq(dev_priv); - I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 5e5a72fca5fb..fdf40267249c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -51,6 +51,7 @@ struct intel_lvds_encoder { bool is_dual_link; u32 reg; + u32 a3_power; struct intel_lvds_connector *attached_connector; }; @@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); + enum intel_display_power_domain power_domain; u32 tmp; + power_domain = intel_display_port_power_domain(encoder); + if (!intel_display_power_enabled(dev_priv, power_domain)) + return false; + tmp = I915_READ(lvds_encoder->reg); if (!(tmp & LVDS_PORT_EN)) @@ -172,8 +178,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more thoroughly into how - * panels behave in the two modes. + * panels behave in the two modes. For now, let's just maintain the + * value we got from the BIOS. 
*/ + temp &= ~LVDS_A3_POWER_MASK; + temp |= lvds_encoder->a3_power; /* Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is @@ -271,7 +280,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, struct intel_crtc_config *pipe_config) { struct drm_device *dev = intel_encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&intel_encoder->base); struct intel_connector *intel_connector = @@ -286,8 +294,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, return false; } - if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == - LVDS_A3_POWER_UP) + if (lvds_encoder->a3_power == LVDS_A3_POWER_UP) lvds_bpp = 8*3; else lvds_bpp = 6*3; @@ -531,7 +538,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { .destroy = intel_encoder_destroy, }; -static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) +static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); return 1; @@ -1088,6 +1095,9 @@ out: DRM_DEBUG_KMS("detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? "dual" : "single"); + lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & + LVDS_A3_POWER_MASK; + /* * Unlock registers and just * leave them unlocked @@ -1104,7 +1114,7 @@ out: DRM_DEBUG_KMS("lid notifier registration failed\n"); lvds_connector->lid_notifier.notifier_call = NULL; } - drm_sysfs_connector_add(connector); + drm_connector_register(connector); intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); intel_panel_setup_backlight(connector); diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 4f6b53998d79..d8de1d5140a7 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, case INTEL_OUTPUT_UNKNOWN: case INTEL_OUTPUT_DISPLAYPORT: case INTEL_OUTPUT_HDMI: + case INTEL_OUTPUT_DP_MST: type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; break; case INTEL_OUTPUT_EDP: @@ -395,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) return -EINVAL; } +/* + * If the vendor backlight interface is not in use and ACPI backlight interface + * is broken, do not bother processing backlight change requests from firmware. + */ +static bool should_ignore_backlight_request(void) +{ + return acpi_video_backlight_support() && + !acpi_video_verify_backlight_support(); +} + static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -403,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); - /* - * If the acpi_video interface is not supposed to be used, don't - * bother processing backlight level change requests from firmware. 
- */ - if (!acpi_video_verify_backlight_support()) { + if (should_ignore_backlight_request()) { DRM_DEBUG_KMS("opregion backlight request ignored\n"); return 0; } @@ -427,7 +434,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) */ DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) - intel_panel_set_backlight(intel_connector, bclp, 255); + intel_panel_set_backlight_acpi(intel_connector, bclp, 255); iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); drm_modeset_unlock(&dev->mode_config.connection_mutex); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index daa118978eec..dc2f4f26c961 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) } intel_overlay_release_old_vid_tail(overlay); + + + i915_gem_track_fb(overlay->old_vid_bo, NULL, + INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); return 0; } @@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, bool scale_changed = false; struct drm_device *dev = overlay->dev; u32 swidth, swidthsw, sheight, ostride; + enum pipe pipe = overlay->crtc->pipe; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); @@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, oconfig = OCONF_CC_OUT_8BIT; if (IS_GEN4(overlay->dev)) oconfig |= OCONF_CSC_MODE_BT709; - oconfig |= overlay->crtc->pipe == 0 ? + oconfig |= pipe == 0 ? OCONF_PIPE_A : OCONF_PIPE_B; iowrite32(oconfig, ®s->OCONFIG); intel_overlay_unmap_regs(overlay, regs); @@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret) goto out_unpin; + i915_gem_track_fb(overlay->vid_bo, new_bo, + INTEL_FRONTBUFFER_OVERLAY(pipe)); + overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; + intel_frontbuffer_flip(dev, + INTEL_FRONTBUFFER_OVERLAY(pipe)); + return 0; out_unpin: @@ -1028,7 +1039,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, struct drm_intel_overlay_put_image *put_image_rec = data; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; - struct drm_mode_object *drmmode_obj; + struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; struct drm_i915_gem_object *new_bo; struct put_image_params *params; @@ -1057,13 +1068,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, if (!params) return -ENOMEM; - drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id, - DRM_MODE_OBJECT_CRTC); - if (!drmmode_obj) { + drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id); + if (!drmmode_crtc) { ret = -ENOENT; goto out_free; } - crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); + crtc = to_intel_crtc(drmmode_crtc); new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, put_image_rec->bo_handle)); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 12b02fe1d0ae..8e374449c6b5 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev) } } +/** + * scale - scale values from one range to another + * + * @source_val: value in range [@source_min..@source_max] + * + * Return @source_val in range [@source_min..@source_max] scaled to range + * 
[@target_min..@target_max]. + */ +static uint32_t scale(uint32_t source_val, + uint32_t source_min, uint32_t source_max, + uint32_t target_min, uint32_t target_max) +{ + uint64_t target_val; + + WARN_ON(source_min > source_max); + WARN_ON(target_min > target_max); + + /* defensive */ + source_val = clamp(source_val, source_min, source_max); + + /* avoid overflows */ + target_val = (uint64_t)(source_val - source_min) * + (target_max - target_min); + do_div(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */ +static inline u32 scale_user_to_hw(struct intel_connector *connector, + u32 user_level, u32 user_max) +{ + struct intel_panel *panel = &connector->panel; + + return scale(user_level, 0, user_max, + panel->backlight.min, panel->backlight.max); +} + +/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result + * to [hw_min..hw_max]. */ +static inline u32 clamp_user_to_hw(struct intel_connector *connector, + u32 user_level, u32 user_max) +{ + struct intel_panel *panel = &connector->panel; + u32 hw_level; + + hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max); + hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max); + + return hw_level; +} + +/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */ +static inline u32 scale_hw_to_user(struct intel_connector *connector, + u32 hw_level, u32 user_max) +{ + struct intel_panel *panel = &connector->panel; + + return scale(hw_level, panel->backlight.min, panel->backlight.max, + 0, user_max); +} + static u32 intel_panel_compute_brightness(struct intel_connector *connector, u32 val) { @@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level) dev_priv->display.set_backlight(connector, level); } -/* set backlight brightness to level in range [0..max] */ -void intel_panel_set_backlight(struct intel_connector *connector, u32 level, - u32 max) +/* set backlight brightness to level in range [0..max], scaling wrt hw min */ +static void intel_panel_set_backlight(struct intel_connector *connector, + u32 user_level, u32 user_max) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_panel *panel = &connector->panel; enum pipe pipe = intel_get_pipe_from_connector(connector); - u32 freq; + u32 hw_level; unsigned long flags; - u64 n; if (!panel->backlight.present || pipe == INVALID_PIPE) return; @@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, WARN_ON(panel->backlight.max == 0); - /* scale to hardware max, but be careful to not overflow */ - freq = panel->backlight.max; - n = (u64)level * freq; - do_div(n, max); - level = n; + hw_level = scale_user_to_hw(connector, user_level, user_max); + panel->backlight.level = hw_level; + + if (panel->backlight.enabled) + intel_panel_actually_set_backlight(connector, hw_level); + + spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); +} + +/* set backlight brightness to level in range [0..max], assuming hw min is + * respected. 
+ */ +void intel_panel_set_backlight_acpi(struct intel_connector *connector, + u32 user_level, u32 user_max) +{ + struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_panel *panel = &connector->panel; + enum pipe pipe = intel_get_pipe_from_connector(connector); + u32 hw_level; + unsigned long flags; + + if (!panel->backlight.present || pipe == INVALID_PIPE) + return; + + spin_lock_irqsave(&dev_priv->backlight_lock, flags); + + WARN_ON(panel->backlight.max == 0); + + hw_level = clamp_user_to_hw(connector, user_level, user_max); + panel->backlight.level = hw_level; - panel->backlight.level = level; if (panel->backlight.device) - panel->backlight.device->props.brightness = level; + panel->backlight.device->props.brightness = + scale_hw_to_user(connector, + panel->backlight.level, + panel->backlight.device->props.max_brightness); if (panel->backlight.enabled) - intel_panel_actually_set_backlight(connector, level); + intel_panel_actually_set_backlight(connector, hw_level); spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); } @@ -711,7 +801,7 @@ static void pch_enable_backlight(struct intel_connector *connector) cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); if (cpu_ctl2 & BLM_PWM_ENABLE) { - WARN(1, "cpu backlight already enabled\n"); + DRM_DEBUG_KMS("cpu backlight already enabled\n"); cpu_ctl2 &= ~BLM_PWM_ENABLE; I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); } @@ -755,7 +845,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) ctl = I915_READ(BLC_PWM_CTL); if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { - WARN(1, "backlight already enabled\n"); + DRM_DEBUG_KMS("backlight already enabled\n"); I915_WRITE(BLC_PWM_CTL, 0); } @@ -786,7 +876,7 @@ static void i965_enable_backlight(struct intel_connector *connector) ctl2 = I915_READ(BLC_PWM_CTL2); if (ctl2 & BLM_PWM_ENABLE) { - WARN(1, "backlight already enabled\n"); + DRM_DEBUG_KMS("backlight already enabled\n"); ctl2 &= ~BLM_PWM_ENABLE; I915_WRITE(BLC_PWM_CTL2, ctl2); } @@ -820,7 +910,7 @@ static void vlv_enable_backlight(struct intel_connector *connector) ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); if (ctl2 & BLM_PWM_ENABLE) { - WARN(1, "backlight already enabled\n"); + DRM_DEBUG_KMS("backlight already enabled\n"); ctl2 &= ~BLM_PWM_ENABLE; I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); } @@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector) panel->backlight.level = panel->backlight.max; if (panel->backlight.device) panel->backlight.device->props.brightness = - panel->backlight.level; + scale_hw_to_user(connector, + panel->backlight.level, + panel->backlight.device->props.max_brightness); } dev_priv->display.enable_backlight(connector); @@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) struct intel_connector *connector = bl_get_data(bd); struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + u32 hw_level; int ret; intel_runtime_pm_get(dev_priv); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - ret = intel_panel_get_backlight(connector); + + hw_level = intel_panel_get_backlight(connector); + ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness); + drm_modeset_unlock(&dev->mode_config.connection_mutex); intel_runtime_pm_put(dev_priv); @@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector) if (WARN_ON(panel->backlight.device)) return -ENODEV; - BUG_ON(panel->backlight.max == 0); + 
WARN_ON(panel->backlight.max == 0); memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; - props.brightness = panel->backlight.level; + + /* + * Note: Everything should work even if the backlight device max + * presented to the userspace is arbitrarily chosen. + */ props.max_brightness = panel->backlight.max; + props.brightness = scale_hw_to_user(connector, + panel->backlight.level, + props.max_brightness); /* * Note: using the same name independent of the connector prevents @@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector) * XXX: Query mode clock or hardware clock and program PWM modulation frequency * appropriately when it's 0. Use VBT and/or sane defaults. */ +static u32 get_backlight_min_vbt(struct intel_connector *connector) +{ + struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_panel *panel = &connector->panel; + + WARN_ON(panel->backlight.max == 0); + + /* vbt value is a coefficient in range [0..255] */ + return scale(dev_priv->vbt.backlight.min_brightness, 0, 255, + 0, panel->backlight.max); +} + static int bdw_setup_backlight(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; @@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = bdw_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); @@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = pch_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); @@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = i9xx_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); @@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = i9xx_get_backlight(connector); panel->backlight.level = intel_panel_compute_brightness(connector, val); @@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector) if (!panel->backlight.max) return -ENODEV; + panel->backlight.min = get_backlight_min_vbt(connector); + val = _vlv_get_backlight(dev, PIPE_A); panel->backlight.level = intel_panel_compute_brightness(connector, val); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f1233f544f3e..40c12295c0bd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int cfb_pitch; int i; @@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private 
*dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; @@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dev_priv->fbc.threshold++; + + switch (dev_priv->fbc.threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: dpfc_ctl |= DPFC_CTL_LIMIT_2X; - else + break; + case 1: dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN5(dev)) dpfc_ctl |= obj->fence_reg; @@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->primary->fb; - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 dpfc_ctl; dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dev_priv->fbc.threshold++; + + switch (dev_priv->fbc.threshold) { + case 4: + case 3: + dpfc_ctl |= DPFC_CTL_LIMIT_4X; + break; + case 2: dpfc_ctl |= DPFC_CTL_LIMIT_2X; - else + break; + case 1: dpfc_ctl |= DPFC_CTL_LIMIT_1X; + break; + } + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev) struct drm_crtc *crtc = NULL, *tmp_crtc; struct intel_crtc *intel_crtc; struct drm_framebuffer *fb; - struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; const struct drm_display_mode *adjusted_mode; unsigned int max_width, max_height; @@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev) intel_crtc = to_intel_crtc(crtc); fb = crtc->primary->fb; - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; + obj = intel_fb_obj(fb); adjusted_mode = &intel_crtc->config.adjusted_mode; if (i915.enable_fbc < 0) { @@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev) goto out_disable; } - if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) { + max_width = 4096; + max_height = 4096; + } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { max_width = 4096; max_height = 2048; } else { @@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev) if (in_dbg_master()) goto out_disable; - if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { + if (i915_gem_stolen_setup_compression(dev, obj->base.size, + drm_format_plane_cpp(fb->pixel_format, 0))) { if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); goto out_disable; @@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, return NULL; } -static 
void pineview_disable_cxsr(struct drm_device *dev) +void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; + u32 val; + + if (IS_VALLEYVIEW(dev)) { + I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); + } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { + I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); + } else if (IS_PINEVIEW(dev)) { + val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; + val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; + I915_WRITE(DSPFW3, val); + } else if (IS_I945G(dev) || IS_I945GM(dev)) { + val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : + _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); + I915_WRITE(FW_BLC_SELF, val); + } else if (IS_I915GM(dev)) { + val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : + _MASKED_BIT_DISABLE(INSTPM_SELF_EN); + I915_WRITE(INSTPM, val); + } else { + return; + } - /* deactivate cxsr */ - I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); + DRM_DEBUG_KMS("memory self-refresh is %s\n", + enable ? "enabled" : "disabled"); } /* @@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane) /* Pineview has different values for various configs */ static const struct intel_watermark_params pineview_display_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE + .fifo_size = PINEVIEW_DISPLAY_FIFO, + .max_wm = PINEVIEW_MAX_WM, + .default_wm = PINEVIEW_DFT_WM, + .guard_size = PINEVIEW_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pineview_display_hplloff_wm = { - PINEVIEW_DISPLAY_FIFO, - PINEVIEW_MAX_WM, - PINEVIEW_DFT_HPLLOFF_WM, - PINEVIEW_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE + .fifo_size = PINEVIEW_DISPLAY_FIFO, + .max_wm = PINEVIEW_MAX_WM, + .default_wm = PINEVIEW_DFT_HPLLOFF_WM, + .guard_size = PINEVIEW_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pineview_cursor_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE, + .fifo_size = PINEVIEW_CURSOR_FIFO, + .max_wm = PINEVIEW_CURSOR_MAX_WM, + .default_wm = PINEVIEW_CURSOR_DFT_WM, + .guard_size = PINEVIEW_CURSOR_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params pineview_cursor_hplloff_wm = { - PINEVIEW_CURSOR_FIFO, - PINEVIEW_CURSOR_MAX_WM, - PINEVIEW_CURSOR_DFT_WM, - PINEVIEW_CURSOR_GUARD_WM, - PINEVIEW_FIFO_LINE_SIZE + .fifo_size = PINEVIEW_CURSOR_FIFO, + .max_wm = PINEVIEW_CURSOR_MAX_WM, + .default_wm = PINEVIEW_CURSOR_DFT_WM, + .guard_size = PINEVIEW_CURSOR_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, }; static const struct intel_watermark_params g4x_wm_info = { - G4X_FIFO_SIZE, - G4X_MAX_WM, - G4X_MAX_WM, - 2, - G4X_FIFO_LINE_SIZE, + .fifo_size = G4X_FIFO_SIZE, + .max_wm = G4X_MAX_WM, + .default_wm = G4X_MAX_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params g4x_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - G4X_FIFO_LINE_SIZE, + .fifo_size = I965_CURSOR_FIFO, + .max_wm = I965_CURSOR_MAX_WM, + .default_wm = I965_CURSOR_DFT_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params valleyview_wm_info = { - VALLEYVIEW_FIFO_SIZE, - VALLEYVIEW_MAX_WM, - VALLEYVIEW_MAX_WM, - 2, - 
G4X_FIFO_LINE_SIZE, + .fifo_size = VALLEYVIEW_FIFO_SIZE, + .max_wm = VALLEYVIEW_MAX_WM, + .default_wm = VALLEYVIEW_MAX_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params valleyview_cursor_wm_info = { - I965_CURSOR_FIFO, - VALLEYVIEW_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - G4X_FIFO_LINE_SIZE, + .fifo_size = I965_CURSOR_FIFO, + .max_wm = VALLEYVIEW_CURSOR_MAX_WM, + .default_wm = I965_CURSOR_DFT_WM, + .guard_size = 2, + .cacheline_size = G4X_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i965_cursor_wm_info = { - I965_CURSOR_FIFO, - I965_CURSOR_MAX_WM, - I965_CURSOR_DFT_WM, - 2, - I915_FIFO_LINE_SIZE, + .fifo_size = I965_CURSOR_FIFO, + .max_wm = I965_CURSOR_MAX_WM, + .default_wm = I965_CURSOR_DFT_WM, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i945_wm_info = { - I945_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE + .fifo_size = I945_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i915_wm_info = { - I915_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I915_FIFO_LINE_SIZE + .fifo_size = I915_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i830_wm_info = { - I855GM_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE + .fifo_size = I855GM_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I830_FIFO_LINE_SIZE, }; static const struct intel_watermark_params i845_wm_info = { - I830_FIFO_SIZE, - I915_MAX_WM, - 1, - 2, - I830_FIFO_LINE_SIZE + .fifo_size = I830_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I830_FIFO_LINE_SIZE, }; /** @@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) dev_priv->fsb_freq, dev_priv->mem_freq); if (!latency) { DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - pineview_disable_cxsr(dev); + intel_set_memory_cxsr(dev_priv, false); return; } @@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) I915_WRITE(DSPFW3, reg); DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); - /* activate cxsr */ - I915_WRITE(DSPFW3, - I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); - DRM_DEBUG_KMS("Self-refresh is enabled\n"); + intel_set_memory_cxsr(dev_priv, true); } else { - pineview_disable_cxsr(dev); - DRM_DEBUG_KMS("Self-refresh is disabled\n"); + intel_set_memory_cxsr(dev_priv, false); } } @@ -1249,15 +1287,14 @@ static bool vlv_compute_drain_latency(struct drm_device *dev, pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ entries = (clock / 1000) * pixel_size; - *plane_prec_mult = (entries > 256) ? - DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; - *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) * - pixel_size); + *plane_prec_mult = (entries > 128) ? + DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32; + *plane_dl = (64 * (*plane_prec_mult) * 4) / entries; entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ - *cursor_prec_mult = (entries > 256) ? - DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16; - *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4); + *cursor_prec_mult = (entries > 128) ? 
+ DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32; + *cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries; return true; } @@ -1282,9 +1319,9 @@ static void vlv_update_drain_latency(struct drm_device *dev) if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, &cursor_prec_mult, &cursora_dl)) { cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16; + DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64; planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16; + DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64; I915_WRITE(VLV_DDL1, cursora_prec | (cursora_dl << DDL_CURSORA_SHIFT) | @@ -1295,9 +1332,9 @@ static void vlv_update_drain_latency(struct drm_device *dev) if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, &cursor_prec_mult, &cursorb_dl)) { cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16; + DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64; planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ? - DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16; + DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64; I915_WRITE(VLV_DDL2, cursorb_prec | (cursorb_dl << DDL_CURSORB_SHIFT) | @@ -1316,6 +1353,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc) int plane_sr, cursor_sr; int ignore_plane_sr, ignore_cursor_sr; unsigned int enabled = 0; + bool cxsr_enabled; vlv_update_drain_latency(dev); @@ -1342,10 +1380,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc) &valleyview_wm_info, &valleyview_cursor_wm_info, &ignore_plane_sr, &cursor_sr)) { - I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); + cxsr_enabled = true; } else { - I915_WRITE(FW_BLC_SELF_VLV, - I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); + cxsr_enabled = false; + intel_set_memory_cxsr(dev_priv, false); plane_sr = cursor_sr = 0; } @@ -1365,6 +1403,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc) I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); } static void g4x_update_wm(struct drm_crtc *crtc) @@ -1375,6 +1416,7 @@ static void g4x_update_wm(struct drm_crtc *crtc) int planea_wm, planeb_wm, cursora_wm, cursorb_wm; int plane_sr, cursor_sr; unsigned int enabled = 0; + bool cxsr_enabled; if (g4x_compute_wm0(dev, PIPE_A, &g4x_wm_info, latency_ns, @@ -1394,10 +1436,10 @@ static void g4x_update_wm(struct drm_crtc *crtc) &g4x_wm_info, &g4x_cursor_wm_info, &plane_sr, &cursor_sr)) { - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + cxsr_enabled = true; } else { - I915_WRITE(FW_BLC_SELF, - I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); + cxsr_enabled = false; + intel_set_memory_cxsr(dev_priv, false); plane_sr = cursor_sr = 0; } @@ -1418,6 +1460,9 @@ static void g4x_update_wm(struct drm_crtc *crtc) I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); } static void i965_update_wm(struct drm_crtc *unused_crtc) @@ -1427,6 +1472,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) struct drm_crtc *crtc; int srwm = 1; int cursor_sr = 16; + bool cxsr_enabled; /* Calc sr entries for one plane configs */ crtc = single_enabled_crtc(dev); @@ -1468,13 +1514,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) DRM_DEBUG_KMS("self-refresh 
watermark: display plane %d " "cursor %d\n", srwm, cursor_sr); - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); + cxsr_enabled = true; } else { + cxsr_enabled = false; /* Turn off self refresh if both pipes are enabled */ - if (IS_CRESTLINE(dev)) - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); + intel_set_memory_cxsr(dev_priv, false); } DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", @@ -1486,6 +1530,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); /* update cursor SR watermark */ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); } static void i9xx_update_wm(struct drm_crtc *unused_crtc) @@ -1545,12 +1592,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); if (IS_I915GM(dev) && enabled) { - struct intel_framebuffer *fb; + struct drm_i915_gem_object *obj; - fb = to_intel_framebuffer(enabled->primary->fb); + obj = intel_fb_obj(enabled->primary->fb); /* self-refresh seems busted with untiled */ - if (fb->obj->tiling_mode == I915_TILING_NONE) + if (obj->tiling_mode == I915_TILING_NONE) enabled = NULL; } @@ -1560,10 +1607,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) cwm = 2; /* Play safe and disable self-refresh before adjusting watermarks. */ - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN)); + intel_set_memory_cxsr(dev_priv, false); /* Calc sr entries for one plane configs */ if (HAS_FW_BLC(dev) && enabled) { @@ -1609,17 +1653,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) I915_WRITE(FW_BLC, fwater_lo); I915_WRITE(FW_BLC2, fwater_hi); - if (HAS_FW_BLC(dev)) { - if (enabled) { - if (IS_I945G(dev) || IS_I945GM(dev)) - I915_WRITE(FW_BLC_SELF, - FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); - else if (IS_I915GM(dev)) - I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN)); - DRM_DEBUG_KMS("memory self refresh enabled\n"); - } else - DRM_DEBUG_KMS("memory self refresh disabled\n"); - } + if (enabled) + intel_set_memory_cxsr(dev_priv, true); } static void i845_update_wm(struct drm_crtc *unused_crtc) @@ -2707,10 +2742,11 @@ static void ilk_update_wm(struct drm_crtc *crtc) ilk_write_wm_values(dev_priv, &results); } -static void ilk_update_sprite_wm(struct drm_plane *plane, - struct drm_crtc *crtc, - uint32_t sprite_width, int pixel_size, - bool enabled, bool scaled) +static void +ilk_update_sprite_wm(struct drm_plane *plane, + struct drm_crtc *crtc, + uint32_t sprite_width, uint32_t sprite_height, + int pixel_size, bool enabled, bool scaled) { struct drm_device *dev = plane->dev; struct intel_plane *intel_plane = to_intel_plane(plane); @@ -2718,6 +2754,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane, intel_plane->wm.enabled = enabled; intel_plane->wm.scaled = scaled; intel_plane->wm.horiz_pixels = sprite_width; + intel_plane->wm.vert_pixels = sprite_width; intel_plane->wm.bytes_per_pixel = pixel_size; /* @@ -2852,13 +2889,16 @@ void intel_update_watermarks(struct drm_crtc *crtc) void intel_update_sprite_watermarks(struct drm_plane *plane, struct drm_crtc *crtc, - uint32_t sprite_width, int pixel_size, + uint32_t sprite_width, + uint32_t sprite_height, + int pixel_size, bool enabled, bool scaled) { struct drm_i915_private *dev_priv = plane->dev->dev_private; if 
(dev_priv->display.update_sprite_wm) - dev_priv->display.update_sprite_wm(plane, crtc, sprite_width, + dev_priv->display.update_sprite_wm(plane, crtc, + sprite_width, sprite_height, pixel_size, enabled, scaled); } @@ -3147,6 +3187,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) if (val < dev_priv->rps.max_freq_softlimit) mask |= GEN6_PM_RP_UP_THRESHOLD; + mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); + mask &= dev_priv->pm_rps_events; + /* IVB and SNB hard hangs on looping batchbuffer * if GEN6_PM_UP_EI_EXPIRED is masked. */ @@ -3250,7 +3293,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (IS_VALLEYVIEW(dev)) + if (IS_CHERRYVIEW(dev)) + valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + else if (IS_VALLEYVIEW(dev)) vlv_set_rps_idle(dev_priv); else gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); @@ -3348,6 +3393,15 @@ static void gen6_disable_rps(struct drm_device *dev) gen6_disable_rps_interrupts(dev); } +static void cherryview_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RC_CONTROL, 0); + + gen8_disable_rps_interrupts(dev); +} + static void valleyview_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3365,10 +3419,10 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) else mode = 0; } - DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); + DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", + (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? 
"on" : "off"); } static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) @@ -3392,8 +3446,8 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) mask = INTEL_RC6_ENABLE; if ((enable_rc6 & mask) != enable_rc6) - DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n", - enable_rc6 & mask, enable_rc6, mask); + DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", + enable_rc6 & mask, enable_rc6, mask); return enable_rc6 & mask; } @@ -3419,7 +3473,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); - bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); } @@ -3430,7 +3484,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); - snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); } @@ -3483,15 +3537,23 @@ static void gen8_enable_rps(struct drm_device *dev) for_each_ring(ring, dev_priv, unused) I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + if (IS_BROADWELL(dev)) + I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ + else + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; intel_print_rc6_info(dev, rc6_mask); - I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_EI_MODE(1) | - rc6_mask); + if (IS_BROADWELL(dev)) + I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | + GEN7_RC_CTL_TO_MODE | + rc6_mask); + else + I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_EI_MODE(1) | + rc6_mask); /* 4 Program defaults and thresholds for RPS*/ I915_WRITE(GEN6_RPNSWREQ, @@ -3727,7 +3789,57 @@ void gen6_update_ring_freq(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) +static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rp0; + + val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); + rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK; + + return rp0; +} + +static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rpe; + + val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); + rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; + + return rpe; +} + +static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rp1; + + val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); + rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK; + + return rp1; +} + +static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rpn; + + val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); + rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK; + return rpn; +} + +static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) +{ + u32 val, rp1; + + val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); + + rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) 
>> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; + + return rp1; +} + +static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) { u32 val, rp0; @@ -3752,7 +3864,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) return rpe; } -int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) +static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) { return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; } @@ -3766,6 +3878,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv) dev_priv->vlv_pctx->stolen->start); } + +/* Check that the pcbr address is not empty. */ +static void cherryview_check_pctx(struct drm_i915_private *dev_priv) +{ + unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; + + WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); +} + +static void cherryview_setup_pctx(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long pctx_paddr, paddr; + struct i915_gtt *gtt = &dev_priv->gtt; + u32 pcbr; + int pctx_size = 32*1024; + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + + pcbr = I915_READ(VLV_PCBR); + if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { + paddr = (dev_priv->mm.stolen_base + + (gtt->stolen_size - pctx_size)); + + pctx_paddr = (paddr & (~4095)); + I915_WRITE(VLV_PCBR, pctx_paddr); + } +} + static void valleyview_setup_pctx(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3840,6 +3981,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), dev_priv->rps.efficient_freq); + dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); + DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), + dev_priv->rps.rp1_freq); + dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), @@ -3855,11 +4001,142 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } +static void cherryview_init_gt_powersave(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + cherryview_setup_pctx(dev); + + mutex_lock(&dev_priv->rps.hw_lock); + + dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); + dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq), + dev_priv->rps.max_freq); + + dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + dev_priv->rps.efficient_freq); + + dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); + DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), + dev_priv->rps.rp1_freq); + + dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), + dev_priv->rps.min_freq); + + /* Preserve min/max settings in case of re-init */ + if (dev_priv->rps.max_freq_softlimit == 0) + dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; + + if (dev_priv->rps.min_freq_softlimit == 0) + dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; + + mutex_unlock(&dev_priv->rps.hw_lock); +} + static void valleyview_cleanup_gt_powersave(struct drm_device *dev) { 
valleyview_cleanup_pctx(dev); } +static void cherryview_enable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *ring; + u32 gtfifodbg, val, rc6_mode = 0, pcbr; + int i; + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + gtfifodbg = I915_READ(GTFIFODBG); + if (gtfifodbg) { + DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", + gtfifodbg); + I915_WRITE(GTFIFODBG, gtfifodbg); + } + + cherryview_check_pctx(dev_priv); + + /* 1a & 1b: Get forcewake during program sequence. Although the driver + * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ + gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); + + /* 2a: Program RC6 thresholds.*/ + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + + for_each_ring(ring, dev_priv, i) + I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + I915_WRITE(GEN6_RC_SLEEP, 0); + + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + + /* allows RC6 residency counter to work */ + I915_WRITE(VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + /* For now we assume BIOS is allocating and populating the PCBR */ + pcbr = I915_READ(VLV_PCBR); + + DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr); + + /* 3: Enable RC6 */ + if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && + (pcbr >> VLV_PCBR_ADDR_SHIFT)) + rc6_mode = GEN6_RC_CTL_EI_MODE(1); + + I915_WRITE(GEN6_RC_CONTROL, rc6_mode); + + /* 4 Program defaults and thresholds for RPS*/ + I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); + I915_WRITE(GEN6_RP_UP_EI, 66000); + I915_WRITE(GEN6_RP_DOWN_EI, 350000); + + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + + /* WaDisablePwrmtrEvent:chv (pre-production hw) */ + I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff); + I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00); + + /* 5: Enable RPS */ + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */ + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + + val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); + + DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? 
"yes" : "no"); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + dev_priv->rps.cur_freq = (val >> 8) & 0xff; + DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), + dev_priv->rps.cur_freq); + + DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", + vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + dev_priv->rps.efficient_freq); + + valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); + + gen8_enable_rps_interrupts(dev); + + gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); +} + static void valleyview_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3886,6 +4163,7 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RP_DOWN_EI, 350000); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240); I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | @@ -3906,9 +4184,11 @@ static void valleyview_enable_rps(struct drm_device *dev) /* allows RC6 residency counter to work */ I915_WRITE(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | + VLV_RENDER_RC0_COUNT_EN | VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); + if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; @@ -4666,33 +4946,60 @@ void intel_init_gt_powersave(struct drm_device *dev) { i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); - if (IS_VALLEYVIEW(dev)) + if (IS_CHERRYVIEW(dev)) + cherryview_init_gt_powersave(dev); + else if (IS_VALLEYVIEW(dev)) valleyview_init_gt_powersave(dev); } void intel_cleanup_gt_powersave(struct drm_device *dev) { - if (IS_VALLEYVIEW(dev)) + if (IS_CHERRYVIEW(dev)) + return; + else if (IS_VALLEYVIEW(dev)) valleyview_cleanup_gt_powersave(dev); } +/** + * intel_suspend_gt_powersave - suspend PM work and helper threads + * @dev: drm device + * + * We don't want to disable RC6 or other features here, we just want + * to make sure any work we've queued has finished and won't bother + * us while we're suspended. + */ +void intel_suspend_gt_powersave(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Interrupts should be disabled already to avoid re-arming. */ + WARN_ON(intel_irqs_enabled(dev_priv)); + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + cancel_work_sync(&dev_priv->rps.work); + + /* Force GPU to min freq during suspend */ + gen6_rps_idle(dev_priv); +} + void intel_disable_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Interrupts should be disabled already to avoid re-arming. 
*/ - WARN_ON(dev->irq_enabled); + WARN_ON(intel_irqs_enabled(dev_priv)); if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); ironlake_disable_rc6(dev); - } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { - if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work)) - intel_runtime_pm_put(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + intel_suspend_gt_powersave(dev); - cancel_work_sync(&dev_priv->rps.work); mutex_lock(&dev_priv->rps.hw_lock); - if (IS_VALLEYVIEW(dev)) + if (IS_CHERRYVIEW(dev)) + cherryview_disable_rps(dev); + else if (IS_VALLEYVIEW(dev)) valleyview_disable_rps(dev); else gen6_disable_rps(dev); @@ -4710,7 +5017,9 @@ static void intel_gen6_powersave_work(struct work_struct *work) mutex_lock(&dev_priv->rps.hw_lock); - if (IS_VALLEYVIEW(dev)) { + if (IS_CHERRYVIEW(dev)) { + cherryview_enable_rps(dev); + } else if (IS_VALLEYVIEW(dev)) { valleyview_enable_rps(dev); } else if (IS_BROADWELL(dev)) { gen8_enable_rps(dev); @@ -4735,7 +5044,7 @@ void intel_enable_gt_powersave(struct drm_device *dev) ironlake_enable_rc6(dev); intel_init_emon(dev); mutex_unlock(&dev->struct_mutex); - } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) { + } else if (INTEL_INFO(dev)->gen >= 6) { /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path @@ -4918,11 +5227,9 @@ static void gen6_check_mch_setup(struct drm_device *dev) uint32_t tmp; tmp = I915_READ(MCH_SSKPD); - if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) { - DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp); - DRM_INFO("This can cause pipe underruns and display issues.\n"); - DRM_INFO("Please upgrade your BIOS to fix this.\n"); - } + if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) + DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", + tmp); } static void gen6_init_clock_gating(struct drm_device *dev) @@ -5108,7 +5415,7 @@ static void gen8_init_clock_gating(struct drm_device *dev) I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); I915_WRITE(_3D_CHICKEN3, - _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)); + _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2))); I915_WRITE(COMMON_SLICE_CHICKEN2, _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE)); @@ -5343,10 +5650,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) } DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); - dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv); - DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz", - dev_priv->vlv_cdclk_freq); - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); /* WaDisableEarlyCull:vlv */ @@ -5421,6 +5724,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev) static void cherryview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; + + mutex_lock(&dev_priv->rps.hw_lock); + val = vlv_punit_read(dev_priv, CCK_FUSE_REG); + mutex_unlock(&dev_priv->rps.hw_lock); + switch ((val >> 2) & 0x7) { + case 0: + case 1: + dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200; + dev_priv->mem_freq = 1600; + break; + case 2: + dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267; + dev_priv->mem_freq = 1600; + break; + case 3: + dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333; + dev_priv->mem_freq = 2000; + break; + case 4: + dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320; + dev_priv->mem_freq = 1600; + break; + case 5: + dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400; + dev_priv->mem_freq 
= 1600; + break; + } + DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq); I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); @@ -5661,7 +5993,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv, static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - unsigned long irqflags; /* * After we re-enable the power well, if we touch VGA register 0x3d5 @@ -5677,21 +6008,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); - if (IS_BROADWELL(dev)) { - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B), - dev_priv->de_irq_mask[PIPE_B]); - I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B), - ~dev_priv->de_irq_mask[PIPE_B] | - GEN8_PIPE_VBLANK); - I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C), - dev_priv->de_irq_mask[PIPE_C]); - I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C), - ~dev_priv->de_irq_mask[PIPE_C] | - GEN8_PIPE_VBLANK); - POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C)); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - } + if (IS_BROADWELL(dev)) + gen8_irq_power_well_post_enable(dev_priv); } static void hsw_set_power_well(struct drm_i915_private *dev_priv, @@ -5762,34 +6080,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, return true; } -void __vlv_set_power_well(struct drm_i915_private *dev_priv, - enum punit_power_well power_well_id, bool enable) +static void vlv_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) { - struct drm_device *dev = dev_priv->dev; + enum punit_power_well power_well_id = power_well->data; u32 mask; u32 state; u32 ctrl; - enum pipe pipe; - - if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) { - if (enable) { - /* - * Enable the CRI clock source so we can get at the - * display and the reference clock for VGA - * hotplug / manual detection. - */ - I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | - DPLL_REFA_CLK_ENABLE_VLV | - DPLL_INTEGRATED_CRI_CLK_VLV); - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - } else { - for_each_pipe(pipe) - assert_pll_disabled(dev_priv, pipe); - /* Assert common reset */ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & - ~DPIO_CMNRST); - } - } mask = PUNIT_PWRGT_MASK(power_well_id); state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : @@ -5817,28 +6114,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv, out: mutex_unlock(&dev_priv->rps.hw_lock); - - /* - * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - - * 6. De-assert cmn_reset/side_reset. Same as VLV X0. - * a. GUnit 0x2110 bit[0] set to 1 (def 0) - * b. The other bits such as sfr settings / modesel may all - * be set to 0. - * - * This should only be done on init and resume from S3 with - * both PLLs disabled, or we risk losing DPIO and PLL - * synchronization. 
- */ - if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable) - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); -} - -static void vlv_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) -{ - enum punit_power_well power_well_id = power_well->data; - - __vlv_set_power_well(dev_priv, power_well_id, enable); } static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -5930,6 +6205,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, false); } +static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection. + */ + I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | + DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV); + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + + vlv_set_power_well(dev_priv, power_well, true); + + /* + * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - + * 6. De-assert cmn_reset/side_reset. Same as VLV X0. + * a. GUnit 0x2110 bit[0] set to 1 (def 0) + * b. The other bits such as sfr settings / modesel may all + * be set to 0. + * + * This should only be done on init and resume from S3 with + * both PLLs disabled, or we risk losing DPIO and PLL + * synchronization. + */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); +} + +static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + struct drm_device *dev = dev_priv->dev; + enum pipe pipe; + + WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); + + for_each_pipe(pipe) + assert_pll_disabled(dev_priv, pipe); + + /* Assert common reset */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); + + vlv_set_power_well(dev_priv, power_well, false); +} + static void check_power_well_state(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -6079,6 +6401,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \ BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \ BIT(POWER_DOMAIN_PORT_CRT) | \ + BIT(POWER_DOMAIN_PLLS) | \ BIT(POWER_DOMAIN_INIT)) #define HSW_DISPLAY_POWER_DOMAINS ( \ (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ @@ -6178,6 +6501,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = { .is_enabled = vlv_power_well_enabled, }; +static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { + .sync_hw = vlv_power_well_sync_hw, + .enable = vlv_dpio_cmn_power_well_enable, + .disable = vlv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + static const struct i915_power_well_ops vlv_dpio_power_well_ops = { .sync_hw = vlv_power_well_sync_hw, .enable = vlv_power_well_enable, @@ -6238,10 +6568,25 @@ static struct i915_power_well vlv_power_wells[] = { .name = "dpio-common", .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, .data = PUNIT_POWER_WELL_DPIO_CMN_BC, - .ops = &vlv_dpio_power_well_ops, + .ops = &vlv_dpio_cmn_power_well_ops, }, }; +static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, + enum punit_power_well power_well_id) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + int i; + + for_each_power_well(i, power_well, 
POWER_DOMAIN_MASK, power_domains) { + if (power_well->data == power_well_id) + return power_well; + } + + return NULL; +} + #define set_power_wells(power_domains, __power_wells) ({ \ (power_domains)->power_wells = (__power_wells); \ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ @@ -6292,11 +6637,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn = + lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + struct i915_power_well *disp2d = + lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); + + /* nothing to do if common lane is already off */ + if (!cmn->ops->is_enabled(dev_priv, cmn)) + return; + + /* If the display might be already active skip this */ + if (disp2d->ops->is_enabled(dev_priv, disp2d) && + I915_READ(DPIO_CTL) & DPIO_CMNRST) + return; + + DRM_DEBUG_KMS("toggling display PHY side reset\n"); + + /* cmnlane needs DPLL registers */ + disp2d->ops->enable(dev_priv, disp2d); + + /* + * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: + * Need to assert and de-assert PHY SB reset by gating the + * common lane power, then un-gating it. + * Simply ungating isn't enough to reset the PHY enough to get + * ports and lanes running. + */ + cmn->ops->disable(dev_priv, cmn); +} + void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; struct i915_power_domains *power_domains = &dev_priv->power_domains; power_domains->initializing = true; + + if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + mutex_lock(&power_domains->lock); + vlv_cmnlane_wa(dev_priv); + mutex_unlock(&power_domains->lock); + } + /* For now, we need the power well to be always enabled. */ intel_display_set_init_power(dev_priv, true); intel_power_domains_resume(dev_priv); @@ -6469,7 +6853,7 @@ void intel_init_pm(struct drm_device *dev) (dev_priv->is_ddr3 == 1) ? 
"3" : "2", dev_priv->fsb_freq, dev_priv->mem_freq); /* Disable CxSR and never update its watermark again */ - pineview_disable_cxsr(dev); + intel_set_memory_cxsr(dev_priv, false); dev_priv->display.update_wm = NULL; } else dev_priv->display.update_wm = pineview_update_wm; @@ -6552,7 +6936,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) return 0; } -int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) +static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) { int div; @@ -6574,7 +6958,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); } -int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) +static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) { int mul; @@ -6596,6 +6980,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; } +static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) +{ + int div, freq; + + switch (dev_priv->rps.cz_freq) { + case 200: + div = 5; + break; + case 267: + div = 6; + break; + case 320: + case 333: + case 400: + div = 8; + break; + default: + return -1; + } + + freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2); + + return freq; +} + +static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) +{ + int mul, opcode; + + switch (dev_priv->rps.cz_freq) { + case 200: + mul = 5; + break; + case 267: + mul = 6; + break; + case 320: + case 333: + case 400: + mul = 8; + break; + default: + return -1; + } + + opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2); + + return opcode; +} + +int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) +{ + int ret = -1; + + if (IS_CHERRYVIEW(dev_priv->dev)) + ret = chv_gpu_freq(dev_priv, val); + else if (IS_VALLEYVIEW(dev_priv->dev)) + ret = byt_gpu_freq(dev_priv, val); + + return ret; +} + +int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val) +{ + int ret = -1; + + if (IS_CHERRYVIEW(dev_priv->dev)) + ret = chv_freq_opcode(dev_priv, val); + else if (IS_VALLEYVIEW(dev_priv->dev)) + ret = byt_freq_opcode(dev_priv, val); + + return ret; +} + void intel_pm_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -6606,5 +7064,5 @@ void intel_pm_setup(struct drm_device *dev) intel_gen6_powersave_work); dev_priv->pm.suspended = false; - dev_priv->pm.irqs_disabled = false; + dev_priv->pm._irqs_disabled = false; } diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h index a5e783a9928a..fd4f66231d30 100644 --- a/drivers/gpu/drm/i915/intel_renderstate.h +++ b/drivers/gpu/drm/i915/intel_renderstate.h @@ -28,7 +28,6 @@ struct intel_renderstate_rodata { const u32 *reloc; - const u32 reloc_items; const u32 *batch; const u32 batch_items; }; @@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state; #define RO_RENDERSTATE(_g) \ const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ .reloc = gen ## _g ## _null_state_relocs, \ - .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \ .batch = gen ## _g ## _null_state_batch, \ .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ } diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c index 740538ad0977..56c1429d8a60 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c +++ 
b/drivers/gpu/drm/i915/intel_renderstate_gen6.c @@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = { 0x0000002c, 0x000001e0, 0x000001e4, + -1, }; static const u32 gen6_null_state_batch[] = { diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c index 6fa7ff2a1298..419e35a7b0ff 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c @@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = { 0x00000010, 0x00000018, 0x000001ec, + -1, }; static const u32 gen7_null_state_batch[] = { diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c index 5c875615d42a..75ef1b5de45c 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c @@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = { 0x00000050, 0x00000060, 0x000003ec, + -1, }; static const u32 gen8_null_state_batch[] = { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 279488addf3f..47a126a0493f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size) return space; } -static inline int ring_space(struct intel_engine_cs *ring) +static inline int ring_space(struct intel_ringbuffer *ringbuf) { - struct intel_ringbuffer *ringbuf = ring->buffer; return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); } @@ -381,6 +380,27 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, } static int +gen8_emit_pipe_control(struct intel_engine_cs *ring, + u32 flags, u32 scratch_addr) +{ + int ret; + + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static int gen8_render_ring_flush(struct intel_engine_cs *ring, u32 invalidate_domains, u32 flush_domains) { @@ -403,22 +423,17 @@ gen8_render_ring_flush(struct intel_engine_cs *ring, flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; - } - - ret = intel_ring_begin(ring, 6); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(ring, flags); - intel_ring_emit(ring, scratch_addr); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - return 0; + /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */ + ret = gen8_emit_pipe_control(ring, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD, + 0); + if (ret) + return ret; + } + return gen8_emit_pipe_control(ring, flags, scratch_addr); } static void ring_write_tail(struct intel_engine_cs *ring, @@ -517,6 +532,9 @@ static int init_ring_common(struct intel_engine_cs *ring) else ring_setup_phys_status_page(ring); + /* Enforce ordering by reading HEAD register back */ + I915_READ_HEAD(ring); + /* Initialize the ring. 
This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring @@ -545,7 +563,7 @@ static int init_ring_common(struct intel_engine_cs *ring) else { ringbuf->head = I915_READ_HEAD(ring); ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); ringbuf->last_retired_head = -1; } @@ -604,6 +622,8 @@ static int init_render_ring(struct intel_engine_cs *ring) struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret = init_ring_common(ring); + if (ret) + return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) @@ -658,6 +678,13 @@ static int init_render_ring(struct intel_engine_cs *ring) static void render_ring_cleanup(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->semaphore_obj) { + i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); + drm_gem_object_unreference(&dev_priv->semaphore_obj->base); + dev_priv->semaphore_obj = NULL; + } if (ring->scratch.obj == NULL) return; @@ -671,29 +698,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring) ring->scratch.obj = NULL; } +static int gen8_rcs_signal(struct intel_engine_cs *signaller, + unsigned int num_dwords) +{ +#define MBOX_UPDATE_DWORDS 8 + struct drm_device *dev = signaller->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *waiter; + int i, ret, num_rings; + + num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; +#undef MBOX_UPDATE_DWORDS + + ret = intel_ring_begin(signaller, num_dwords); + if (ret) + return ret; + + for_each_ring(waiter, dev_priv, i) { + u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; + if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) + continue; + + intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_FLUSH_ENABLE); + intel_ring_emit(signaller, lower_32_bits(gtt_offset)); + intel_ring_emit(signaller, upper_32_bits(gtt_offset)); + intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); + intel_ring_emit(signaller, 0); + intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | + MI_SEMAPHORE_TARGET(waiter->id)); + intel_ring_emit(signaller, 0); + } + + return 0; +} + +static int gen8_xcs_signal(struct intel_engine_cs *signaller, + unsigned int num_dwords) +{ +#define MBOX_UPDATE_DWORDS 6 + struct drm_device *dev = signaller->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *waiter; + int i, ret, num_rings; + + num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; +#undef MBOX_UPDATE_DWORDS + + ret = intel_ring_begin(signaller, num_dwords); + if (ret) + return ret; + + for_each_ring(waiter, dev_priv, i) { + u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; + if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) + continue; + + intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | + MI_FLUSH_DW_OP_STOREDW); + intel_ring_emit(signaller, lower_32_bits(gtt_offset) | + MI_FLUSH_DW_USE_GTT); + intel_ring_emit(signaller, upper_32_bits(gtt_offset)); + intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); + intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | + 
MI_SEMAPHORE_TARGET(waiter->id)); + intel_ring_emit(signaller, 0); + } + + return 0; +} + static int gen6_signal(struct intel_engine_cs *signaller, unsigned int num_dwords) { struct drm_device *dev = signaller->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *useless; - int i, ret; + int i, ret, num_rings; - /* NB: In order to be able to do semaphore MBOX updates for varying - * number of rings, it's easiest if we round up each individual update - * to a multiple of 2 (since ring updates must always be a multiple of - * 2) even though the actual update only requires 3 dwords. - */ -#define MBOX_UPDATE_DWORDS 4 - if (i915_semaphore_is_enabled(dev)) - num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS); - else - return intel_ring_begin(signaller, num_dwords); +#define MBOX_UPDATE_DWORDS 3 + num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); +#undef MBOX_UPDATE_DWORDS ret = intel_ring_begin(signaller, num_dwords); if (ret) return ret; -#undef MBOX_UPDATE_DWORDS for_each_ring(useless, dev_priv, i) { u32 mbox_reg = signaller->semaphore.mbox.signal[i]; @@ -701,15 +795,13 @@ static int gen6_signal(struct intel_engine_cs *signaller, intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(signaller, mbox_reg); intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); - intel_ring_emit(signaller, MI_NOOP); - } else { - intel_ring_emit(signaller, MI_NOOP); - intel_ring_emit(signaller, MI_NOOP); - intel_ring_emit(signaller, MI_NOOP); - intel_ring_emit(signaller, MI_NOOP); } } + /* If num_dwords was rounded, make sure the tail pointer is correct */ + if (num_rings % 2 == 0) + intel_ring_emit(signaller, MI_NOOP); + return 0; } @@ -727,7 +819,11 @@ gen6_add_request(struct intel_engine_cs *ring) { int ret; - ret = ring->semaphore.signal(ring, 4); + if (ring->semaphore.signal) + ret = ring->semaphore.signal(ring, 4); + else + ret = intel_ring_begin(ring, 4); + if (ret) return ret; @@ -754,6 +850,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, * @signaller - ring which has, or will signal * @seqno - seqno which the waiter will block on */ + +static int +gen8_ring_sync(struct intel_engine_cs *waiter, + struct intel_engine_cs *signaller, + u32 seqno) +{ + struct drm_i915_private *dev_priv = waiter->dev->dev_private; + int ret; + + ret = intel_ring_begin(waiter, 4); + if (ret) + return ret; + + intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD); + intel_ring_emit(waiter, seqno); + intel_ring_emit(waiter, + lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); + intel_ring_emit(waiter, + upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); + intel_ring_advance(waiter); + return 0; +} + static int gen6_ring_sync(struct intel_engine_cs *waiter, struct intel_engine_cs *signaller, @@ -901,7 +1023,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) - ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; @@ -916,7 +1038,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) - ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, 
flags); } @@ -1109,7 +1231,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring) GT_PARITY_ERROR(dev))); else I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1129,7 +1251,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring) I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); else I915_WRITE_IMR(ring, ~0); - ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); + gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -1147,7 +1269,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); + gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1167,7 +1289,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { I915_WRITE_IMR(ring, ~0); - snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); + gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -1241,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring, /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ #define I830_BATCH_LIMIT (256*1024) +#define I830_TLB_ENTRIES (2) +#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) static int i830_dispatch_execbuffer(struct intel_engine_cs *ring, u64 offset, u32 len, unsigned flags) { + u32 cs_offset = ring->scratch.gtt_offset; int ret; - if (flags & I915_DISPATCH_PINNED) { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } else { - u32 cs_offset = ring->scratch.gtt_offset; + /* Evict the invalid PTE TLBs */ + intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); + intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ + intel_ring_emit(ring, cs_offset); + intel_ring_emit(ring, 0xdeadbeef); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + if ((flags & I915_DISPATCH_PINNED) == 0) { if (len > I830_BATCH_LIMIT) return -ENOSPC; - ret = intel_ring_begin(ring, 9+3); + ret = intel_ring_begin(ring, 6 + 2); if (ret) return ret; - /* Blit the batch (which has now all relocs applied) to the stable batch - * scratch bo area (so that the CS never stumbles over its tlb - * invalidation bug) ... */ - intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | - XY_SRC_COPY_BLT_WRITE_ALPHA | - XY_SRC_COPY_BLT_WRITE_RGB); - intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); + + /* Blit the batch (which has now all relocs applied) to the + * stable batch scratch bo area (so that the CS never + * stumbles over its tlb invalidation bug) ... 
+ */ + intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); + intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096); intel_ring_emit(ring, cs_offset); - intel_ring_emit(ring, 0); intel_ring_emit(ring, 4096); intel_ring_emit(ring, offset); + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); /* ... and execute it. */ - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, cs_offset + len - 8); - intel_ring_advance(ring); + offset = cs_offset; } + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + return 0; } @@ -1329,6 +1463,7 @@ static int init_status_page(struct intel_engine_cs *ring) struct drm_i915_gem_object *obj; if ((obj = ring->status_page.obj) == NULL) { + unsigned flags; int ret; obj = i915_gem_alloc_object(ring->dev, 4096); @@ -1341,7 +1476,20 @@ static int init_status_page(struct intel_engine_cs *ring) if (ret) goto err_unref; - ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); + flags = 0; + if (!HAS_LLC(ring->dev)) + /* On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actualy map it). 
+ */ + flags |= PIN_MAPPABLE; + ret = i915_gem_obj_ggtt_pin(obj, 4096, flags); if (ret) { err_unref: drm_gem_object_unreference(&obj->base); @@ -1378,15 +1526,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring) return 0; } -static int allocate_ring_buffer(struct intel_engine_cs *ring) +static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) +{ + if (!ringbuf->obj) + return; + + iounmap(ringbuf->virtual_start); + i915_gem_object_ggtt_unpin(ringbuf->obj); + drm_gem_object_unreference(&ringbuf->obj->base); + ringbuf->obj = NULL; +} + +static int intel_alloc_ringbuffer_obj(struct drm_device *dev, + struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_ringbuffer *ringbuf = ring->buffer; struct drm_i915_gem_object *obj; int ret; - if (intel_ring_initialized(ring)) + if (ringbuf->obj) return 0; obj = NULL; @@ -1397,6 +1555,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring) if (obj == NULL) return -ENOMEM; + /* mark ring buffers as read-only from GPU side by default */ + obj->gt_ro = 1; + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); if (ret) goto err_unref; @@ -1455,7 +1616,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = allocate_ring_buffer(ring); + ret = intel_alloc_ringbuffer_obj(dev, ringbuf); if (ret) { DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); goto error; @@ -1496,11 +1657,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) intel_stop_ring_buffer(ring); WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); - iounmap(ringbuf->virtual_start); - - i915_gem_object_ggtt_unpin(ringbuf->obj); - drm_gem_object_unreference(&ringbuf->obj->base); - ringbuf->obj = NULL; + intel_destroy_ringbuffer_obj(ringbuf); ring->preallocated_lazy_request = NULL; ring->outstanding_lazy_seqno = 0; @@ -1526,7 +1683,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) ringbuf->head = ringbuf->last_retired_head; ringbuf->last_retired_head = -1; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); if (ringbuf->space >= n) return 0; } @@ -1549,7 +1706,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) ringbuf->head = ringbuf->last_retired_head; ringbuf->last_retired_head = -1; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); return 0; } @@ -1578,7 +1735,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) trace_i915_ring_wait_begin(ring); do { ringbuf->head = I915_READ_HEAD(ring); - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); if (ringbuf->space >= n) { ret = 0; break; @@ -1630,7 +1787,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) iowrite32(MI_NOOP, virt++); ringbuf->tail = 0; - ringbuf->space = ring_space(ring); + ringbuf->space = ring_space(ringbuf); return 0; } @@ -1746,14 +1903,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring) void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; BUG_ON(ring->outstanding_lazy_seqno); - if (INTEL_INFO(ring->dev)->gen >= 6) { + if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); - if 
(HAS_VEBOX(ring->dev)) + if (HAS_VEBOX(dev)) I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); } @@ -1941,45 +2099,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct drm_i915_gem_object *obj; + int ret; ring->name = "render ring"; ring->id = RCS; ring->mmio_base = RENDER_RING_BASE; - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_INFO(dev)->gen >= 8) { + if (i915_semaphore_is_enabled(dev)) { + obj = i915_gem_alloc_object(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else { + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); + if (ret != 0) { + drm_gem_object_unreference(&obj->base); + DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); + i915.semaphores = 0; + } else + dev_priv->semaphore_obj = obj; + } + } + ring->add_request = gen6_add_request; + ring->flush = gen8_render_ring_flush; + ring->irq_get = gen8_ring_get_irq; + ring->irq_put = gen8_ring_put_irq; + ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + ring->get_seqno = gen6_ring_get_seqno; + ring->set_seqno = ring_set_seqno; + if (i915_semaphore_is_enabled(dev)) { + WARN_ON(!dev_priv->semaphore_obj); + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_rcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } + } else if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; ring->flush = gen7_render_ring_flush; if (INTEL_INFO(dev)->gen == 6) ring->flush = gen6_render_ring_flush; - if (INTEL_INFO(dev)->gen >= 8) { - ring->flush = gen8_render_ring_flush; - ring->irq_get = gen8_ring_get_irq; - ring->irq_put = gen8_ring_put_irq; - } else { - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - } + ring->irq_get = gen6_ring_get_irq; + ring->irq_put = gen6_ring_put_irq; ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 platform. - * And there is no VCS2 ring on the pre-gen8 platform. So the - * semaphore between RCS and VCS2 is initialized as INVALID. - * Gen8 will initialize the sema between VCS2 and RCS later. - */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen6_ring_sync; + ring->semaphore.signal = gen6_signal; + /* + * The current semaphore is only applied on pre-gen8 + * platform. And there is no VCS2 ring on the pre-gen8 + * platform. So the semaphore between RCS and VCS2 is + * initialized as INVALID. Gen8 will initialize the + * sema between VCS2 and RCS later. 
+ */ + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; ring->flush = gen4_render_ring_flush; @@ -2007,6 +2194,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->irq_enable_mask = I915_USER_INTERRUPT; } ring->write_tail = ring_write_tail; + if (IS_HASWELL(dev)) ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; else if (IS_GEN8(dev)) @@ -2024,10 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) /* Workaround batchbuffer to combat CS tlb bug. */ if (HAS_BROKEN_CS_TLB(dev)) { - struct drm_i915_gem_object *obj; - int ret; - - obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); + obj = i915_gem_alloc_object(dev, I830_WA_SIZE); if (obj == NULL) { DRM_ERROR("Failed to allocate batch bo\n"); return -ENOMEM; @@ -2157,31 +2342,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } } else { ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen6_ring_sync; + ring->semaphore.signal = gen6_signal; + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 platform. - * And there is no VCS2 ring on the pre-gen8 platform. So the - * semaphore between VCS and VCS2 is initialized as INVALID. - * Gen8 will initialize the sema between VCS2 and VCS later. 
- */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } else { ring->mmio_base = BSD_RING_BASE; ring->flush = bsd_ring_flush; @@ -2218,7 +2404,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) return -EINVAL; } - ring->name = "bds2_ring"; + ring->name = "bsd2 ring"; ring->id = VCS2; ring->write_tail = ring_write_tail; @@ -2233,25 +2419,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on the pre-gen8. And there - * is no bsd2 ring on the pre-gen8. So now the semaphore_register - * between VCS2 and other ring is initialized as invalid. - * Gen8 will initialize the sema between VCS2 and other ring later. - */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; - + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); @@ -2277,30 +2449,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) ring->irq_get = gen8_ring_get_irq; ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } } else { ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.signal = gen6_signal; + ring->semaphore.sync_to = gen6_ring_sync; + /* + * The current semaphore is only applied on pre-gen8 + * platform. And there is no VCS2 ring on the pre-gen8 + * platform. So the semaphore between BCS and VCS2 is + * initialized as INVALID. Gen8 will initialize the + * sema between BCS and VCS2 later. 
+ */ + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - /* - * The current semaphore is only applied on pre-gen8 platform. And - * there is no VCS2 ring on the pre-gen8 platform. So the semaphore - * between BCS and VCS2 is initialized as INVALID. - * Gen8 will initialize the sema between BCS and VCS2 later. - */ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); @@ -2327,24 +2507,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) ring->irq_get = gen8_ring_get_irq; ring->irq_put = gen8_ring_put_irq; ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen8_ring_sync; + ring->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT; + } } else { ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; ring->irq_get = hsw_vebox_get_irq; ring->irq_put = hsw_vebox_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + if (i915_semaphore_is_enabled(dev)) { + ring->semaphore.sync_to = gen6_ring_sync; + ring->semaphore.signal = gen6_signal; + ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; + ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; + ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; + ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; + ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; + ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; + ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; + ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + } } - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; ring->init = init_ring_common; return intel_init_ring_buffer(dev, ring); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h index e72017bdcd7f..70525d0c2c74 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -40,10 +40,37 @@ struct intel_hw_status_page { #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) +/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to + * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. + */ +#define i915_semaphore_seqno_size sizeof(uint64_t) +#define GEN8_SIGNAL_OFFSET(__ring, to) \ + (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \ + (i915_semaphore_seqno_size * (to))) + +#define GEN8_WAIT_OFFSET(__ring, from) \ + (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \ + (i915_semaphore_seqno_size * (__ring)->id)) + +#define GEN8_RING_SEMAPHORE_INIT do { \ + if (!dev_priv->semaphore_obj) { \ + break; \ + } \ + ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \ + ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \ + ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \ + ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \ + ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \ + ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \ + } while(0) + enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, HANGCHECK_ACTIVE, + HANGCHECK_ACTIVE_LOOP, HANGCHECK_KICK, HANGCHECK_HUNG, }; @@ -52,6 +79,7 @@ enum intel_ring_hangcheck_action { struct intel_ring_hangcheck { u64 acthd; + u64 max_acthd; u32 seqno; int score; enum intel_ring_hangcheck_action action; @@ -127,15 +155,55 @@ struct intel_engine_cs { #define I915_DISPATCH_PINNED 0x2 void (*cleanup)(struct intel_engine_cs *ring); + /* GEN8 signal/wait table - never trust comments! + * signal to signal to signal to signal to signal to + * RCS VCS BCS VECS VCS2 + * -------------------------------------------------------------------- + * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) | + * |------------------------------------------------------------------- + * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) | + * |------------------------------------------------------------------- + * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) | + * |------------------------------------------------------------------- + * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) | + * |------------------------------------------------------------------- + * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) | + * |------------------------------------------------------------------- + * + * Generalization: + * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id) + * ie. 
transpose of g(x, y) + * + * sync from sync from sync from sync from sync from + * RCS VCS BCS VECS VCS2 + * -------------------------------------------------------------------- + * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) | + * |------------------------------------------------------------------- + * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) | + * |------------------------------------------------------------------- + * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) | + * |------------------------------------------------------------------- + * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) | + * |------------------------------------------------------------------- + * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) | + * |------------------------------------------------------------------- + * + * Generalization: + * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id) + * ie. transpose of f(x, y) + */ struct { u32 sync_seqno[I915_NUM_RINGS-1]; - struct { - /* our mbox written by others */ - u32 wait[I915_NUM_RINGS]; - /* mboxes this ring signals to */ - u32 signal[I915_NUM_RINGS]; - } mbox; + union { + struct { + /* our mbox written by others */ + u32 wait[I915_NUM_RINGS]; + /* mboxes this ring signals to */ + u32 signal[I915_NUM_RINGS]; + } mbox; + u64 signal_ggtt[I915_NUM_RINGS]; + }; /* AKA wait() */ int (*sync_to)(struct intel_engine_cs *ring, @@ -238,9 +306,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring, int idx; /* - * cs -> 0 = vcs, 1 = bcs - * vcs -> 0 = bcs, 1 = cs, - * bcs -> 0 = cs, 1 = vcs. + * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2; + * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs; + * bcs -> 0 = vecs, 1 = vcs2. 
2 = rcs, 3 = vcs; + * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs; + * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs; */ idx = (other - ring) - 1; @@ -318,9 +388,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev); u64 intel_ring_get_active_head(struct intel_engine_cs *ring); void intel_ring_setup_status_page(struct intel_engine_cs *ring); -static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring) +static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) { - return ring->buffer->tail; + return ringbuf->tail; } static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 20375cc7f82d..9350edd6728d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2433,7 +2433,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, connector->base.unregister = intel_sdvo_connector_unregister; intel_connector_attach_encoder(&connector->base, &encoder->base); - ret = drm_sysfs_connector_add(drm_connector); + ret = drm_connector_register(drm_connector); if (ret < 0) goto err1; @@ -2446,7 +2446,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, return 0; err2: - drm_sysfs_connector_remove(drm_connector); + drm_connector_unregister(drm_connector); err1: drm_connector_cleanup(drm_connector); @@ -2559,7 +2559,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) return true; err: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2638,7 +2638,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) return true; err: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); intel_sdvo_destroy(connector); return false; } @@ -2711,7 +2711,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { if (intel_attached_encoder(connector) == &intel_sdvo->base) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); intel_sdvo_destroy(connector); } } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9a17b4e92ef4..168c6652cda1 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, sprctl |= SP_ENABLE; - intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true, + intel_update_sprite_watermarks(dplane, crtc, src_w, src_h, + pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ @@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) if (atomic_update) intel_pipe_update_end(intel_crtc, start_vbl_count); - intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); + intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false); } static int @@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (IS_HASWELL(dev) || IS_BROADWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; - intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, + intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size, + true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ @@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) */ intel_wait_for_vblank(dev, pipe); - 
intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); + intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false); } static int @@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ dvscntr |= DVS_ENABLE; - intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, + intel_update_sprite_watermarks(plane, crtc, src_w, src_h, + pixel_size, true, src_w != crtc_w || src_h != crtc_h); /* Sizes are 0 based */ @@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) */ intel_wait_for_vblank(dev, pipe); - intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); + intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false); } static void @@ -819,6 +822,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_device *dev = plane->dev; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_plane *intel_plane = to_intel_plane(plane); + enum pipe pipe = intel_crtc->pipe; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; struct drm_i915_gem_object *old_obj = intel_plane->obj; @@ -1006,6 +1010,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, */ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); + i915_gem_track_fb(old_obj, obj, + INTEL_FRONTBUFFER_SPRITE(pipe)); mutex_unlock(&dev->struct_mutex); if (ret) @@ -1039,6 +1045,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, else intel_plane->disable_plane(plane, crtc); + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe)); + if (!primary_was_enabled && primary_enabled) intel_post_enable_primary(crtc); } @@ -1068,6 +1076,7 @@ intel_disable_plane(struct drm_plane *plane) struct drm_device *dev = plane->dev; struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_crtc *intel_crtc; + enum pipe pipe; if (!plane->fb) return 0; @@ -1076,6 +1085,7 @@ intel_disable_plane(struct drm_plane *plane) return -EINVAL; intel_crtc = to_intel_crtc(plane->crtc); + pipe = intel_crtc->pipe; if (intel_crtc->active) { bool primary_was_enabled = intel_crtc->primary_enabled; @@ -1094,6 +1104,8 @@ intel_disable_plane(struct drm_plane *plane) mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(intel_plane->obj); + i915_gem_track_fb(intel_plane->obj, NULL, + INTEL_FRONTBUFFER_SPRITE(pipe)); mutex_unlock(&dev->struct_mutex); intel_plane->obj = NULL; @@ -1114,7 +1126,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_sprite_colorkey *set = data; - struct drm_mode_object *obj; struct drm_plane *plane; struct intel_plane *intel_plane; int ret = 0; @@ -1128,13 +1139,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); - obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); - if (!obj) { + plane = drm_plane_find(dev, set->plane_id); + if (!plane) { ret = -ENOENT; goto out_unlock; } - plane = obj_to_plane(obj); intel_plane = to_intel_plane(plane); ret = intel_plane->update_colorkey(plane, set); @@ -1147,7 +1157,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_intel_sprite_colorkey *get = data; - struct drm_mode_object *obj; struct drm_plane *plane; struct intel_plane *intel_plane; int ret = 0; @@ -1157,13 +1166,12 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 
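Stepping back to the semaphore offset table added to intel_ringbuffer.h at the start of this section: the formula g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id) fully determines the offsets shown in that table. Below is a minimal standalone sketch, not driver code; NUM_RINGS = 5 and the 8-byte seqno slot are read off the table itself and are assumptions of the sketch.

#include <stdio.h>

/* Illustrative only: mirrors the mailbox layout described in the comment
 * added to intel_ringbuffer.h, assuming 5 rings and 8 bytes per seqno slot. */
#define NUM_RINGS  5
#define SEQNO_SIZE 8

enum ring_id { RCS, VCS, BCS, VECS, VCS2 };
static const char *name[] = { "RCS", "VCS", "BCS", "VECS", "VCS2" };

/* g(x, y): offset ring x uses when it syncs from ring y, per the table */
static unsigned int g(enum ring_id x, enum ring_id y)
{
	return y * NUM_RINGS * SEQNO_SIZE + SEQNO_SIZE * x;
}

int main(void)
{
	/* Reproduce the "sync from" table: row = this ring, column = signaller */
	for (int x = 0; x < NUM_RINGS; x++) {
		printf("%-4s |", name[x]);
		for (int y = 0; y < NUM_RINGS; y++)
			printf(" %s (0x%02x) |", x == y ? "NOP" : name[y], g(x, y));
		printf("\n");
	}
	return 0;
}

Run, this should print the same rows as the comment (RCS: NOP 0x00, VCS 0x28, ... VCS2 0xa0, and so on), which is a quick way to sanity-check the transpose relationship between f and g.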
drm_modeset_lock_all(dev); - obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); - if (!obj) { + plane = drm_plane_find(dev, get->plane_id); + if (!plane) { ret = -ENOENT; goto out_unlock; } - plane = obj_to_plane(obj); intel_plane = to_intel_plane(plane); intel_plane->get_colorkey(plane, get); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 67c6c9a2eb1c..c14341ca3ef9 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder) struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + /* Prevents vblank waits from timing out in intel_tv_detect_type() */ + intel_wait_for_vblank(encoder->base.dev, + to_intel_crtc(encoder->base.crtc)->pipe); + I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); } @@ -1311,6 +1315,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) { struct drm_display_mode mode; struct intel_tv *intel_tv = intel_attached_tv(connector); + enum drm_connector_status status; int type; DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", @@ -1323,16 +1328,24 @@ intel_tv_detect(struct drm_connector *connector, bool force) struct intel_load_detect_pipe tmp; struct drm_modeset_acquire_ctx ctx; + drm_modeset_acquire_init(&ctx, 0); + if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { type = intel_tv_detect_type(intel_tv, connector); - intel_release_load_detect_pipe(connector, &tmp, &ctx); + intel_release_load_detect_pipe(connector, &tmp); + status = type < 0 ? + connector_status_disconnected : + connector_status_connected; } else - return connector_status_unknown; + status = connector_status_unknown; + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } else return connector->status; - if (type < 0) - return connector_status_disconnected; + if (status != connector_status_connected) + return status; intel_tv->type = type; intel_tv_find_better_format(connector); @@ -1680,5 +1693,5 @@ intel_tv_init(struct drm_device *dev) drm_object_attach_property(&connector->base, dev->mode_config.tv_bottom_margin_property, intel_tv->margin[TV_MARGIN_BOTTOM]); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4f6fef7ac069..e81bc3bdc533 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv, } /* WaRsForcewakeWaitTC0:vlv */ - __gen6_gt_wait_for_thread_c0(dev_priv); - + if (!IS_CHERRYVIEW(dev_priv->dev)) + __gen6_gt_wait_for_thread_c0(dev_priv); } static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, @@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* The below doubles as a POSTING_READ */ - gen6_gt_check_fifodbg(dev_priv); - + /* something from same cacheline, but !FORCEWAKE_VLV */ + __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); + if (!IS_CHERRYVIEW(dev_priv->dev)) + gen6_gt_check_fifodbg(dev_priv); } static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine) @@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg) intel_runtime_pm_put(dev_priv); } -static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) +void 
intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; @@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK; - } else { - dev_priv->uncore.forcewake_count = 0; - dev_priv->uncore.fw_rendercount = 0; - dev_priv->uncore.fw_mediacount = 0; } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -void intel_uncore_early_sanitize(struct drm_device *dev) +void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev) __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); - intel_uncore_forcewake_reset(dev, false); + intel_uncore_forcewake_reset(dev, restore_forcewake); } void intel_uncore_sanitize(struct drm_device *dev) @@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv) #define NEEDS_FORCE_WAKE(dev_priv, reg) \ ((reg) < 0x40000 && (reg) != FORCEWAKE) -#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ - (((reg) >= 0x2000 && (reg) < 0x4000) ||\ - ((reg) >= 0x5000 && (reg) < 0x8000) ||\ - ((reg) >= 0xB000 && (reg) < 0x12000) ||\ - ((reg) >= 0x2E000 && (reg) < 0x30000)) +#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) -#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\ - (((reg) >= 0x12000 && (reg) < 0x14000) ||\ - ((reg) >= 0x22000 && (reg) < 0x24000) ||\ - ((reg) >= 0x30000 && (reg) < 0x40000)) +#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x2000, 0x4000) || \ + REG_RANGE((reg), 0x5000, 0x8000) || \ + REG_RANGE((reg), 0xB000, 0x12000) || \ + REG_RANGE((reg), 0x2E000, 0x30000)) + +#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x12000, 0x14000) || \ + REG_RANGE((reg), 0x22000, 0x24000) || \ + REG_RANGE((reg), 0x30000, 0x40000)) + +#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x2000, 0x4000) || \ + REG_RANGE((reg), 0x5000, 0x8000) || \ + REG_RANGE((reg), 0x8300, 0x8500) || \ + REG_RANGE((reg), 0xB000, 0xC000) || \ + REG_RANGE((reg), 0xE000, 0xE800)) + +#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x8800, 0x8900) || \ + REG_RANGE((reg), 0xD000, 0xD800) || \ + REG_RANGE((reg), 0x12000, 0x14000) || \ + REG_RANGE((reg), 0x1A000, 0x1C000) || \ + REG_RANGE((reg), 0x1E800, 0x1EA00) || \ + REG_RANGE((reg), 0x30000, 0x40000)) + +#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \ + (REG_RANGE((reg), 0x4000, 0x5000) || \ + REG_RANGE((reg), 0x8000, 0x8300) || \ + REG_RANGE((reg), 0x8500, 0x8600) || \ + REG_RANGE((reg), 0x9000, 0xB000) || \ + REG_RANGE((reg), 0xC000, 0xC800) || \ + REG_RANGE((reg), 0xF000, 0x10000) || \ + REG_RANGE((reg), 0x14000, 0x14400) || \ + REG_RANGE((reg), 0x22000, 0x24000)) static void ilk_dummy_write(struct drm_i915_private *dev_priv) @@ -490,20 +514,30 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) } static void -hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) +hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, + bool before) { + const char *op = read ? "reading" : "writing to"; + const char *when = before ? 
"before" : "after"; + + if (!i915.mmio_debug) + return; + if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { - DRM_ERROR("Unknown unclaimed register before writing to %x\n", - reg); + WARN(1, "Unclaimed register detected %s %s register 0x%x\n", + when, op, reg); __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } } static void -hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) +hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) { + if (i915.mmio_debug) + return; + if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { - DRM_ERROR("Unclaimed write to %x\n", reg); + DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem."); __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } } @@ -540,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ static u##x \ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ REG_READ_HEADER(x); \ + hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ if (dev_priv->uncore.forcewake_count == 0 && \ NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ dev_priv->uncore.funcs.force_wake_get(dev_priv, \ @@ -550,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ } else { \ val = __raw_i915_read##x(dev_priv, reg); \ } \ + hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ REG_READ_FOOTER; \ } @@ -573,7 +609,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ REG_READ_FOOTER; \ } +#define __chv_read(x) \ +static u##x \ +chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ + unsigned fwengine = 0; \ + REG_READ_HEADER(x); \ + if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine = FORCEWAKE_RENDER; \ + } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine = FORCEWAKE_MEDIA; \ + } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine |= FORCEWAKE_RENDER; \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine |= FORCEWAKE_MEDIA; \ + } \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ + val = __raw_i915_read##x(dev_priv, reg); \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ + REG_READ_FOOTER; \ +} +__chv_read(8) +__chv_read(16) +__chv_read(32) +__chv_read(64) __vlv_read(8) __vlv_read(16) __vlv_read(32) @@ -591,6 +655,7 @@ __gen4_read(16) __gen4_read(32) __gen4_read(64) +#undef __chv_read #undef __vlv_read #undef __gen6_read #undef __gen5_read @@ -647,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - hsw_unclaimed_reg_clear(dev_priv, reg); \ + hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ __raw_i915_write##x(dev_priv, reg, val); \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ - hsw_unclaimed_reg_check(dev_priv, reg); \ + hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ + hsw_unclaimed_reg_detect(dev_priv); \ REG_WRITE_FOOTER; \ } @@ -681,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg) static void \ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ REG_WRITE_HEADER; \ + hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ if (reg < 0x40000 && 
!is_gen8_shadowed(dev_priv, reg)) { \ if (dev_priv->uncore.forcewake_count == 0) \ dev_priv->uncore.funcs.force_wake_get(dev_priv, \ @@ -692,9 +759,43 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace } else { \ __raw_i915_write##x(dev_priv, reg, val); \ } \ + hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \ + hsw_unclaimed_reg_detect(dev_priv); \ REG_WRITE_FOOTER; \ } +#define __chv_write(x) \ +static void \ +chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ + unsigned fwengine = 0; \ + bool shadowed = is_gen8_shadowed(dev_priv, reg); \ + REG_WRITE_HEADER; \ + if (!shadowed) { \ + if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine = FORCEWAKE_RENDER; \ + } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine = FORCEWAKE_MEDIA; \ + } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \ + if (dev_priv->uncore.fw_rendercount == 0) \ + fwengine |= FORCEWAKE_RENDER; \ + if (dev_priv->uncore.fw_mediacount == 0) \ + fwengine |= FORCEWAKE_MEDIA; \ + } \ + } \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \ + __raw_i915_write##x(dev_priv, reg, val); \ + if (fwengine) \ + dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \ + REG_WRITE_FOOTER; \ +} + +__chv_write(8) +__chv_write(16) +__chv_write(32) +__chv_write(64) __gen8_write(8) __gen8_write(16) __gen8_write(32) @@ -716,6 +817,7 @@ __gen4_write(16) __gen4_write(32) __gen4_write(64) +#undef __chv_write #undef __gen8_write #undef __hsw_write #undef __gen6_write @@ -731,7 +833,7 @@ void intel_uncore_init(struct drm_device *dev) setup_timer(&dev_priv->uncore.force_wake_timer, gen6_force_wake_timer, (unsigned long)dev_priv); - intel_uncore_early_sanitize(dev); + intel_uncore_early_sanitize(dev, false); if (IS_VALLEYVIEW(dev)) { dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; @@ -779,14 +881,26 @@ void intel_uncore_init(struct drm_device *dev) switch (INTEL_INFO(dev)->gen) { default: - dev_priv->uncore.funcs.mmio_writeb = gen8_write8; - dev_priv->uncore.funcs.mmio_writew = gen8_write16; - dev_priv->uncore.funcs.mmio_writel = gen8_write32; - dev_priv->uncore.funcs.mmio_writeq = gen8_write64; - dev_priv->uncore.funcs.mmio_readb = gen6_read8; - dev_priv->uncore.funcs.mmio_readw = gen6_read16; - dev_priv->uncore.funcs.mmio_readl = gen6_read32; - dev_priv->uncore.funcs.mmio_readq = gen6_read64; + if (IS_CHERRYVIEW(dev)) { + dev_priv->uncore.funcs.mmio_writeb = chv_write8; + dev_priv->uncore.funcs.mmio_writew = chv_write16; + dev_priv->uncore.funcs.mmio_writel = chv_write32; + dev_priv->uncore.funcs.mmio_writeq = chv_write64; + dev_priv->uncore.funcs.mmio_readb = chv_read8; + dev_priv->uncore.funcs.mmio_readw = chv_read16; + dev_priv->uncore.funcs.mmio_readl = chv_read32; + dev_priv->uncore.funcs.mmio_readq = chv_read64; + + } else { + dev_priv->uncore.funcs.mmio_writeb = gen8_write8; + dev_priv->uncore.funcs.mmio_writew = gen8_write16; + dev_priv->uncore.funcs.mmio_writel = gen8_write32; + dev_priv->uncore.funcs.mmio_writeq = gen8_write64; + dev_priv->uncore.funcs.mmio_readb = gen6_read8; + dev_priv->uncore.funcs.mmio_readw = gen6_read16; + dev_priv->uncore.funcs.mmio_readl = gen6_read32; + dev_priv->uncore.funcs.mmio_readq = gen6_read64; + } break; case 7: case 6: @@ -912,7 +1026,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, if (args->flags || args->pad) return -EINVAL; - if (args->ctx_id == 
DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) return -EPERM; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1053,18 +1167,16 @@ static int gen6_do_reset(struct drm_device *dev) int intel_gpu_reset(struct drm_device *dev) { - switch (INTEL_INFO(dev)->gen) { - case 8: - case 7: - case 6: return gen6_do_reset(dev); - case 5: return ironlake_do_reset(dev); - case 4: - if (IS_G4X(dev)) - return g4x_do_reset(dev); - else - return i965_do_reset(dev); - default: return -ENODEV; - } + if (INTEL_INFO(dev)->gen >= 6) + return gen6_do_reset(dev); + else if (IS_GEN5(dev)) + return ironlake_do_reset(dev); + else if (IS_G4X(dev)) + return g4x_do_reset(dev); + else if (IS_GEN4(dev)) + return i965_do_reset(dev); + else + return -ENODEV; } void intel_uncore_check_errors(struct drm_device *dev) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index f15ea3c4a90a..2d75d6df0789 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -28,7 +28,7 @@ module_param_named(modeset, mgag200_modeset, int, 0400); static struct drm_driver driver; -static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { +static const struct pci_device_id pciidlist[] = { { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A }, { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B }, { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV }, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index cf11ee68a6d9..80de23d9b9c9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -280,7 +280,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait) { int ret; - ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0); + ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); if (ret) { if (ret != -ERESTARTSYS && ret != -EBUSY) DRM_ERROR("reserve failed %p\n", bo); diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 13b7dd83faa9..5451dc58eff1 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -272,7 +272,7 @@ static int mga_fbdev_destroy(struct drm_device *dev, return 0; } -static struct drm_fb_helper_funcs mga_fb_helper_funcs = { +static const struct drm_fb_helper_funcs mga_fb_helper_funcs = { .gamma_set = mga_crtc_fb_gamma_set, .gamma_get = mga_crtc_fb_gamma_get, .fb_probe = mgag200fb_create, @@ -293,9 +293,10 @@ int mgag200_fbdev_init(struct mga_device *mdev) return -ENOMEM; mdev->mfbdev = mfbdev; - mfbdev->helper.funcs = &mga_fb_helper_funcs; spin_lock_init(&mfbdev->dirty_lock); + drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs); + ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, mdev->num_crtc, MGAG200FB_CONN_LIMIT); if (ret) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index a034ed408252..45f04dea0ac2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1562,19 +1562,9 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - /* pick the encoder ids */ - if (enc_id) { - obj = - drm_mode_object_find(connector->dev, enc_id, - DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = 
obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -1621,7 +1611,7 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev) drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); mga_connector->i2c = mgag200_i2c_create(dev); if (!mga_connector->i2c) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index f12388967856..c99c50de3226 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -2,7 +2,6 @@ config DRM_MSM tristate "MSM DRM" depends on DRM - depends on MSM_IOMMU depends on ARCH_QCOM || (ARM && COMPILE_TEST) select DRM_KMS_HELPER select SHMEM diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index 85d615e7d62f..a8a144b38eaa 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16) -Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -203,6 +203,15 @@ enum a2xx_rb_copy_sample_select { SAMPLE_0123 = 6, }; +enum a2xx_rb_blend_opcode { + BLEND_DST_PLUS_SRC = 0, + BLEND_SRC_MINUS_DST = 1, + BLEND_MIN_DST_SRC = 2, + BLEND_MAX_DST_SRC = 3, + BLEND_DST_MINUS_SRC = 4, + BLEND_DST_PLUS_SRC_BIAS = 5, +}; + enum adreno_mmu_clnt_beh { BEH_NEVR = 0, BEH_TRAN_RNG = 1, @@ -890,6 +899,39 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val) #define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9 #define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc +#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f +#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0 +#define 
A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600 +#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800 +#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000 +#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000 +#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000 +#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK; +} #define REG_A2XX_VGT_IMMED_DATA 0x000021fd @@ -963,7 +1005,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend } #define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0 #define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5 -static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val) +static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val) { return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK; } @@ -981,7 +1023,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend } #define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000 #define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21 -static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val) +static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val) { return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK; } diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index a7be56163d23..303e8a9e91a5 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) +- 
/home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16) -Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -41,31 +41,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -enum a3xx_render_mode { - RB_RENDERING_PASS = 0, - RB_TILING_PASS = 1, - RB_RESOLVE_PASS = 2, -}; - enum a3xx_tile_mode { LINEAR = 0, TILE_32X32 = 2, }; -enum a3xx_threadmode { - MULTI = 0, - SINGLE = 1, -}; - -enum a3xx_instrbuffermode { - BUFFER = 1, -}; - -enum a3xx_threadsize { - TWO_QUADS = 0, - FOUR_QUADS = 1, -}; - enum a3xx_state_block_id { HLSQ_BLOCK_ID_TP_TEX = 2, HLSQ_BLOCK_ID_TP_MIPMAP = 3, @@ -169,6 +149,8 @@ enum a3xx_color_fmt { RB_R8G8B8A8_UNORM = 8, RB_Z16_UNORM = 12, RB_A8_UNORM = 20, + RB_R16G16B16A16_FLOAT = 27, + RB_R32G32B32A32_FLOAT = 51, }; enum a3xx_color_swap { @@ -178,12 +160,6 @@ enum a3xx_color_swap { XYZW = 3, }; -enum a3xx_msaa_samples { - MSAA_ONE = 0, - MSAA_TWO = 1, - MSAA_FOUR = 2, -}; - enum a3xx_sp_perfcounter_select { SP_FS_CFLOW_INSTRUCTIONS = 12, SP_FS_FULL_ALU_INSTRUCTIONS = 14, @@ -191,21 +167,45 @@ enum a3xx_sp_perfcounter_select { SP_ALU_ACTIVE_CYCLES = 29, }; -enum adreno_rb_copy_control_mode { - RB_COPY_RESOLVE = 1, - RB_COPY_DEPTH_STENCIL = 5, +enum a3xx_rop_code { + ROP_CLEAR = 0, + ROP_NOR = 1, + ROP_AND_INVERTED = 2, + ROP_COPY_INVERTED = 3, + ROP_AND_REVERSE = 4, + ROP_INVERT = 5, + ROP_XOR = 6, + ROP_NAND = 7, + ROP_AND = 8, + ROP_EQUIV = 9, + ROP_NOOP = 10, + ROP_OR_INVERTED = 11, + ROP_COPY = 12, + ROP_OR_REVERSE = 13, + ROP_OR = 14, + ROP_SET = 15, +}; + +enum a3xx_rb_blend_opcode { + BLEND_DST_PLUS_SRC = 0, + BLEND_SRC_MINUS_DST = 1, + BLEND_DST_MINUS_SRC = 2, + BLEND_MIN_DST_SRC = 3, + BLEND_MAX_DST_SRC = 4, }; enum a3xx_tex_filter { A3XX_TEX_NEAREST = 0, A3XX_TEX_LINEAR = 1, + A3XX_TEX_ANISO = 2, }; enum a3xx_tex_clamp { A3XX_TEX_REPEAT = 0, A3XX_TEX_CLAMP_TO_EDGE = 1, A3XX_TEX_MIRROR_REPEAT = 2, - A3XX_TEX_CLAMP_NONE = 3, + A3XX_TEX_CLAMP_TO_BORDER = 3, + A3XX_TEX_MIRROR_CLAMP = 4, }; enum a3xx_tex_swiz { @@ -316,6 +316,7 @@ enum a3xx_tex_type { #define REG_A3XX_RBBM_INT_0_STATUS 0x00000064 #define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080 +#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001 #define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081 @@ -549,6 +550,10 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 #define REG_A3XX_CP_AHB_FAULT 0x0000054d +#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22 + +#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23 + #define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040 #define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000 #define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 @@ -556,6 +561,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 #define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000 #define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000 #define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000 
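A note on the generated a3xx.xml.h content above and below: every register field follows the same encoding convention, a __MASK/__SHIFT pair plus an inline packer that shifts the value into place and masks it, and register values are built by OR-ing packed fields together. The sketch below shows that convention in isolation; EXAMPLE_FIELD and EXAMPLE_COUNT are made-up names, not fields from the header.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 6-bit field in bits [5:0], in the style of the generated headers. */
#define EXAMPLE_FIELD__MASK   0x0000003f
#define EXAMPLE_FIELD__SHIFT  0
static inline uint32_t EXAMPLE_FIELD(uint32_t val)
{
	return ((val) << EXAMPLE_FIELD__SHIFT) & EXAMPLE_FIELD__MASK;
}

/* Hypothetical 8-bit field in bits [15:8], to show OR-composition. */
#define EXAMPLE_COUNT__MASK   0x0000ff00
#define EXAMPLE_COUNT__SHIFT  8
static inline uint32_t EXAMPLE_COUNT(uint32_t val)
{
	return ((val) << EXAMPLE_COUNT__SHIFT) & EXAMPLE_COUNT__MASK;
}

int main(void)
{
	/* Compose a register value from packed fields. */
	uint32_t reg = EXAMPLE_FIELD(4) | EXAMPLE_COUNT(0x20);

	/* Decoding reverses the pattern: mask first, then shift down. */
	uint32_t count = (reg & EXAMPLE_COUNT__MASK) >> EXAMPLE_COUNT__SHIFT;

	printf("reg=0x%08" PRIx32 " count=0x%02" PRIx32 "\n", reg, count);
	return 0;
}

Decoding is the mirror image of packing, which is why the headers only need to emit the two macros and the inline packer for each field.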
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000 +#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000 +#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000 #define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044 #define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff @@ -620,8 +628,26 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val) } #define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068 +#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff +#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val) +{ + return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK; +} +#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000 +#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16 +static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val) +{ + return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK; +} #define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069 +#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff +#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val) +{ + return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK; +} #define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c #define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff @@ -743,6 +769,7 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode va #define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 #define REG_A3XX_RB_RENDER_CONTROL 0x000020c1 +#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008 #define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0 #define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4 static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val) @@ -751,6 +778,10 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val) } #define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000 #define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000 +#define A3XX_RB_RENDER_CONTROL_XCOORD 0x00004000 +#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000 +#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000 +#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 #define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 @@ -796,7 +827,7 @@ static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 #define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020 #define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 #define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 -static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val) +static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) { return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK; } @@ -856,7 +887,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b } #define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 #define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val) +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) { return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; } @@ -874,7 +905,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum 
adreno_rb } #define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 #define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 -static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val) +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) { return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; } @@ -957,17 +988,24 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples { return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK; } +#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008 #define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070 #define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4 static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val) { return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK; } -#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00 -#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10 +#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00 +#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8 +static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val) +{ + return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK; +} +#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000 +#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) { - return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK; + return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK; } #define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed @@ -1005,6 +1043,12 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val) { return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK; } +#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 +#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; +} #define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000 #define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14 static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val) @@ -1019,6 +1063,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi } #define REG_A3XX_RB_DEPTH_CONTROL 0x00002100 +#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001 #define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002 #define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 #define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008 @@ -1044,7 +1089,7 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form #define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11 static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) { - return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; + return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; } #define REG_A3XX_RB_DEPTH_PITCH 0x00002103 @@ -1172,6 +1217,8 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val) } #define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110 +#define 
A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001 +#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 #define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111 @@ -1179,7 +1226,23 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val) #define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115 +#define REG_A3XX_VGT_BIN_BASE 0x000021e1 + +#define REG_A3XX_VGT_BIN_SIZE 0x000021e2 + #define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 +#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000 +#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16 +static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val) +{ + return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK; +} +#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000 +#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22 +static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val) +{ + return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK; +} #define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea @@ -1203,6 +1266,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK; } #define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 +#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 #define REG_A3XX_PC_RESTART_INDEX 0x000021ed @@ -1232,6 +1296,7 @@ static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize } #define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 #define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200 +#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000 #define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202 #define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 @@ -1242,6 +1307,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val) } #define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203 +#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff +#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0 +static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK; +} #define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204 #define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff @@ -1312,10 +1383,36 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) } #define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a +#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003 +#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0 +static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK; +} +#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc +#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2 +static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK; +} +#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000 +#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12 +static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK; +} +#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000 +#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22 +static inline uint32_t 
A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK; +} + +static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; } -#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b +static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; } -#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c +static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; } #define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211 @@ -1323,7 +1420,9 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val) #define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214 -#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215 +static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; } + +static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; } #define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216 @@ -1438,6 +1537,12 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val) { return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK; } +#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000 +#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22 +static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK; +} #define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000 #define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24 static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val) @@ -1462,12 +1567,13 @@ static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val } #define REG_A3XX_VPC_ATTR 0x00002280 -#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff +#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff #define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0 static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val) { return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK; } +#define A3XX_VPC_ATTR_PSIZE 0x00000200 #define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000 #define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12 static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val) @@ -1522,11 +1628,11 @@ static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val) { return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK; } -#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000 -#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22 -static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val) +#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000 +#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22 +static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val) { - return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK; + return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK; } #define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4 @@ -1569,6 +1675,7 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) } #define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000 #define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000 +#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000 #define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000 #define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24 static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val) @@ -1742,6 +1849,7 @@ 
static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) } #define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000 #define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000 +#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000 #define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000 #define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24 static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val) @@ -1802,6 +1910,13 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val) #define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9 #define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec +#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080 +#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00 +#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8 +static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val) +{ + return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK; +} static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; } @@ -1914,6 +2029,42 @@ static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val) #define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f +#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070 +#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001 +#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002 +#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004 +#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008 +#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010 + +#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071 +#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001 +#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002 +#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004 +#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008 +#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010 + +#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072 + +#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073 + +#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074 + +#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075 + +#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076 + +#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077 + +#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078 + +#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079 + +#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a + +#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b + +#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c + #define REG_A3XX_VSC_BIN_SIZE 0x00000c01 #define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f #define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0 @@ -2080,6 +2231,8 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op } #define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000 +#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6 + #define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4 #define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5 @@ -2117,6 +2270,39 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op #define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9 #define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc +#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f +#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0 +static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK; +} +#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0 +#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6 +static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << 
A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK; +} +#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600 +#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9 +static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK; +} +#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800 +#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11 +static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK; +} +#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000 +#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000 +#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000 +#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16 +static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val) +{ + return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK; +} #define REG_A3XX_VGT_IMMED_DATA 0x000021fd @@ -2152,6 +2338,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) { return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK; } +#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000 +#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20 +static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK; +} #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 #define REG_A3XX_TEX_SAMP_1 0x00000001 @@ -2170,6 +2362,7 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val) #define REG_A3XX_TEX_CONST_0 0x00000000 #define A3XX_TEX_CONST_0_TILED 0x00000001 +#define A3XX_TEX_CONST_0_SRGB 0x00000004 #define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 #define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4 static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val) @@ -2206,6 +2399,7 @@ static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) { return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK; } +#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000 #define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000 #define A3XX_TEX_CONST_0_TYPE__SHIFT 30 static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val) diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 942e09d898a8..2773600c9488 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -392,13 +392,10 @@ static const unsigned int a3xx_registers[] = { #ifdef CONFIG_DEBUG_FS static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) { - struct drm_device *dev = gpu->dev; int i; adreno_show(gpu, m); - mutex_lock(&dev->struct_mutex); - gpu->funcs->pm_resume(gpu); seq_printf(m, "status: %08x\n", @@ -418,8 +415,6 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) } gpu->funcs->pm_suspend(gpu); - - mutex_unlock(&dev->struct_mutex); } #endif @@ -685,6 +680,8 @@ static int a3xx_remove(struct platform_device *pdev) } static const struct of_device_id dt_match[] = { + { .compatible = "qcom,adreno-3xx" }, + /* for backwards compat w/ downstream kgsl DT files: */ { .compatible = "qcom,kgsl-3d0" }, {} }; diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h 
b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index bb9a8ca0507b..85ff66cbddd6 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -19,6 +19,11 @@ #define __A3XX_GPU_H__ #include "adreno_gpu.h" + +/* arrg, somehow fb.h is getting pulled in: */ +#undef ROP_COPY +#undef ROP_XOR + #include "a3xx.xml.h" struct a3xx_gpu { diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index d6e6ce2d1abd..9de19ac2e86c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16) -Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -87,15 +87,6 @@ enum adreno_rb_blend_factor { FACTOR_SRC_ALPHA_SATURATE = 16, }; -enum adreno_rb_blend_opcode { - BLEND_DST_PLUS_SRC = 0, - BLEND_SRC_MINUS_DST = 1, - BLEND_MIN_DST_SRC = 2, - BLEND_MAX_DST_SRC = 3, - BLEND_DST_MINUS_SRC = 4, - BLEND_DST_PLUS_SRC_BIAS = 5, -}; - enum adreno_rb_surface_endian { ENDIAN_NONE = 0, ENDIAN_8IN16 = 1, @@ -116,6 +107,39 @@ enum adreno_rb_depth_format { DEPTHX_24_8 = 1, }; +enum adreno_rb_copy_control_mode { + RB_COPY_RESOLVE = 1, + RB_COPY_CLEAR = 2, + RB_COPY_DEPTH_STENCIL = 5, +}; + +enum a3xx_render_mode { + RB_RENDERING_PASS = 0, + RB_TILING_PASS = 1, + RB_RESOLVE_PASS = 2, + RB_COMPUTE_PASS = 3, +}; + +enum a3xx_msaa_samples { + MSAA_ONE = 0, + MSAA_TWO = 1, + MSAA_FOUR = 2, +}; + +enum a3xx_threadmode { + MULTI = 0, + SINGLE = 1, +}; + +enum a3xx_instrbuffermode { + BUFFER = 1, +}; + +enum a3xx_threadsize { + TWO_QUADS = 0, + FOUR_QUADS = 1, +}; + #define REG_AXXX_CP_RB_BASE 0x000001c0 #define REG_AXXX_CP_RB_CNTL 0x000001c1 @@ -264,6 +288,8 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val) #define REG_AXXX_CP_INT_ACK 0x000001f4 #define REG_AXXX_CP_ME_CNTL 0x000001f6 +#define AXXX_CP_ME_CNTL_BUSY 0x20000000 +#define AXXX_CP_ME_CNTL_HALT 0x10000000 #define REG_AXXX_CP_ME_STATUS 0x000001f7 diff --git 
a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 28ca8cd8b09e..655ce5b14ad0 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -91,9 +91,17 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) int adreno_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; DBG("%s", gpu->name); + ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova); + if (ret) { + gpu->rb_iova = 0; + dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); + return ret; + } + /* Setup REG_CP_RB_CNTL: */ gpu_write(gpu, REG_AXXX_CP_RB_CNTL, /* size is log2(quad-words): */ @@ -362,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } + mutex_lock(&drm->struct_mutex); gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs), MSM_BO_UNCACHED); + mutex_unlock(&drm->struct_mutex); if (IS_ERR(gpu->memptrs_bo)) { ret = PTR_ERR(gpu->memptrs_bo); gpu->memptrs_bo = NULL; @@ -371,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo); + gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo); if (!gpu->memptrs) { dev_err(drm->dev, "could not vmap memptrs\n"); return -ENOMEM; } - ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id, + ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id, &gpu->memptrs_iova); if (ret) { dev_err(drm->dev, "could not map memptrs: %d\n", ret); diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index ae992c71703f..4eee0ec8f069 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16) -Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -105,6 +105,7 @@ enum pc_di_index_size { enum pc_di_vis_cull_mode { IGNORE_VISIBILITY = 0, + USE_VISIBILITY = 
1, }; enum adreno_pm4_packet_type { @@ -163,6 +164,11 @@ enum adreno_pm4_type3_packets { CP_SET_BIN = 76, CP_TEST_TWO_MEMS = 113, CP_WAIT_FOR_ME = 19, + CP_SET_DRAW_STATE = 67, + CP_DRAW_INDX_OFFSET = 56, + CP_DRAW_INDIRECT = 40, + CP_DRAW_INDX_INDIRECT = 41, + CP_DRAW_AUTO = 36, IN_IB_PREFETCH_END = 23, IN_SUBBLK_PREFETCH = 31, IN_INSTR_PREFETCH = 32, @@ -232,6 +238,211 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val) return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK; } +#define REG_CP_DRAW_INDX_0 0x00000000 +#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff +#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK; +} + +#define REG_CP_DRAW_INDX_1 0x00000001 +#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f +#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK; +} +#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0 +#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6 +static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK; +} +#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600 +#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9 +static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK; +} +#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800 +#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11 +static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK; +} +#define CP_DRAW_INDX_1_NOT_EOP 0x00001000 +#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000 +#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000 +#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16 +static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_2 0x00000002 +#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff +#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_2 0x00000002 +#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff +#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK; +} + +#define REG_CP_DRAW_INDX_2 0x00000002 +#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff +#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK; +} + +#define REG_CP_DRAW_INDX_2_0 0x00000000 +#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff +#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK; +} + +#define 
REG_CP_DRAW_INDX_2_1 0x00000001 +#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f +#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK; +} +#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0 +#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6 +static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK; +} +#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600 +#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9 +static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK; +} +#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800 +#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11 +static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK; +} +#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000 +#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000 +#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000 +#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16 +static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_2_2 0x00000002 +#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff +#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000 +#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f +#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0 +#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700 +#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800 +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000 +#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000 +#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000 +#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) 
& CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 + +#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 +#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 +#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 +#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK; +} + +#define REG_CP_SET_DRAW_STATE_0 0x00000000 +#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff +#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK; +} +#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000 +#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000 +#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000 +#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000 +#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000 +#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24 +static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK; +} + +#define REG_CP_SET_DRAW_STATE_1 0x00000001 +#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff +#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK; +} + #define REG_CP_SET_BIN_0 0x00000000 #define REG_CP_SET_BIN_1 0x00000001 @@ -262,5 +473,21 @@ static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val) return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK; } +#define REG_CP_SET_BIN_DATA_0 0x00000000 +#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff +#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK; +} + +#define REG_CP_SET_BIN_DATA_1 0x00000001 +#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff +#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK; +} + #endif /* ADRENO_PM4_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 87be647e3825..0f1f5b9459a5 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 
bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index 747a6ef4211f..d468f86f637c 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index 48e03acf19bf..da8740054cdf 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 
bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 7f7aadef8a82..c6c9b02e0ada 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -123,7 +123,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) for (i = 0; i < config->hpd_reg_cnt; i++) { struct regulator *reg; - reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]); + reg = devm_regulator_get_exclusive(&pdev->dev, + config->hpd_reg_names[i]); if (IS_ERR(reg)) { ret = PTR_ERR(reg); dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n", @@ -138,7 +139,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) for (i = 0; i < config->pwr_reg_cnt; i++) { struct regulator *reg; - reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]); + reg = devm_regulator_get_exclusive(&pdev->dev, + config->pwr_reg_names[i]); if (IS_ERR(reg)) { ret = PTR_ERR(reg); dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n", @@ -256,47 +258,68 @@ static void set_hdmi_pdev(struct drm_device *dev, priv->hdmi_pdev = pdev; } -static int hdmi_bind(struct device *dev, struct device *master, void *data) -{ - static struct hdmi_platform_config config = {}; #ifdef CONFIG_OF - struct device_node *of_node = dev->of_node; - - int get_gpio(const char *name) - { - int gpio = of_get_named_gpio(of_node, name, 0); +static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) +{ + int gpio = of_get_named_gpio(of_node, name, 0); + if (gpio < 0) { + char name2[32]; + snprintf(name2, sizeof(name2), "%s-gpio", name); + gpio = of_get_named_gpio(of_node, name2, 0); if (gpio < 0) { dev_err(dev, "failed to get gpio: %s (%d)\n", name, gpio); gpio = -1; } - return gpio; } + return gpio; +} +#endif + +static int hdmi_bind(struct device *dev, struct device *master, void *data) +{ + static struct hdmi_platform_config config = {}; +#ifdef CONFIG_OF + struct device_node *of_node = dev->of_node; - /* TODO actually use DT.. 
*/ - static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; - static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; - static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; - static unsigned long hpd_clk_freq[] = {0, 19200000, 0}; - static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; + if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) { + static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; + static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; + static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; + static unsigned long hpd_clk_freq[] = {0, 19200000, 0}; + static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; + config.phy_init = hdmi_phy_8x74_init; + config.hpd_reg_names = hpd_reg_names; + config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); + config.pwr_reg_names = pwr_reg_names; + config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); + config.hpd_clk_names = hpd_clk_names; + config.hpd_freq = hpd_clk_freq; + config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); + config.pwr_clk_names = pwr_clk_names; + config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); + config.shared_irq = true; + } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) { + static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; + static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"}; + config.phy_init = hdmi_phy_8960_init; + config.hpd_reg_names = hpd_reg_names; + config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); + config.hpd_clk_names = hpd_clk_names; + config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); + } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) { + config.phy_init = hdmi_phy_8x60_init; + } else { + dev_err(dev, "unknown phy: %s\n", of_node->name); + } - config.phy_init = hdmi_phy_8x74_init; config.mmio_name = "core_physical"; - config.hpd_reg_names = hpd_reg_names; - config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); - config.pwr_reg_names = pwr_reg_names; - config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); - config.hpd_clk_names = hpd_clk_names; - config.hpd_freq = hpd_clk_freq; - config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); - config.pwr_clk_names = pwr_clk_names; - config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); - config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk"); - config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data"); - config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd"); - config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en"); - config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel"); - config.shared_irq = true; + config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); + config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); + config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); + config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); + config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); + config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); #else static const char *hpd_clk_names[] = { @@ -373,7 +396,9 @@ static int hdmi_dev_remove(struct platform_device *pdev) } static const struct of_device_id dt_match[] = { - { .compatible = "qcom,hdmi-tx" }, + { .compatible = "qcom,hdmi-tx-8074" }, + { .compatible = "qcom,hdmi-tx-8960" }, + { .compatible = "qcom,hdmi-tx-8660" }, {} }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 9d7723c6528a..b981995410b5 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ 
b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -96,6 +96,7 @@ struct hdmi_platform_config { /* gpio's: */ int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio; + int mux_lpm_gpio; /* older devices had their own irq, mdp5+ it is shared w/ mdp: */ bool shared_irq; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index e2636582cfd7..e89fe053d375 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) -Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -148,9 +148,9 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4* static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; } -static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; } +static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; } -static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; } +static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; } #define HDMI_ACR_0_CTS__MASK 0xfffff000 #define HDMI_ACR_0_CTS__SHIFT 12 static inline uint32_t HDMI_ACR_0_CTS(uint32_t val) @@ -158,7 +158,7 @@ static inline uint32_t HDMI_ACR_0_CTS(uint32_t val) return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK; } -static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; } +static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; } #define HDMI_ACR_1_N__MASK 0xffffffff #define HDMI_ACR_1_N__SHIFT 0 static inline uint32_t HDMI_ACR_1_N(uint32_t val) @@ -552,6 +552,103 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val) #define REG_HDMI_8960_PHY_REG11 0x0000042c #define REG_HDMI_8960_PHY_REG12 0x00000430 +#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020 +#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080 + +#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434 + +#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438 + +#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c + +#define REG_HDMI_8960_PHY_REG13 0x00000440 + +#define REG_HDMI_8960_PHY_REG14 0x00000444 + +#define 
REG_HDMI_8960_PHY_REG15 0x00000448 + +#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500 + +#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504 + +#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508 + +#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c + +#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510 + +#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514 + +#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518 +#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002 +#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008 + +#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c + +#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520 + +#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524 + +#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528 + +#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c + +#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530 + +#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534 + +#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538 + +#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c + +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540 + +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544 + +#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548 + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550 + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554 + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558 + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560 + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564 + +#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568 + +#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c + +#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570 + +#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574 + +#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578 + +#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c + +#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580 + +#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584 + +#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588 + +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c + +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590 + +#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594 + +#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598 +#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001 + +#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c #define REG_HDMI_8x74_ANA_CFG0 0x00000000 diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 28f7e3ec6c28..4aca2a3c667c 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -63,7 +63,7 @@ static int gpio_config(struct hdmi *hdmi, bool on) ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN"); if (ret) { dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", - "HDMI_MUX_SEL", config->mux_en_gpio, ret); + "HDMI_MUX_EN", config->mux_en_gpio, ret); goto error4; } gpio_set_value_cansleep(config->mux_en_gpio, 1); @@ -78,6 +78,19 @@ static int gpio_config(struct hdmi *hdmi, bool on) } gpio_set_value_cansleep(config->mux_sel_gpio, 0); } + + if (config->mux_lpm_gpio != -1) { + ret = gpio_request(config->mux_lpm_gpio, + "HDMI_MUX_LPM"); + if (ret) { + dev_err(dev->dev, + "'%s'(%d) gpio_request failed: %d\n", + "HDMI_MUX_LPM", + config->mux_lpm_gpio, ret); + goto error6; + } + gpio_set_value_cansleep(config->mux_lpm_gpio, 1); + } DBG("gpio on"); } else { gpio_free(config->ddc_clk_gpio); @@ -93,11 +106,19 @@ static int gpio_config(struct hdmi *hdmi, bool on) 
gpio_set_value_cansleep(config->mux_sel_gpio, 1); gpio_free(config->mux_sel_gpio); } + + if (config->mux_lpm_gpio != -1) { + gpio_set_value_cansleep(config->mux_lpm_gpio, 0); + gpio_free(config->mux_lpm_gpio); + } DBG("gpio off"); } return 0; +error6: + if (config->mux_sel_gpio != -1) + gpio_free(config->mux_sel_gpio); error5: if (config->mux_en_gpio != -1) gpio_free(config->mux_en_gpio); @@ -306,7 +327,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector) hdp_disable(hdmi_connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); hdmi_unreference(hdmi_connector->hdmi); @@ -416,7 +437,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); ret = hpd_enable(hdmi_connector); if (ret) { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index e5b7ed5b8f01..f408b69486a8 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c @@ -15,14 +15,377 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#ifdef CONFIG_COMMON_CLK +#include <linux/clk.h> +#include <linux/clk-provider.h> +#endif + #include "hdmi.h" struct hdmi_phy_8960 { struct hdmi_phy base; struct hdmi *hdmi; +#ifdef CONFIG_COMMON_CLK + struct clk_hw pll_hw; + struct clk *pll; + unsigned long pixclk; +#endif }; #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) +#ifdef CONFIG_COMMON_CLK +#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw) + +/* + * HDMI PLL: + * + * To get the parent clock setup properly, we need to plug in hdmi pll + * configuration into common-clock-framework. 
+ */ + +struct pll_rate { + unsigned long rate; + struct { + uint32_t val; + uint32_t reg; + } conf[32]; +}; + +/* NOTE: keep sorted highest freq to lowest: */ +static const struct pll_rate freqtbl[] = { + /* 1080p60/1080p50 case */ + { 148500000, { + { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + { 0, 0 } } + }, + { 108000000, { + { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0, 0 } } + }, + /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */ + { 74250000, { + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0, 0 } } + }, + { 65000000, { + { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0, 0 } } + }, + /* 480p60/480i60 */ + { 27030000, { + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0xff, 
REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + { 0, 0 } } + }, + /* 576p50/576i50 */ + { 27000000, { + { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + { 0, 0 } } + }, + /* 640x480p60 */ + { 25200000, { + { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG }, + { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG }, + { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 }, + { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 }, + { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG }, + { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG }, + { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B }, + { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 }, + { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 }, + { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 }, + { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 }, + { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 }, + { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 }, + { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 }, + { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 }, + { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 }, + { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 }, + { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 }, + { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 }, + { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 }, + { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 }, + { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 }, + { 0, 0 } } + }, +}; + +static int hdmi_pll_enable(struct clk_hw *hw) +{ + struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); + struct hdmi *hdmi = phy_8960->hdmi; + int timeout_count, pll_lock_retry = 10; + unsigned int val; + + DBG(""); + + /* Assert PLL S/W reset */ + hdmi_write(hdmi, 
REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); + hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10); + hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a); + + /* Wait for a short time before de-asserting + * to allow the hardware to complete its job. + * This much of delay should be fine for hardware + * to assert and de-assert. + */ + udelay(10); + + /* De-assert PLL S/W reset */ + hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); + + val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); + val |= HDMI_8960_PHY_REG12_SW_RESET; + /* Assert PHY S/W reset */ + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); + val &= ~HDMI_8960_PHY_REG12_SW_RESET; + /* Wait for a short time before de-asserting + to allow the hardware to complete its job. + This much of delay should be fine for hardware + to assert and de-assert. */ + udelay(10); + /* De-assert PHY S/W reset */ + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f); + + val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); + val |= HDMI_8960_PHY_REG12_PWRDN_B; + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); + /* Wait 10 us for enabling global power for PHY */ + mb(); + udelay(10); + + val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B); + val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B; + val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL; + hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80); + + timeout_count = 1000; + while (--pll_lock_retry > 0) { + + /* are we there yet? */ + val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0); + if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK) + break; + + udelay(1); + + if (--timeout_count > 0) + continue; + + /* + * PLL has still not locked. + * Do a software reset and try again + * Assert PLL S/W reset first + */ + hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d); + udelay(10); + hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d); + + /* + * Wait for a short duration for the PLL calibration + * before checking if the PLL gets locked + */ + udelay(350); + + timeout_count = 1000; + } + + return 0; +} + +static void hdmi_pll_disable(struct clk_hw *hw) +{ + struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); + struct hdmi *hdmi = phy_8960->hdmi; + unsigned int val; + + DBG(""); + + val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12); + val &= ~HDMI_8960_PHY_REG12_PWRDN_B; + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val); + + val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B); + val |= HDMI_8960_PHY_REG12_SW_RESET; + val &= ~HDMI_8960_PHY_REG12_PWRDN_B; + hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val); + /* Make sure HDMI PHY/PLL are powered down */ + mb(); +} + +static const struct pll_rate *find_rate(unsigned long rate) +{ + int i; + for (i = 1; i < ARRAY_SIZE(freqtbl); i++) + if (rate > freqtbl[i].rate) + return &freqtbl[i-1]; + return &freqtbl[i-1]; +} + +static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); + return phy_8960->pixclk; +} + +static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + const struct pll_rate *pll_rate = find_rate(rate); + return pll_rate->rate; +} + +static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw); + struct hdmi *hdmi = phy_8960->hdmi; + const struct pll_rate *pll_rate = find_rate(rate); + int i; + + DBG("rate=%lu", rate); + + for 
(i = 0; pll_rate->conf[i].reg; i++) + hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val); + + phy_8960->pixclk = rate; + + return 0; +} + + +static const struct clk_ops hdmi_pll_ops = { + .enable = hdmi_pll_enable, + .disable = hdmi_pll_disable, + .recalc_rate = hdmi_pll_recalc_rate, + .round_rate = hdmi_pll_round_rate, + .set_rate = hdmi_pll_set_rate, +}; + +static const char *hdmi_pll_parents[] = { + "pxo", +}; + +static struct clk_init_data pll_init = { + .name = "hdmi_pll", + .ops = &hdmi_pll_ops, + .parent_names = hdmi_pll_parents, + .num_parents = ARRAY_SIZE(hdmi_pll_parents), +}; +#endif + +/* + * HDMI Phy: + */ + static void hdmi_phy_8960_destroy(struct hdmi_phy *phy) { struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); @@ -86,6 +449,9 @@ static void hdmi_phy_8960_powerup(struct hdmi_phy *phy, struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); struct hdmi *hdmi = phy_8960->hdmi; + DBG("pixclock: %lu", pixclock); + + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00); hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b); hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2); hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00); @@ -104,6 +470,8 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy) struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy); struct hdmi *hdmi = phy_8960->hdmi; + DBG(""); + hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f); } @@ -119,6 +487,14 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) struct hdmi_phy_8960 *phy_8960; struct hdmi_phy *phy = NULL; int ret; +#ifdef CONFIG_COMMON_CLK + int i; + + /* sanity check: */ + for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) + if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate)) + return ERR_PTR(-EINVAL); +#endif phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); if (!phy_8960) { @@ -132,6 +508,16 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) phy_8960->hdmi = hdmi; +#ifdef CONFIG_COMMON_CLK + phy_8960->pll_hw.init = &pll_init; + phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw); + if (IS_ERR(phy_8960->pll)) { + ret = PTR_ERR(phy_8960->pll); + phy_8960->pll = NULL; + goto fail; + } +#endif + return phy; fail: diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index d591567173c4..bd81db6a7829 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- 
/home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index 416a26e1e58d..122208e8a2ee 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h @@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 74cebb51e8c2..c6c80ea28c35 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -397,6 +397,7 @@ static void mdp4_crtc_prepare(struct drm_crtc *crtc) struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); DBG("%s", mdp4_crtc->name); /* make sure we hold a ref to mdp clks while setting up mode: */ + drm_crtc_vblank_get(crtc); mdp4_enable(get_kms(crtc)); mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } @@ -407,6 +408,7 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc) crtc_flush(crtc); /* drop the ref to mdp clk's that we got in prepare: */ mdp4_disable(get_kms(crtc)); + drm_crtc_vblank_put(crtc); } static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 0bb4faa17523..733646c0d3f8 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -147,7 +147,7 @@ static void mdp4_destroy(struct msm_kms *kms) if (mdp4_kms->blank_cursor_iova) msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); if (mdp4_kms->blank_cursor_bo) - drm_gem_object_unreference(mdp4_kms->blank_cursor_bo); + drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); kfree(mdp4_kms); } @@ -176,6 +176,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms) if (mdp4_kms->pclk) clk_disable_unprepare(mdp4_kms->pclk); clk_disable_unprepare(mdp4_kms->lut_clk); + if (mdp4_kms->axi_clk) + clk_disable_unprepare(mdp4_kms->axi_clk); return 0; } @@ -188,6 +190,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms) if (mdp4_kms->pclk) clk_prepare_enable(mdp4_kms->pclk); clk_prepare_enable(mdp4_kms->lut_clk); + if (mdp4_kms->axi_clk) + 
clk_prepare_enable(mdp4_kms->axi_clk); return 0; } @@ -294,15 +298,17 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } - mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda"); + mdp4_kms->dsi_pll_vdda = + devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda"); if (IS_ERR(mdp4_kms->dsi_pll_vdda)) mdp4_kms->dsi_pll_vdda = NULL; - mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio"); + mdp4_kms->dsi_pll_vddio = + devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio"); if (IS_ERR(mdp4_kms->dsi_pll_vddio)) mdp4_kms->dsi_pll_vddio = NULL; - mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); + mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd"); if (IS_ERR(mdp4_kms->vdd)) mdp4_kms->vdd = NULL; @@ -333,6 +339,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } + mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk"); + if (IS_ERR(mdp4_kms->axi_clk)) { + dev_err(dev->dev, "failed to get axi_clk\n"); + ret = PTR_ERR(mdp4_kms->axi_clk); + goto fail; + } + clk_set_rate(mdp4_kms->clk, config->max_clk); clk_set_rate(mdp4_kms->lut_clk, config->max_clk); @@ -348,7 +361,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) mdelay(16); if (config->iommu) { - mmu = msm_iommu_new(dev, config->iommu); + mmu = msm_iommu_new(&pdev->dev, config->iommu); if (IS_ERR(mmu)) { ret = PTR_ERR(mmu); goto fail; @@ -406,6 +419,8 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) static struct mdp4_platform_config config = {}; #ifdef CONFIG_OF /* TODO */ + config.max_clk = 266667000; + config.iommu = iommu_domain_alloc(&platform_bus_type); #else if (cpu_is_apq8064()) config.max_clk = 266667000; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 715520c54cde..3225da804c61 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -42,6 +42,7 @@ struct mdp4_kms { struct clk *clk; struct clk *pclk; struct clk *lut_clk; + struct clk *axi_clk; struct mdp_irq error_handler; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 0aa51517f826..67f4f896ba8c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 
-Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -68,6 +68,8 @@ enum mdp5_pipe { SSPP_RGB2 = 5, SSPP_DMA0 = 6, SSPP_DMA1 = 7, + SSPP_VIG3 = 8, + SSPP_RGB3 = 9, }; enum mdp5_ctl_mode { @@ -126,7 +128,11 @@ enum mdp5_client_id { CID_RGB0 = 16, CID_RGB1 = 17, CID_RGB2 = 18, - CID_MAX = 19, + CID_VIG3_Y = 19, + CID_VIG3_CR = 20, + CID_VIG3_CB = 21, + CID_RGB3 = 22, + CID_MAX = 23, }; enum mdp5_igc_type { @@ -299,11 +305,34 @@ static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) #define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 #define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 -static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; } +static inline uint32_t __offset_CTL(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->ctl.base[0]); + case 1: return (mdp5_cfg->ctl.base[1]); + case 2: return (mdp5_cfg->ctl.base[2]); + case 3: return (mdp5_cfg->ctl.base[3]); + case 4: return (mdp5_cfg->ctl.base[4]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); } -static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } +static inline uint32_t __offset_LAYER(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000000; + case 1: return 0x00000004; + case 2: return 0x00000008; + case 3: return 0x0000000c; + case 4: return 0x00000010; + case 5: return 0x00000024; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } -static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; } +static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } #define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 #define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val) @@ -354,8 +383,20 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val) } #define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 #define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 +#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000 +#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000 +#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK; +} -static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; } +static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); } #define MDP5_CTL_OP_MODE__MASK 0x0000000f #define MDP5_CTL_OP_MODE__SHIFT 0 static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val) @@ -377,7 +418,7 @@ static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val) return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK; } -static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; } +static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { 
return 0x00000018 + __offset_CTL(i0); } #define MDP5_CTL_FLUSH_VIG0 0x00000001 #define MDP5_CTL_FLUSH_VIG1 0x00000002 #define MDP5_CTL_FLUSH_VIG2 0x00000004 @@ -387,26 +428,48 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x1 #define MDP5_CTL_FLUSH_LM0 0x00000040 #define MDP5_CTL_FLUSH_LM1 0x00000080 #define MDP5_CTL_FLUSH_LM2 0x00000100 +#define MDP5_CTL_FLUSH_LM3 0x00000200 +#define MDP5_CTL_FLUSH_LM4 0x00000400 #define MDP5_CTL_FLUSH_DMA0 0x00000800 #define MDP5_CTL_FLUSH_DMA1 0x00001000 #define MDP5_CTL_FLUSH_DSPP0 0x00002000 #define MDP5_CTL_FLUSH_DSPP1 0x00004000 #define MDP5_CTL_FLUSH_DSPP2 0x00008000 #define MDP5_CTL_FLUSH_CTL 0x00020000 +#define MDP5_CTL_FLUSH_VIG3 0x00040000 +#define MDP5_CTL_FLUSH_RGB3 0x00080000 +#define MDP5_CTL_FLUSH_LM5 0x00100000 +#define MDP5_CTL_FLUSH_DSPP3 0x00200000 -static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; } +static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); } -static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; } +static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); } -static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } +static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) +{ + switch (idx) { + case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); + case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); + case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); + case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]); + case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]); + case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]); + case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]); + case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); + case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); + case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 #define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val) @@ -420,7 +483,7 @@ static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val) return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK; } -static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000 
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16 static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val) @@ -434,7 +497,7 @@ static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val) return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK; } -static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000 #define MDP5_PIPE_SRC_XY_Y__SHIFT 16 static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val) @@ -448,7 +511,7 @@ static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val) return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK; } -static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); } #define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000 #define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16 static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val) @@ -462,7 +525,7 @@ static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val) return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK; } -static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); } #define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000 #define MDP5_PIPE_OUT_XY_Y__SHIFT 16 static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val) @@ -476,15 +539,15 @@ static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val) return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK; } -static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff #define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0 static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val) @@ -498,7 +561,7 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val) return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK; } -static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff #define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0 static inline uint32_t 
MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val) @@ -512,9 +575,9 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val) return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK; } -static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 #define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) @@ -568,7 +631,7 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_ty return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; } -static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff #define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val) @@ -594,7 +657,7 @@ static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val) return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK; } -static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); } #define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001 #define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006 #define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1 @@ -610,29 +673,29 @@ static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val) #define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000 #define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000 -static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 
0x00001270 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); } #define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff #define MDP5_PIPE_DECIMATION_VERT__SHIFT 0 static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val) @@ -646,7 +709,7 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val) return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK; } -static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); } #define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001 #define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002 #define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300 @@ -686,23 +749,34 @@ static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK; } -static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; } +static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); } -static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; } +static inline uint32_t __offset_LM(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->lm.base[0]); + case 1: return (mdp5_cfg->lm.base[1]); + case 2: return (mdp5_cfg->lm.base[2]); + case 3: return (mdp5_cfg->lm.base[3]); + case 4: return (mdp5_cfg->lm.base[4]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t 
REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); } #define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002 #define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 #define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 #define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 -static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } #define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 #define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16 static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val) @@ -716,13 +790,13 @@ static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val) return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK; } -static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; } #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0 static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val) @@ -744,57 +818,67 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val) #define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000 #define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000 -static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + 
__offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; } +static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; } -static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; } +static inline uint32_t 
REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); } -static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; } +static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); } -static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; } +static inline uint32_t __offset_DSPP(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->dspp.base[0]); + case 1: return (mdp5_cfg->dspp.base[1]); + case 2: return (mdp5_cfg->dspp.base[2]); + case 3: return (mdp5_cfg->dspp.base[3]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); } #define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001 #define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e #define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1 @@ -811,29 +895,40 @@ static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val) #define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000 #define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000 -static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; } +static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); } -static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; } +static inline uint32_t __offset_INTF(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->intf.base[0]); + case 
1: return (mdp5_cfg->intf.base[1]); + case 2: return (mdp5_cfg->intf.base[2]); + case 3: return (mdp5_cfg->intf.base[3]); + case 4: return (mdp5_cfg->intf.base[4]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); } #define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff #define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0 static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val) @@ -847,23 +942,23 @@ static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val) return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK; } -static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); } #define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff #define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0 static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val) @@ -872,7 +967,7 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val) } #define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000 -static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 
0x200*i0; } +static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); } #define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff #define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0 static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val) @@ -880,11 +975,11 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val) return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK; } -static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); } #define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff #define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0 static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val) @@ -898,7 +993,7 @@ static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val) return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK; } -static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); } #define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff #define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0 static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val) @@ -913,124 +1008,132 @@ static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val) } #define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000 -static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); } #define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001 #define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002 #define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004 -static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); } -static inline 
uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; } +static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); } -static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; } +static inline uint32_t __offset_AD(uint32_t idx) +{ + 
switch (idx) { + case 0: return (mdp5_cfg->ad.base[0]); + case 1: return (mdp5_cfg->ad.base[1]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; } +static inline uint32_t 
REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); } -static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; } +static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); } #endif /* MDP5_XML */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 71510ee26e96..31a2c6331a1d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -26,14 +26,98 @@ static const char *iommu_ports[] = { static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev); -static int mdp5_hw_init(struct msm_kms *kms) +const struct 
mdp5_config *mdp5_cfg;
+
+static const struct mdp5_config msm8x74_config = {
+	.name = "msm8x74",
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.ad = {
+		.count = 2,
+		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+	},
+	.intf = {
+		.count = 4,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+	},
+};
+
+static const struct mdp5_config apq8084_config = {
+	.name = "apq8084",
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 4,
+		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+	},
+	.pipe_rgb = {
+		.count = 4,
+		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x03200, 0x03600 },
+	},
+	.lm = {
+		.count = 6,
+		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+	},
+	.dspp = {
+		.count = 4,
+		.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+
+	},
+	.ad = {
+		.count = 3,
+		.base = { 0x13500, 0x13700, 0x13900 },
+	},
+	.intf = {
+		.count = 5,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+	},
+};
+
+struct mdp5_config_entry {
+	int revision;
+	const struct mdp5_config *config;
+};
+
+static const struct mdp5_config_entry mdp5_configs[] = {
+	{ .revision = 0, .config = &msm8x74_config },
+	{ .revision = 2, .config = &msm8x74_config },
+	{ .revision = 3, .config = &apq8084_config },
+};
+
+static int mdp5_select_hw_cfg(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
 	struct drm_device *dev = mdp5_kms->dev;
 	uint32_t version, major, minor;
-	int ret = 0;
-
-	pm_runtime_get_sync(dev->dev);
+	int i, ret = 0;
 
 	mdp5_enable(mdp5_kms);
 	version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
@@ -44,8 +128,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
 	DBG("found MDP5 version v%d.%d", major, minor);
 
-	if ((major != 1) || ((minor != 0) && (minor != 2))) {
-		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+	if (major != 1) {
+		dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
 				major, minor);
 		ret = -ENXIO;
 		goto out;
@@ -53,6 +137,35 @@ static int mdp5_hw_init(struct msm_kms *kms)
 
 	mdp5_kms->rev = minor;
 
+	/* only after mdp5_cfg global pointer's init can we access the hw */
+	for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
+		if (mdp5_configs[i].revision != minor)
+			continue;
+		mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
+		break;
+	}
+	if (unlikely(!mdp5_kms->hw_cfg)) {
+		dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto out;
+	}
+
+	DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
+
+	return 0;
+out:
+	return ret;
+}
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+	struct drm_device *dev = mdp5_kms->dev;
+	int i;
+
+	pm_runtime_get_sync(dev->dev);
+
 	/* Magic unknown register writes:
 	 *
 	 *    W VBIF:0x004 00000001  (mdss_mdp.c:839)
@@ -78,15 +191,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
 	 */
 	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
 
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
-	mdp5_write(mdp5_kms, 
REG_MDP5_CTL_OP(2), 0); - mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0); -out: + for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++) + mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0); + pm_runtime_put_sync(dev->dev); - return ret; + return 0; } static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, @@ -161,7 +272,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) static int modeset_init(struct mdp5_kms *mdp5_kms) { static const enum mdp5_pipe crtcs[] = { - SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, + SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, }; struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; @@ -169,7 +280,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) int i, ret; /* construct CRTCs: */ - for (i = 0; i < ARRAY_SIZE(crtcs); i++) { + for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) { struct drm_plane *plane; struct drm_crtc *crtc; @@ -246,7 +357,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) struct mdp5_kms *mdp5_kms; struct msm_kms *kms = NULL; struct msm_mmu *mmu; - int ret; + int i, ret; mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); if (!mdp5_kms) { @@ -307,20 +418,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); + ret = mdp5_select_hw_cfg(kms); + if (ret) + goto fail; + /* make sure things are off before attaching iommu (bootloader could * have left things on, in which case we'll start getting faults if * we don't disable): */ mdp5_enable(mdp5_kms); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0); + for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++) + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); mdp5_disable(mdp5_kms); mdelay(16); if (config->iommu) { - mmu = msm_iommu_new(dev, config->iommu); + mmu = msm_iommu_new(&pdev->dev, config->iommu); if (IS_ERR(mmu)) { ret = PTR_ERR(mmu); dev_err(dev->dev, "failed to init iommu: %d\n", ret); @@ -368,5 +481,11 @@ static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev) #ifdef CONFIG_OF /* TODO */ #endif + config.iommu = iommu_domain_alloc(&platform_bus_type); + /* TODO hard-coded in downstream mdss, but should it be? 
*/ + config.max_clk = 200000000; + /* TODO get from DT: */ + config.smp_blk_cnt = 22; + return &config; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 6e981b692d1d..5bf340dd0f00 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -21,6 +21,24 @@ #include "msm_drv.h" #include "msm_kms.h" #include "mdp/mdp_kms.h" +/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */ +#define MDP5_MAX_BASES 8 +struct mdp5_sub_block { + int count; + uint32_t base[MDP5_MAX_BASES]; +}; +struct mdp5_config { + char *name; + struct mdp5_sub_block ctl; + struct mdp5_sub_block pipe_vig; + struct mdp5_sub_block pipe_rgb; + struct mdp5_sub_block pipe_dma; + struct mdp5_sub_block lm; + struct mdp5_sub_block dspp; + struct mdp5_sub_block ad; + struct mdp5_sub_block intf; +}; +extern const struct mdp5_config *mdp5_cfg; #include "mdp5.xml.h" #include "mdp5_smp.h" @@ -30,6 +48,7 @@ struct mdp5_kms { struct drm_device *dev; int rev; + const struct mdp5_config *hw_cfg; /* mapper-id used to request GEM buffer mapped for scanout: */ int id; @@ -82,6 +101,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe) NAME(VIG0), NAME(VIG1), NAME(VIG2), NAME(RGB0), NAME(RGB1), NAME(RGB2), NAME(DMA0), NAME(DMA1), + NAME(VIG3), NAME(RGB3), #undef NAME }; return names[pipe]; @@ -98,6 +118,8 @@ static inline uint32_t pipe2flush(enum mdp5_pipe pipe) case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; + case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; + case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; default: return 0; } } @@ -108,6 +130,7 @@ static inline int pipe2nclients(enum mdp5_pipe pipe) case SSPP_RGB0: case SSPP_RGB1: case SSPP_RGB2: + case SSPP_RGB3: return 1; default: return 3; @@ -126,6 +149,8 @@ static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane) case SSPP_RGB2: return CID_RGB2; case SSPP_DMA0: return CID_DMA0_Y + plane; case SSPP_DMA1: return CID_DMA1_Y + plane; + case SSPP_VIG3: return CID_VIG3_Y + plane; + case SSPP_RGB3: return CID_RGB3; default: return CID_UNUSED; } } diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h index a9629b85b983..64c1afd6030a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h @@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) - /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04) +- 
/home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9a5d87db5c23..fcf95680413d 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -52,7 +52,7 @@ module_param(reglog, bool, 0600); #define reglog 0 #endif -static char *vram; +static char *vram = "16m"; MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU"); module_param(vram, charp, 0); @@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags) struct msm_kms *kms; int ret; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(dev->dev, "failed to allocate private data\n"); @@ -314,13 +313,15 @@ fail: static void load_gpu(struct drm_device *dev) { + static DEFINE_MUTEX(init_lock); struct msm_drm_private *priv = dev->dev_private; struct msm_gpu *gpu; + mutex_lock(&init_lock); + if (priv->gpu) - return; + goto out; - mutex_lock(&dev->struct_mutex); gpu = a3xx_gpu_init(dev); if (IS_ERR(gpu)) { dev_warn(dev->dev, "failed to load a3xx gpu\n"); @@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev) if (gpu) { int ret; + mutex_lock(&dev->struct_mutex); gpu->funcs->pm_resume(gpu); + mutex_unlock(&dev->struct_mutex); ret = gpu->funcs->hw_init(gpu); if (ret) { dev_err(dev->dev, "gpu hw init failed: %d\n", ret); @@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev) /* give inactive pm a chance to kick in: */ msm_gpu_retire(gpu); } - } priv->gpu = gpu; - mutex_unlock(&dev->struct_mutex); +out: + mutex_unlock(&init_lock); } static int msm_open(struct drm_device *dev, struct drm_file *file) @@ -906,25 +909,22 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } -static int msm_drm_add_components(struct device *master, struct master *m) +static int add_components(struct device *dev, struct component_match **matchptr, + const char *name) { - struct device_node *np = master->of_node; + struct device_node *np = dev->of_node; unsigned i; - int ret; for (i = 0; ; i++) { struct device_node *node; - node = of_parse_phandle(np, "connectors", i); + node = of_parse_phandle(np, name, i); if (!node) break; - ret = component_master_add_child(m, compare_of, node); - of_node_put(node); - - if (ret) - return ret; + component_match_add(dev, matchptr, compare_of, node); } + return 0; } #else @@ -932,9 +932,34 @@ static int compare_dev(struct device *dev, void *data) { return dev == data; } +#endif -static int msm_drm_add_components(struct device *master, struct master *m) +static int msm_drm_bind(struct device *dev) { + return drm_platform_init(&msm_driver, to_platform_device(dev)); +} + +static void msm_drm_unbind(struct device *dev) +{ + drm_put_dev(platform_get_drvdata(to_platform_device(dev))); +} + +static const struct component_master_ops msm_drm_ops = { + .bind = msm_drm_bind, + .unbind = msm_drm_unbind, +}; + +/* + * Platform driver: + */ + +static int msm_pdev_probe(struct platform_device *pdev) +{ + struct component_match *match = NULL; +#ifdef CONFIG_OF + add_components(&pdev->dev, &match, "connectors"); + add_components(&pdev->dev, &match, "gpus"); +#else /* For non-DT case, it kinda sucks. We don't actually have a way * to know whether or not we are waiting for certain devices (or if * they are simply not present). 
But for non-DT we only need to @@ -949,50 +974,20 @@ static int msm_drm_add_components(struct device *master, struct master *m) for (i = 0; i < ARRAY_SIZE(devnames); i++) { struct device *dev; - int ret; dev = bus_find_device_by_name(&platform_bus_type, NULL, devnames[i]); if (!dev) { - dev_info(master, "still waiting for %s\n", devnames[i]); + dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]); return -EPROBE_DEFER; } - ret = component_master_add_child(m, compare_dev, dev); - if (ret) { - DBG("could not add child: %d", ret); - return ret; - } + component_match_add(&pdev->dev, &match, compare_dev, dev); } - - return 0; -} #endif -static int msm_drm_bind(struct device *dev) -{ - return drm_platform_init(&msm_driver, to_platform_device(dev)); -} - -static void msm_drm_unbind(struct device *dev) -{ - drm_put_dev(platform_get_drvdata(to_platform_device(dev))); -} - -static const struct component_master_ops msm_drm_ops = { - .add_components = msm_drm_add_components, - .bind = msm_drm_bind, - .unbind = msm_drm_unbind, -}; - -/* - * Platform driver: - */ - -static int msm_pdev_probe(struct platform_device *pdev) -{ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - return component_master_add(&pdev->dev, &msm_drm_ops); + return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); } static int msm_pdev_remove(struct platform_device *pdev) @@ -1008,7 +1003,8 @@ static const struct platform_device_id msm_id[] = { }; static const struct of_device_id dt_match[] = { - { .compatible = "qcom,mdss_mdp" }, + { .compatible = "qcom,mdp" }, /* mdp4 */ + { .compatible = "qcom,mdss_mdp" }, /* mdp5 */ {} }; MODULE_DEVICE_TABLE(of, dt_match); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 5107fc4826bc..ab5bfd2d0ebf 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -19,6 +19,11 @@ #include "drm_crtc.h" #include "drm_fb_helper.h" +#include "msm_gem.h" + +extern int msm_gem_mmap_obj(struct drm_gem_object *obj, + struct vm_area_struct *vma); +static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma); /* * fbdev funcs, to implement legacy fbdev interface on top of drm driver @@ -43,6 +48,7 @@ static struct fb_ops msm_fb_ops = { .fb_fillrect = sys_fillrect, .fb_copyarea = sys_copyarea, .fb_imageblit = sys_imageblit, + .fb_mmap = msm_fbdev_mmap, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, @@ -51,6 +57,31 @@ static struct fb_ops msm_fb_ops = { .fb_setcmap = drm_fb_helper_setcmap, }; +static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par; + struct msm_fbdev *fbdev = to_msm_fbdev(helper); + struct drm_gem_object *drm_obj = fbdev->bo; + struct drm_device *dev = helper->dev; + int ret = 0; + + if (drm_device_is_unplugged(dev)) + return -ENODEV; + + mutex_lock(&dev->struct_mutex); + + ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma); + + mutex_unlock(&dev->struct_mutex); + + if (ret) { + pr_err("%s:drm_gem_mmap_obj fail\n", __func__); + return ret; + } + + return msm_gem_mmap_obj(drm_obj, vma); +} + static int msm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { @@ -104,8 +135,16 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, mutex_lock(&dev->struct_mutex); - /* TODO implement our own fb_mmap so we don't need this: */ - msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); + /* + * NOTE: if we can be guaranteed to be able to map buffer + * in 
panic (ie. lock-safe, etc) we could avoid pinning the + * buffer now: + */ + ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); + if (ret) { + dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); + goto fail_unlock; + } fbi = framebuffer_alloc(0, dev->dev); if (!fbi) { @@ -177,7 +216,7 @@ static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc, DBG("fbdev: get gamma"); } -static struct drm_fb_helper_funcs msm_fb_helper_funcs = { +static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { .gamma_set = msm_crtc_fb_gamma_set, .gamma_get = msm_crtc_fb_gamma_get, .fb_probe = msm_fbdev_create, @@ -189,7 +228,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) struct msm_drm_private *priv = dev->dev_private; struct msm_fbdev *fbdev = NULL; struct drm_fb_helper *helper; - int ret = 0; + int ret; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); if (!fbdev) @@ -197,7 +236,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) helper = &fbdev->base; - helper->funcs = &msm_fb_helper_funcs; + drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs); ret = drm_fb_helper_init(dev, helper, priv->num_crtcs, priv->num_connectors); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 690d7e7b6d1e..4b1b82adabde 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj) int npages = obj->size >> PAGE_SHIFT; if (iommu_present(&platform_bus_type)) - p = drm_gem_get_pages(obj, 0); + p = drm_gem_get_pages(obj); else p = get_pages_vram(obj, npages); @@ -278,24 +278,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, uint32_t *iova) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct drm_device *dev = obj->dev; int ret = 0; if (!msm_obj->domain[id].iova) { struct msm_drm_private *priv = obj->dev->dev_private; - struct msm_mmu *mmu = priv->mmus[id]; struct page **pages = get_pages(obj); - if (!mmu) { - dev_err(dev->dev, "null MMU pointer\n"); - return -EINVAL; - } - if (IS_ERR(pages)) return PTR_ERR(pages); if (iommu_present(&platform_bus_type)) { - uint32_t offset = (uint32_t)mmap_offset(obj); + struct msm_mmu *mmu = priv->mmus[id]; + uint32_t offset; + + if (WARN_ON(!mmu)) + return -EINVAL; + + offset = (uint32_t)mmap_offset(obj); ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, obj->size, IOMMU_READ | IOMMU_WRITE); msm_obj->domain[id].iova = offset; diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index c6322197db8c..4a0dce587745 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -606,14 +606,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, iommu = iommu_domain_alloc(&platform_bus_type); if (iommu) { dev_info(drm->dev, "%s: using IOMMU\n", name); - gpu->mmu = msm_iommu_new(drm, iommu); + gpu->mmu = msm_iommu_new(&pdev->dev, iommu); } else { dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); } gpu->id = msm_register_mmu(drm, gpu->mmu); + /* Create ringbuffer: */ + mutex_lock(&drm->struct_mutex); gpu->rb = msm_ringbuffer_new(gpu, ringsz); + mutex_unlock(&drm->struct_mutex); if (IS_ERR(gpu->rb)) { ret = PTR_ERR(gpu->rb); gpu->rb = NULL; @@ -621,13 +624,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; } - ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova); - if (ret) { - gpu->rb_iova = 0; - dev_err(drm->dev, "could not map ringbuffer: %d\n", ret); - goto fail; - } - 
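In the msm_gem.c hunk above, msm_gem_get_iova_locked() only performs the IOMMU map while msm_obj->domain[id].iova is still zero and then caches the address, which is why the explicit ringbuffer mapping in msm_gpu_init() could simply be dropped. A rough standalone sketch of that map-on-first-use caching; the names toy_bo and toy_mmu_map are made up for illustration, and a trivial bump allocator stands in for the real IOMMU.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bo {
	size_t size;
	uint32_t iova;	/* 0 means "not mapped into the MMU yet" */
};

/* Fake MMU: hand out page-aligned addresses from a bump allocator. */
static uint32_t next_iova = 0x10000000;

static uint32_t toy_mmu_map(const struct toy_bo *bo)
{
	uint32_t iova = next_iova;

	next_iova += (uint32_t)(((bo->size + 4095) / 4096) * 4096);
	return iova;
}

/* Map on first use and cache the result instead of mapping eagerly at init time. */
static uint32_t toy_bo_get_iova(struct toy_bo *bo)
{
	if (!bo->iova)
		bo->iova = toy_mmu_map(bo);
	return bo->iova;
}

int main(void)
{
	struct toy_bo rb = { .size = 4096, .iova = 0 };

	printf("first use:  0x%08x\n", (unsigned)toy_bo_get_iova(&rb));
	printf("second use: 0x%08x\n", (unsigned)toy_bo_get_iova(&rb));	/* same address */
	return 0;
}

Deferring the map also means the MMU does not have to be ready when the buffer is allocated; the first caller that actually needs the IOVA pays for the mapping.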
bs_init(gpu); return 0; diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 4b2ad9181edf..7acdaa5688b7 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -27,45 +27,20 @@ struct msm_iommu { static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, unsigned long iova, int flags, void *arg) { - DBG("*** fault: iova=%08lx, flags=%d", iova, flags); - return -ENOSYS; + pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); + return 0; } static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) { - struct drm_device *dev = mmu->dev; struct msm_iommu *iommu = to_msm_iommu(mmu); - int i, ret; - - for (i = 0; i < cnt; i++) { - struct device *msm_iommu_get_ctx(const char *ctx_name); - struct device *ctx = msm_iommu_get_ctx(names[i]); - if (IS_ERR_OR_NULL(ctx)) { - dev_warn(dev->dev, "couldn't get %s context", names[i]); - continue; - } - ret = iommu_attach_device(iommu->domain, ctx); - if (ret) { - dev_warn(dev->dev, "could not attach iommu to %s", names[i]); - return ret; - } - } - - return 0; + return iommu_attach_device(iommu->domain, mmu->dev); } static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt) { struct msm_iommu *iommu = to_msm_iommu(mmu); - int i; - - for (i = 0; i < cnt; i++) { - struct device *msm_iommu_get_ctx(const char *ctx_name); - struct device *ctx = msm_iommu_get_ctx(names[i]); - if (IS_ERR_OR_NULL(ctx)) - continue; - iommu_detach_device(iommu->domain, ctx); - } + iommu_detach_device(iommu->domain, mmu->dev); } static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, @@ -149,7 +124,7 @@ static const struct msm_mmu_funcs funcs = { .destroy = msm_iommu_destroy, }; -struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain) +struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) { struct msm_iommu *iommu; diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 21da6d154f71..7cd88d9dc155 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -32,17 +32,17 @@ struct msm_mmu_funcs { struct msm_mmu { const struct msm_mmu_funcs *funcs; - struct drm_device *dev; + struct device *dev; }; -static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev, +static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, const struct msm_mmu_funcs *funcs) { mmu->dev = dev; mmu->funcs = funcs; } -struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain); -struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu); +struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); +struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 637c29a33127..40afc69a3778 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -1,5 +1,5 @@ config DRM_NOUVEAU - tristate "Nouveau (nVidia) cards" + tristate "Nouveau (NVIDIA) cards" depends on DRM && PCI select FW_LOADER select DRM_KMS_HELPER @@ -23,7 +23,15 @@ config DRM_NOUVEAU select THERMAL if ACPI && X86 select ACPI_VIDEO if ACPI && X86 help - Choose this option for open-source nVidia support. + Choose this option for open-source NVIDIA support. 
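
The msm_iommu fault handler earlier in this hunk is changed from a one-off debug print returning -ENOSYS to pr_warn_ratelimited() returning 0, so repeated translation faults no longer flood the log. The sketch below only illustrates the throttling idea with a crude once-per-second limiter in plain C; it is not the kernel's ratelimit implementation.

#include <stdio.h>
#include <time.h>

/* Minimal stand-in for a ratelimited warning: at most one message per
 * second.  pr_warn_ratelimited() is more elaborate (burst handling,
 * per-callsite state); this only demonstrates the concept. */
static void warn_ratelimited(const char *msg)
{
    static time_t last;
    time_t now = time(NULL);

    if (now == last)
        return;                     /* drop messages within the same second */
    last = now;
    fprintf(stderr, "%s\n", msg);
}

int main(void)
{
    for (int i = 0; i < 1000000; i++)
        warn_ratelimited("*** fault: iova=deadbeef, flags=0");
    return 0;
}
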
+ +config NOUVEAU_PLATFORM_DRIVER + tristate "Nouveau (NVIDIA) SoC GPUs" + depends on DRM_NOUVEAU && ARCH_TEGRA + default y + help + Support for Nouveau platform driver, used for SoC GPUs as found + on NVIDIA Tegra K1. config NOUVEAU_DEBUG int "Maximum debug level" diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 8b307e143632..f5d7f7ce4bc6 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -14,8 +14,10 @@ nouveau-y += core/core/enum.o nouveau-y += core/core/event.o nouveau-y += core/core/gpuobj.o nouveau-y += core/core/handle.o +nouveau-y += core/core/ioctl.o nouveau-y += core/core/mm.o nouveau-y += core/core/namedb.o +nouveau-y += core/core/notify.o nouveau-y += core/core/object.o nouveau-y += core/core/option.o nouveau-y += core/core/parent.o @@ -26,6 +28,7 @@ nouveau-y += core/core/subdev.o nouveau-y += core/subdev/bar/base.o nouveau-y += core/subdev/bar/nv50.o nouveau-y += core/subdev/bar/nvc0.o +nouveau-y += core/subdev/bar/gk20a.o nouveau-y += core/subdev/bios/base.o nouveau-y += core/subdev/bios/bit.o nouveau-y += core/subdev/bios/boost.o @@ -64,6 +67,7 @@ nouveau-y += core/subdev/clock/nva3.o nouveau-y += core/subdev/clock/nvaa.o nouveau-y += core/subdev/clock/nvc0.o nouveau-y += core/subdev/clock/nve0.o +nouveau-y += core/subdev/clock/gk20a.o nouveau-y += core/subdev/clock/pllnv04.o nouveau-y += core/subdev/clock/pllnva3.o nouveau-y += core/subdev/devinit/base.o @@ -149,8 +153,10 @@ nouveau-y += core/subdev/instmem/base.o nouveau-y += core/subdev/instmem/nv04.o nouveau-y += core/subdev/instmem/nv40.o nouveau-y += core/subdev/instmem/nv50.o -nouveau-y += core/subdev/ltcg/gf100.o -nouveau-y += core/subdev/ltcg/gm107.o +nouveau-y += core/subdev/ltc/base.o +nouveau-y += core/subdev/ltc/gf100.o +nouveau-y += core/subdev/ltc/gk104.o +nouveau-y += core/subdev/ltc/gm107.o nouveau-y += core/subdev/mc/base.o nouveau-y += core/subdev/mc/nv04.o nouveau-y += core/subdev/mc/nv40.o @@ -161,6 +167,7 @@ nouveau-y += core/subdev/mc/nv94.o nouveau-y += core/subdev/mc/nv98.o nouveau-y += core/subdev/mc/nvc0.o nouveau-y += core/subdev/mc/nvc3.o +nouveau-y += core/subdev/mc/gk20a.o nouveau-y += core/subdev/mxm/base.o nouveau-y += core/subdev/mxm/mxms.o nouveau-y += core/subdev/mxm/nv50.o @@ -169,6 +176,7 @@ nouveau-y += core/subdev/pwr/memx.o nouveau-y += core/subdev/pwr/nva3.o nouveau-y += core/subdev/pwr/nvc0.o nouveau-y += core/subdev/pwr/nvd0.o +nouveau-y += core/subdev/pwr/gk104.o nouveau-y += core/subdev/pwr/nv108.o nouveau-y += core/subdev/therm/base.o nouveau-y += core/subdev/therm/fan.o @@ -211,6 +219,7 @@ nouveau-y += core/engine/copy/nvc0.o nouveau-y += core/engine/copy/nve0.o nouveau-y += core/engine/crypt/nv84.o nouveau-y += core/engine/crypt/nv98.o +nouveau-y += core/engine/device/acpi.o nouveau-y += core/engine/device/base.o nouveau-y += core/engine/device/ctrl.o nouveau-y += core/engine/device/nv04.o @@ -270,6 +279,7 @@ nouveau-y += core/engine/graph/ctxnvd9.o nouveau-y += core/engine/graph/ctxnve4.o nouveau-y += core/engine/graph/ctxgk20a.o nouveau-y += core/engine/graph/ctxnvf0.o +nouveau-y += core/engine/graph/ctxgk110b.o nouveau-y += core/engine/graph/ctxnv108.o nouveau-y += core/engine/graph/ctxgm107.o nouveau-y += core/engine/graph/nv04.o @@ -291,6 +301,7 @@ nouveau-y += core/engine/graph/nvd9.o nouveau-y += core/engine/graph/nve4.o nouveau-y += core/engine/graph/gk20a.o nouveau-y += core/engine/graph/nvf0.o +nouveau-y += core/engine/graph/gk110b.o nouveau-y += core/engine/graph/nv108.o nouveau-y += 
core/engine/graph/gm107.o nouveau-y += core/engine/mpeg/nv31.o @@ -318,11 +329,18 @@ nouveau-y += core/engine/vp/nv98.o nouveau-y += core/engine/vp/nvc0.o nouveau-y += core/engine/vp/nve0.o +# nvif +nouveau-y += nvif/object.o +nouveau-y += nvif/client.o +nouveau-y += nvif/device.o +nouveau-y += nvif/notify.o + # drm/core nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o nouveau-y += nouveau_vga.o nouveau_agp.o nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o nouveau-y += nouveau_prime.o nouveau_abi16.o +nouveau-y += nouveau_nvif.o nouveau_usif.o nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o @@ -349,3 +367,6 @@ nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o + +# platform driver +obj-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c index 9079c0ac58e6..68bf06768123 100644 --- a/drivers/gpu/drm/nouveau/core/core/client.c +++ b/drivers/gpu/drm/nouveau/core/core/client.c @@ -26,13 +26,167 @@ #include <core/client.h> #include <core/handle.h> #include <core/option.h> +#include <nvif/unpack.h> +#include <nvif/class.h> + +#include <nvif/unpack.h> +#include <nvif/event.h> #include <engine/device.h> +struct nvkm_client_notify { + struct nouveau_client *client; + struct nvkm_notify n; + u8 version; + u8 size; + union { + struct nvif_notify_rep_v0 v0; + } rep; +}; + +static int +nvkm_client_notify(struct nvkm_notify *n) +{ + struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n); + struct nouveau_client *client = notify->client; + return client->ntfy(¬ify->rep, notify->size, n->data, n->size); +} + +int +nvkm_client_notify_put(struct nouveau_client *client, int index) +{ + if (index < ARRAY_SIZE(client->notify)) { + if (client->notify[index]) { + nvkm_notify_put(&client->notify[index]->n); + return 0; + } + } + return -ENOENT; +} + +int +nvkm_client_notify_get(struct nouveau_client *client, int index) +{ + if (index < ARRAY_SIZE(client->notify)) { + if (client->notify[index]) { + nvkm_notify_get(&client->notify[index]->n); + return 0; + } + } + return -ENOENT; +} + +int +nvkm_client_notify_del(struct nouveau_client *client, int index) +{ + if (index < ARRAY_SIZE(client->notify)) { + if (client->notify[index]) { + nvkm_notify_fini(&client->notify[index]->n); + kfree(client->notify[index]); + client->notify[index] = NULL; + return 0; + } + } + return -ENOENT; +} + +int +nvkm_client_notify_new(struct nouveau_client *client, + struct nvkm_event *event, void *data, u32 size) +{ + struct nvkm_client_notify *notify; + union { + struct nvif_notify_req_v0 v0; + } *req = data; + u8 index, reply; + int ret; + + for (index = 0; index < ARRAY_SIZE(client->notify); index++) { + if (!client->notify[index]) + break; + } + + if (index == ARRAY_SIZE(client->notify)) + return -ENOSPC; + + notify = kzalloc(sizeof(*notify), GFP_KERNEL); + if (!notify) + return -ENOMEM; + + nv_ioctl(client, "notify new size %d\n", size); + if (nvif_unpack(req->v0, 0, 0, true)) { + nv_ioctl(client, "notify new vers %d reply %d route %02x " + "token %llx\n", req->v0.version, + req->v0.reply, req->v0.route, req->v0.token); + notify->version = req->v0.version; + notify->size = sizeof(notify->rep.v0); + notify->rep.v0.version = req->v0.version; + notify->rep.v0.route = req->v0.route; + notify->rep.v0.token = req->v0.token; + 
reply = req->v0.reply; + } + + if (ret == 0) { + ret = nvkm_notify_init(event, nvkm_client_notify, false, + data, size, reply, ¬ify->n); + if (ret == 0) { + client->notify[index] = notify; + notify->client = client; + return index; + } + } + + kfree(notify); + return ret; +} + +static int +nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size) +{ + union { + struct nv_client_devlist_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "client devlist size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "client devlist vers %d count %d\n", + args->v0.version, args->v0.count); + if (size == sizeof(args->v0.device[0]) * args->v0.count) { + ret = nouveau_device_list(args->v0.device, + args->v0.count); + if (ret >= 0) { + args->v0.count = ret; + ret = 0; + } + } else { + ret = -EINVAL; + } + } + + return ret; +} + +static int +nouveau_client_mthd(struct nouveau_object *object, u32 mthd, + void *data, u32 size) +{ + switch (mthd) { + case NV_CLIENT_DEVLIST: + return nouveau_client_devlist(object, data, size); + default: + break; + } + return -EINVAL; +} + static void nouveau_client_dtor(struct nouveau_object *object) { struct nouveau_client *client = (void *)object; + int i; + for (i = 0; i < ARRAY_SIZE(client->notify); i++) + nvkm_client_notify_del(client, i); nouveau_object_ref(NULL, &client->device); nouveau_handle_destroy(client->root); nouveau_namedb_destroy(&client->base); @@ -42,6 +196,7 @@ static struct nouveau_oclass nouveau_client_oclass = { .ofuncs = &(struct nouveau_ofuncs) { .dtor = nouveau_client_dtor, + .mthd = nouveau_client_mthd, }, }; @@ -93,9 +248,12 @@ int nouveau_client_fini(struct nouveau_client *client, bool suspend) { const char *name[2] = { "fini", "suspend" }; - int ret; - + int ret, i; nv_debug(client, "%s running\n", name[suspend]); + nv_debug(client, "%s notify\n", name[suspend]); + for (i = 0; i < ARRAY_SIZE(client->notify); i++) + nvkm_client_notify_put(client, i); + nv_debug(client, "%s object\n", name[suspend]); ret = nouveau_handle_fini(client->root, suspend); nv_debug(client, "%s completed with %d\n", name[suspend], ret); return ret; diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c index ae81d3b5d8b7..0540a48c5678 100644 --- a/drivers/gpu/drm/nouveau/core/core/event.c +++ b/drivers/gpu/drm/nouveau/core/core/event.c @@ -1,5 +1,5 @@ /* - * Copyright 2013 Red Hat Inc. + * Copyright 2013-2014 Red Hat Inc. 
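
nvkm_client_notify_new() above scans the client's notify[] array for a free slot, returns -ENOSPC when none is left, and hands the slot index back to the caller, while the del/get/put helpers return -ENOENT for empty or out-of-range indices. A small standalone sketch of that slot-index scheme follows; the array size and names are assumptions made for the example.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 32          /* size of the per-client array is an assumption */

static void *slots[NR_SLOTS];

/* Allocate the first free slot and return its index, as notify_new does. */
static int slot_new(void *obj)
{
    for (int i = 0; i < NR_SLOTS; i++) {
        if (!slots[i]) {
            slots[i] = obj;
            return i;
        }
    }
    return -ENOSPC;
}

/* Release a slot by index; empty or out-of-range indices get -ENOENT. */
static int slot_del(int index)
{
    if (index < 0 || index >= NR_SLOTS || !slots[index])
        return -ENOENT;
    free(slots[index]);
    slots[index] = NULL;
    return 0;
}

int main(void)
{
    int idx = slot_new(malloc(8));

    printf("allocated slot %d\n", idx);
    printf("free slot     -> %d\n", slot_del(idx));
    printf("free again    -> %d\n", slot_del(idx));
    return 0;
}
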
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -24,173 +24,77 @@ #include <core/event.h> void -nouveau_event_put(struct nouveau_eventh *handler) +nvkm_event_put(struct nvkm_event *event, u32 types, int index) { - struct nouveau_event *event = handler->event; - unsigned long flags; - u32 m, t; - - if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) - return; - - spin_lock_irqsave(&event->refs_lock, flags); - for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) { - if (!--event->refs[handler->index * event->types_nr + t]) { - if (event->disable) - event->disable(event, 1 << t, handler->index); + BUG_ON(!spin_is_locked(&event->refs_lock)); + while (types) { + int type = __ffs(types); types &= ~(1 << type); + if (--event->refs[index * event->types_nr + type] == 0) { + if (event->func->fini) + event->func->fini(event, 1 << type, index); } - } - spin_unlock_irqrestore(&event->refs_lock, flags); } void -nouveau_event_get(struct nouveau_eventh *handler) +nvkm_event_get(struct nvkm_event *event, u32 types, int index) { - struct nouveau_event *event = handler->event; - unsigned long flags; - u32 m, t; - - if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) - return; - - spin_lock_irqsave(&event->refs_lock, flags); - for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) { - if (!event->refs[handler->index * event->types_nr + t]++) { - if (event->enable) - event->enable(event, 1 << t, handler->index); + BUG_ON(!spin_is_locked(&event->refs_lock)); + while (types) { + int type = __ffs(types); types &= ~(1 << type); + if (++event->refs[index * event->types_nr + type] == 1) { + if (event->func->init) + event->func->init(event, 1 << type, index); } - } - spin_unlock_irqrestore(&event->refs_lock, flags); -} - -static void -nouveau_event_fini(struct nouveau_eventh *handler) -{ - struct nouveau_event *event = handler->event; - unsigned long flags; - nouveau_event_put(handler); - spin_lock_irqsave(&event->list_lock, flags); - list_del(&handler->head); - spin_unlock_irqrestore(&event->list_lock, flags); -} - -static int -nouveau_event_init(struct nouveau_event *event, u32 types, int index, - int (*func)(void *, u32, int), void *priv, - struct nouveau_eventh *handler) -{ - unsigned long flags; - - if (types & ~((1 << event->types_nr) - 1)) - return -EINVAL; - if (index >= event->index_nr) - return -EINVAL; - - handler->event = event; - handler->flags = 0; - handler->types = types; - handler->index = index; - handler->func = func; - handler->priv = priv; - - spin_lock_irqsave(&event->list_lock, flags); - list_add_tail(&handler->head, &event->list[index]); - spin_unlock_irqrestore(&event->list_lock, flags); - return 0; -} - -int -nouveau_event_new(struct nouveau_event *event, u32 types, int index, - int (*func)(void *, u32, int), void *priv, - struct nouveau_eventh **phandler) -{ - struct nouveau_eventh *handler; - int ret = -ENOMEM; - - if (event->check) { - ret = event->check(event, types, index); - if (ret) - return ret; - } - - handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL); - if (handler) { - ret = nouveau_event_init(event, types, index, func, priv, handler); - if (ret) - kfree(handler); - } - - return ret; -} - -void -nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref) -{ - BUG_ON(handler != NULL); - if (*ref) { - nouveau_event_fini(*ref); - kfree(*ref); - } - *ref = handler; } void -nouveau_event_trigger(struct nouveau_event *event, 
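
The reworked nvkm_event_put()/nvkm_event_get() above walk every bit set in the types mask and call the fini/init hooks only on the 1-to-0 and 0-to-1 transitions of the per-(index, type) reference counts. The sketch below mirrors that structure in plain C, leaving out the refs_lock spinlock that the real code holds around the counters.

#include <stdio.h>
#include <strings.h>            /* ffs(), POSIX */

#define TYPES_NR 4
#define INDEX_NR 2

static int refs[INDEX_NR * TYPES_NR];

/* Mirrors nvkm_event_get(): walk every bit set in 'types' and, when a
 * counter goes 0 -> 1, "enable" that event source. */
static void event_get(unsigned int types, int index)
{
    while (types) {
        int type = ffs(types) - 1;          /* lowest set bit */
        types &= ~(1u << type);
        if (++refs[index * TYPES_NR + type] == 1)
            printf("init type %d on index %d\n", type, index);
    }
}

/* Counterpart of nvkm_event_put(): "disable" on the 1 -> 0 transition. */
static void event_put(unsigned int types, int index)
{
    while (types) {
        int type = ffs(types) - 1;
        types &= ~(1u << type);
        if (--refs[index * TYPES_NR + type] == 0)
            printf("fini type %d on index %d\n", type, index);
    }
}

int main(void)
{
    event_get(0x5, 1);          /* types 0 and 2 */
    event_get(0x4, 1);          /* type 2 again: no second init */
    event_put(0x5, 1);
    event_put(0x4, 1);
    return 0;
}
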
u32 types, int index) +nvkm_event_send(struct nvkm_event *event, u32 types, int index, + void *data, u32 size) { - struct nouveau_eventh *handler; + struct nvkm_notify *notify; unsigned long flags; - if (WARN_ON(index >= event->index_nr)) + if (!event->refs || WARN_ON(index >= event->index_nr)) return; spin_lock_irqsave(&event->list_lock, flags); - list_for_each_entry(handler, &event->list[index], head) { - if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags)) - continue; - if (!(handler->types & types)) - continue; - if (handler->func(handler->priv, handler->types & types, index) - != NVKM_EVENT_DROP) - continue; - nouveau_event_put(handler); + list_for_each_entry(notify, &event->list, head) { + if (notify->index == index && (notify->types & types)) { + if (event->func->send) { + event->func->send(data, size, notify); + continue; + } + nvkm_notify_send(notify, data, size); + } } spin_unlock_irqrestore(&event->list_lock, flags); } void -nouveau_event_destroy(struct nouveau_event **pevent) +nvkm_event_fini(struct nvkm_event *event) { - struct nouveau_event *event = *pevent; - if (event) { - kfree(event); - *pevent = NULL; + if (event->refs) { + kfree(event->refs); + event->refs = NULL; } } int -nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent) +nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr, + struct nvkm_event *event) { - struct nouveau_event *event; - int i; - - event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) * - sizeof(event->refs[0]), GFP_KERNEL); - if (!event) - return -ENOMEM; - - event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL); - if (!event->list) { - kfree(event); + event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr, + GFP_KERNEL); + if (!event->refs) return -ENOMEM; - } - spin_lock_init(&event->list_lock); - spin_lock_init(&event->refs_lock); - for (i = 0; i < index_nr; i++) - INIT_LIST_HEAD(&event->list[i]); + event->func = func; event->types_nr = types_nr; event->index_nr = index_nr; + spin_lock_init(&event->refs_lock); + spin_lock_init(&event->list_lock); + INIT_LIST_HEAD(&event->list); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c index 264c2b338ac3..a490b805d7e3 100644 --- a/drivers/gpu/drm/nouveau/core/core/handle.c +++ b/drivers/gpu/drm/nouveau/core/core/handle.c @@ -146,9 +146,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle, } hprintk(handle, TRACE, "created\n"); - *phandle = handle; - return 0; } @@ -224,3 +222,116 @@ nouveau_handle_put(struct nouveau_handle *handle) if (handle) nouveau_namedb_put(handle); } + +int +nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle, + u16 _oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_object *parent = NULL; + struct nouveau_object *engctx = NULL; + struct nouveau_object *object = NULL; + struct nouveau_object *engine; + struct nouveau_oclass *oclass; + struct nouveau_handle *handle; + int ret; + + /* lookup parent object and ensure it *is* a parent */ + parent = nouveau_handle_ref(client, _parent); + if (!parent) { + nv_error(client, "parent 0x%08x not found\n", _parent); + return -ENOENT; + } + + if (!nv_iclass(parent, NV_PARENT_CLASS)) { + nv_error(parent, "cannot have children\n"); + ret = -EINVAL; + goto fail_class; + } + + /* check that parent supports the requested subclass */ + ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass); + if (ret) { + 
nv_debug(parent, "illegal class 0x%04x\n", _oclass); + goto fail_class; + } + + /* make sure engine init has been completed *before* any objects + * it controls are created - the constructors may depend on + * state calculated at init (ie. default context construction) + */ + if (engine) { + ret = nouveau_object_inc(engine); + if (ret) + goto fail_class; + } + + /* if engine requires it, create a context object to insert + * between the parent and its children (eg. PGRAPH context) + */ + if (engine && nv_engine(engine)->cclass) { + ret = nouveau_object_ctor(parent, engine, + nv_engine(engine)->cclass, + data, size, &engctx); + if (ret) + goto fail_engctx; + } else { + nouveau_object_ref(parent, &engctx); + } + + /* finally, create new object and bind it to its handle */ + ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object); + *pobject = object; + if (ret) + goto fail_ctor; + + ret = nouveau_object_inc(object); + if (ret) + goto fail_init; + + ret = nouveau_handle_create(parent, _parent, _handle, object, &handle); + if (ret) + goto fail_handle; + + ret = nouveau_handle_init(handle); + if (ret) + nouveau_handle_destroy(handle); + +fail_handle: + nouveau_object_dec(object, false); +fail_init: + nouveau_object_ref(NULL, &object); +fail_ctor: + nouveau_object_ref(NULL, &engctx); +fail_engctx: + if (engine) + nouveau_object_dec(engine, false); +fail_class: + nouveau_object_ref(NULL, &parent); + return ret; +} + +int +nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle) +{ + struct nouveau_object *parent = NULL; + struct nouveau_object *namedb = NULL; + struct nouveau_handle *handle = NULL; + + parent = nouveau_handle_ref(client, _parent); + if (!parent) + return -ENOENT; + + namedb = nv_pclass(parent, NV_NAMEDB_CLASS); + if (namedb) { + handle = nouveau_namedb_get(nv_namedb(namedb), _handle); + if (handle) { + nouveau_namedb_put(handle); + nouveau_handle_fini(handle, false); + nouveau_handle_destroy(handle); + } + } + + nouveau_object_ref(NULL, &parent); + return handle ? 0 : -EINVAL; +} diff --git a/drivers/gpu/drm/nouveau/core/core/ioctl.c b/drivers/gpu/drm/nouveau/core/core/ioctl.c new file mode 100644 index 000000000000..f7e19bfb489c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/ioctl.c @@ -0,0 +1,531 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
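
nouveau_handle_new() above takes a reference on the engine with nouveau_object_inc() before constructing any context or child object, so that engine init has completed before constructors that depend on init-time state run. A toy sketch of that first-reference-initialises pattern follows; all names in it are invented.

#include <stdio.h>

struct engine {
    int usecount;
    int initialised;
};

/* First reference performs the one-time init, in the spirit of
 * nouveau_object_inc(): the handle path takes this reference *before*
 * constructing anything that depends on engine state. */
static void engine_inc(struct engine *e)
{
    if (++e->usecount == 1) {
        printf("engine init\n");
        e->initialised = 1;
    }
}

static void engine_dec(struct engine *e)
{
    if (--e->usecount == 0) {
        printf("engine fini\n");
        e->initialised = 0;
    }
}

static int create_child(struct engine *e)
{
    if (!e->initialised)
        return -1;      /* constructors may depend on init-time state */
    printf("child created\n");
    return 0;
}

int main(void)
{
    struct engine e = { 0, 0 };

    engine_inc(&e);     /* cf. nouveau_object_inc(engine) */
    create_child(&e);
    engine_dec(&e);
    return 0;
}
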
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include <core/object.h> +#include <core/parent.h> +#include <core/handle.h> +#include <core/namedb.h> +#include <core/client.h> +#include <core/device.h> +#include <core/ioctl.h> +#include <core/event.h> + +#include <nvif/unpack.h> +#include <nvif/ioctl.h> + +static int +nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + union { + struct nvif_ioctl_nop none; + } *args = data; + int ret; + + nv_ioctl(object, "nop size %d\n", size); + if (nvif_unvers(args->none)) { + nv_ioctl(object, "nop\n"); + } + + return ret; +} + +static int +nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + union { + struct nvif_ioctl_sclass_v0 v0; + } *args = data; + int ret; + + if (!nv_iclass(object, NV_PARENT_CLASS)) { + nv_debug(object, "cannot have children (sclass)\n"); + return -ENODEV; + } + + nv_ioctl(object, "sclass size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "sclass vers %d count %d\n", + args->v0.version, args->v0.count); + if (size == args->v0.count * sizeof(args->v0.oclass[0])) { + ret = nouveau_parent_lclass(object, args->v0.oclass, + args->v0.count); + if (ret >= 0) { + args->v0.count = ret; + ret = 0; + } + } else { + ret = -EINVAL; + } + } + + return ret; +} + +static int +nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size) +{ + union { + struct nvif_ioctl_new_v0 v0; + } *args = data; + struct nouveau_client *client = nouveau_client(parent->object); + struct nouveau_object *engctx = NULL; + struct nouveau_object *object = NULL; + struct nouveau_object *engine; + struct nouveau_oclass *oclass; + struct nouveau_handle *handle; + u32 _handle, _oclass; + int ret; + + nv_ioctl(client, "new size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + _handle = args->v0.handle; + _oclass = args->v0.oclass; + } else + return ret; + + nv_ioctl(client, "new vers %d handle %08x class %08x " + "route %02x token %llx\n", + args->v0.version, _handle, _oclass, + args->v0.route, args->v0.token); + + if (!nv_iclass(parent->object, NV_PARENT_CLASS)) { + nv_debug(parent->object, "cannot have children (ctor)\n"); + ret = -ENODEV; + goto fail_class; + } + + /* check that parent supports the requested subclass */ + ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass); + if (ret) { + nv_debug(parent->object, "illegal class 0x%04x\n", _oclass); + goto fail_class; + } + + /* make sure engine init has been completed *before* any objects + * it controls are created - the constructors may depend on + * state calculated at init (ie. default context construction) + */ + if (engine) { + ret = nouveau_object_inc(engine); + if (ret) + goto fail_class; + } + + /* if engine requires it, create a context object to insert + * between the parent and its children (eg. 
PGRAPH context) + */ + if (engine && nv_engine(engine)->cclass) { + ret = nouveau_object_ctor(parent->object, engine, + nv_engine(engine)->cclass, + data, size, &engctx); + if (ret) + goto fail_engctx; + } else { + nouveau_object_ref(parent->object, &engctx); + } + + /* finally, create new object and bind it to its handle */ + ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object); + client->data = object; + if (ret) + goto fail_ctor; + + ret = nouveau_object_inc(object); + if (ret) + goto fail_init; + + ret = nouveau_handle_create(parent->object, parent->name, + _handle, object, &handle); + if (ret) + goto fail_handle; + + ret = nouveau_handle_init(handle); + handle->route = args->v0.route; + handle->token = args->v0.token; + if (ret) + nouveau_handle_destroy(handle); + +fail_handle: + nouveau_object_dec(object, false); +fail_init: + nouveau_object_ref(NULL, &object); +fail_ctor: + nouveau_object_ref(NULL, &engctx); +fail_engctx: + if (engine) + nouveau_object_dec(engine, false); +fail_class: + return ret; +} + +static int +nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + union { + struct nvif_ioctl_del none; + } *args = data; + int ret; + + nv_ioctl(object, "delete size %d\n", size); + if (nvif_unvers(args->none)) { + nv_ioctl(object, "delete\n"); + nouveau_handle_fini(handle, false); + nouveau_handle_destroy(handle); + } + + return ret; +} + +static int +nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; + union { + struct nvif_ioctl_mthd_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "mthd size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "mthd vers %d mthd %02x\n", + args->v0.version, args->v0.method); + if (ret = -ENODEV, ofuncs->mthd) + ret = ofuncs->mthd(object, args->v0.method, data, size); + } + + return ret; +} + + +static int +nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; + union { + struct nvif_ioctl_rd_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "rd size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "rd vers %d size %d addr %016llx\n", + args->v0.version, args->v0.size, args->v0.addr); + switch (args->v0.size) { + case 1: + if (ret = -ENODEV, ofuncs->rd08) { + args->v0.data = nv_ro08(object, args->v0.addr); + ret = 0; + } + break; + case 2: + if (ret = -ENODEV, ofuncs->rd16) { + args->v0.data = nv_ro16(object, args->v0.addr); + ret = 0; + } + break; + case 4: + if (ret = -ENODEV, ofuncs->rd32) { + args->v0.data = nv_ro32(object, args->v0.addr); + ret = 0; + } + break; + default: + ret = -EINVAL; + break; + } + } + + return ret; +} + +static int +nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; + union { + struct nvif_ioctl_wr_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "wr size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n", + args->v0.version, args->v0.size, args->v0.addr, + args->v0.data); + switch (args->v0.size) { + case 1: + if (ret = -ENODEV, ofuncs->wr08) { + nv_wo08(object, args->v0.addr, args->v0.data); + ret = 0; + } + 
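
The ioctl handlers in this new file all follow the same shape: overlay the payload with a union of versioned _v0 structs and let nvif_unpack() validate the size and version before the body runs. nvif_unpack() itself is not shown in this diff, so the sketch below only approximates the idea with an explicit helper; the struct layout and the exact validation rules are assumptions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Versioned argument header, loosely modelled on the _v0 structs above;
 * the field layout here is purely illustrative. */
struct args_v0 {
    uint8_t  version;
    uint8_t  pad[7];
    uint64_t value;
};

/* Rough analogue of the nvif_unpack() idiom: the payload must be at least
 * as large as the v0 struct, the embedded version must be in range, and
 * trailing bytes are tolerated only when the caller allows them. */
static int unpack_v0(const void *data, uint32_t size, struct args_v0 *out,
                     uint8_t vers_min, uint8_t vers_max, int allow_more)
{
    if (size < sizeof(*out))
        return -EINVAL;
    if (size > sizeof(*out) && !allow_more)
        return -EINVAL;

    memcpy(out, data, sizeof(*out));
    if (out->version < vers_min || out->version > vers_max)
        return -EINVAL;
    return 0;
}

int main(void)
{
    struct args_v0 in = { .version = 0, .value = 42 }, out;
    int ret = unpack_v0(&in, sizeof(in), &out, 0, 0, 1);

    printf("unpack -> %d, value %llu\n", ret, (unsigned long long)out.value);
    return 0;
}
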
break; + case 2: + if (ret = -ENODEV, ofuncs->wr16) { + nv_wo16(object, args->v0.addr, args->v0.data); + ret = 0; + } + break; + case 4: + if (ret = -ENODEV, ofuncs->wr32) { + nv_wo32(object, args->v0.addr, args->v0.data); + ret = 0; + } + break; + default: + ret = -EINVAL; + break; + } + } + + return ret; +} + +static int +nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; + union { + struct nvif_ioctl_map_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "map size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "map vers %d\n", args->v0.version); + if (ret = -ENODEV, ofuncs->map) { + ret = ofuncs->map(object, &args->v0.handle, + &args->v0.length); + } + } + + return ret; +} + +static int +nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_object *object = handle->object; + union { + struct nvif_ioctl_unmap none; + } *args = data; + int ret; + + nv_ioctl(object, "unmap size %d\n", size); + if (nvif_unvers(args->none)) { + nv_ioctl(object, "unmap\n"); + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_client *client = nouveau_client(handle->object); + struct nouveau_object *object = handle->object; + struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs; + union { + struct nvif_ioctl_ntfy_new_v0 v0; + } *args = data; + struct nvkm_event *event; + int ret; + + nv_ioctl(object, "ntfy new size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "ntfy new vers %d event %02x\n", + args->v0.version, args->v0.event); + if (ret = -ENODEV, ofuncs->ntfy) + ret = ofuncs->ntfy(object, args->v0.event, &event); + if (ret == 0) { + ret = nvkm_client_notify_new(client, event, data, size); + if (ret >= 0) { + args->v0.index = ret; + ret = 0; + } + } + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_client *client = nouveau_client(handle->object); + struct nouveau_object *object = handle->object; + union { + struct nvif_ioctl_ntfy_del_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "ntfy del size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "ntfy del vers %d index %d\n", + args->v0.version, args->v0.index); + ret = nvkm_client_notify_del(client, args->v0.index); + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_client *client = nouveau_client(handle->object); + struct nouveau_object *object = handle->object; + union { + struct nvif_ioctl_ntfy_get_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "ntfy get size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "ntfy get vers %d index %d\n", + args->v0.version, args->v0.index); + ret = nvkm_client_notify_get(client, args->v0.index); + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size) +{ + struct nouveau_client *client = nouveau_client(handle->object); + struct nouveau_object *object = handle->object; + union { + struct nvif_ioctl_ntfy_put_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "ntfy put size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "ntfy put vers %d index %d\n", + args->v0.version, args->v0.index); + ret = 
nvkm_client_notify_put(client, args->v0.index); + } + + return ret; +} + +static struct { + int version; + int (*func)(struct nouveau_handle *, void *, u32); +} +nvkm_ioctl_v0[] = { + { 0x00, nvkm_ioctl_nop }, + { 0x00, nvkm_ioctl_sclass }, + { 0x00, nvkm_ioctl_new }, + { 0x00, nvkm_ioctl_del }, + { 0x00, nvkm_ioctl_mthd }, + { 0x00, nvkm_ioctl_rd }, + { 0x00, nvkm_ioctl_wr }, + { 0x00, nvkm_ioctl_map }, + { 0x00, nvkm_ioctl_unmap }, + { 0x00, nvkm_ioctl_ntfy_new }, + { 0x00, nvkm_ioctl_ntfy_del }, + { 0x00, nvkm_ioctl_ntfy_get }, + { 0x00, nvkm_ioctl_ntfy_put }, +}; + +static int +nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr, + u32 *path, void *data, u32 size, + u8 owner, u8 *route, u64 *token) +{ + struct nouveau_handle *handle = parent; + struct nouveau_namedb *namedb; + struct nouveau_object *object; + int ret; + + while ((object = parent->object), nr--) { + nv_ioctl(object, "path 0x%08x\n", path[nr]); + if (!nv_iclass(object, NV_PARENT_CLASS)) { + nv_debug(object, "cannot have children (path)\n"); + return -EINVAL; + } + + if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) || + !(handle = nouveau_namedb_get(namedb, path[nr]))) { + nv_debug(object, "handle 0x%08x not found\n", path[nr]); + return -ENOENT; + } + nouveau_namedb_put(handle); + parent = handle; + } + + if (owner != NVIF_IOCTL_V0_OWNER_ANY && + owner != handle->route) { + nv_ioctl(object, "object route != owner\n"); + return -EACCES; + } + *route = handle->route; + *token = handle->token; + + if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { + if (nvkm_ioctl_v0[type].version == 0) { + ret = nvkm_ioctl_v0[type].func(handle, data, size); + } + } + + return ret; +} + +int +nvkm_ioctl(struct nouveau_client *client, bool supervisor, + void *data, u32 size, void **hack) +{ + union { + struct nvif_ioctl_v0 v0; + } *args = data; + int ret; + + client->super = supervisor; + nv_ioctl(client, "size %d\n", size); + + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(client, "vers %d type %02x path %d owner %02x\n", + args->v0.version, args->v0.type, args->v0.path_nr, + args->v0.owner); + ret = nvkm_ioctl_path(client->root, args->v0.type, + args->v0.path_nr, args->v0.path, + data, size, args->v0.owner, + &args->v0.route, &args->v0.token); + } + + nv_ioctl(client, "return %d\n", ret); + if (hack) { + *hack = client->data; + client->data = NULL; + } + client->super = false; + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c new file mode 100644 index 000000000000..76adb81bdea2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/core/notify.c @@ -0,0 +1,167 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
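
nvkm_ioctl_v0[] above is a flat dispatch table indexed by the ioctl type, with a bounds check and a per-entry ABI version checked before the handler runs. A self-contained sketch of that table-driven dispatch follows; the handlers and types are made up for the example.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct handle { int dummy; };

static int op_nop(struct handle *h, void *data, uint32_t size)
{
    (void)h; (void)data; (void)size;
    return 0;
}

static int op_del(struct handle *h, void *data, uint32_t size)
{
    (void)h; (void)data; (void)size;
    printf("delete\n");
    return 0;
}

/* Dispatch table indexed by the ioctl 'type' field, mirroring the shape
 * of nvkm_ioctl_v0[] above. */
static const struct {
    int version;
    int (*func)(struct handle *, void *, uint32_t);
} ioctl_v0[] = {
    { 0x00, op_nop },
    { 0x00, op_del },
};

static int do_ioctl(struct handle *h, uint32_t type, void *data, uint32_t size)
{
    if (type >= ARRAY_SIZE(ioctl_v0))
        return -EINVAL;
    if (ioctl_v0[type].version != 0)
        return -EINVAL;
    return ioctl_v0[type].func(h, data, size);
}

int main(void)
{
    struct handle h;

    printf("type 1 -> %d\n", do_ioctl(&h, 1, NULL, 0));
    printf("type 9 -> %d\n", do_ioctl(&h, 9, NULL, 0));
    return 0;
}
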
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include <core/client.h> +#include <core/event.h> +#include <core/notify.h> + +#include <nvif/unpack.h> +#include <nvif/event.h> + +static inline void +nvkm_notify_put_locked(struct nvkm_notify *notify) +{ + if (notify->block++ == 0) + nvkm_event_put(notify->event, notify->types, notify->index); +} + +void +nvkm_notify_put(struct nvkm_notify *notify) +{ + struct nvkm_event *event = notify->event; + unsigned long flags; + if (likely(event) && + test_and_clear_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { + spin_lock_irqsave(&event->refs_lock, flags); + nvkm_notify_put_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + if (test_bit(NVKM_NOTIFY_WORK, ¬ify->flags)) + flush_work(¬ify->work); + } +} + +static inline void +nvkm_notify_get_locked(struct nvkm_notify *notify) +{ + if (--notify->block == 0) + nvkm_event_get(notify->event, notify->types, notify->index); +} + +void +nvkm_notify_get(struct nvkm_notify *notify) +{ + struct nvkm_event *event = notify->event; + unsigned long flags; + if (likely(event) && + !test_and_set_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { + spin_lock_irqsave(&event->refs_lock, flags); + nvkm_notify_get_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + } +} + +static inline void +nvkm_notify_func(struct nvkm_notify *notify) +{ + struct nvkm_event *event = notify->event; + int ret = notify->func(notify); + unsigned long flags; + if ((ret == NVKM_NOTIFY_KEEP) || + !test_and_clear_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { + spin_lock_irqsave(&event->refs_lock, flags); + nvkm_notify_get_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + } +} + +static void +nvkm_notify_work(struct work_struct *work) +{ + struct nvkm_notify *notify = container_of(work, typeof(*notify), work); + nvkm_notify_func(notify); +} + +void +nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size) +{ + struct nvkm_event *event = notify->event; + unsigned long flags; + + BUG_ON(!spin_is_locked(&event->list_lock)); + BUG_ON(size != notify->size); + + spin_lock_irqsave(&event->refs_lock, flags); + if (notify->block) { + spin_unlock_irqrestore(&event->refs_lock, flags); + return; + } + nvkm_notify_put_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + + if (test_bit(NVKM_NOTIFY_WORK, ¬ify->flags)) { + memcpy((void *)notify->data, data, size); + schedule_work(¬ify->work); + } else { + notify->data = data; + nvkm_notify_func(notify); + notify->data = NULL; + } +} + +void +nvkm_notify_fini(struct nvkm_notify *notify) +{ + unsigned long flags; + if (notify->event) { + nvkm_notify_put(notify); + spin_lock_irqsave(¬ify->event->list_lock, flags); + list_del(¬ify->head); + spin_unlock_irqrestore(¬ify->event->list_lock, flags); + kfree((void *)notify->data); + notify->event = NULL; + } +} + +int +nvkm_notify_init(struct nvkm_event *event, int (*func)(struct nvkm_notify *), + bool work, void *data, u32 size, u32 reply, + struct nvkm_notify *notify) +{ + unsigned long flags; + int ret = -ENODEV; + if ((notify->event = event), event->refs) { + ret = event->func->ctor(data, size, notify); + if (ret == 0 && (ret = -EINVAL, notify->size == reply)) { + notify->flags = 0; + notify->block = 1; + notify->func = func; + 
notify->data = NULL; + if (ret = 0, work) { + INIT_WORK(¬ify->work, nvkm_notify_work); + set_bit(NVKM_NOTIFY_WORK, ¬ify->flags); + notify->data = kmalloc(reply, GFP_KERNEL); + if (!notify->data) + ret = -ENOMEM; + } + } + if (ret == 0) { + spin_lock_irqsave(&event->list_lock, flags); + list_add_tail(¬ify->head, &event->list); + spin_unlock_irqrestore(&event->list_lock, flags); + } + } + if (ret) + notify->event = NULL; + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c index 124538555904..b08630577c82 100644 --- a/drivers/gpu/drm/nouveau/core/core/object.c +++ b/drivers/gpu/drm/nouveau/core/core/object.c @@ -23,9 +23,6 @@ */ #include <core/object.h> -#include <core/parent.h> -#include <core/namedb.h> -#include <core/handle.h> #include <core/engine.h> #ifdef NOUVEAU_OBJECT_MAGIC @@ -61,21 +58,15 @@ nouveau_object_create_(struct nouveau_object *parent, return 0; } -static int +int _nouveau_object_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nouveau_object *object; - int ret; - - ret = nouveau_object_create(parent, engine, oclass, 0, &object); - *pobject = nv_object(object); - if (ret) - return ret; - - return 0; + if (size != 0) + return -ENOSYS; + return nouveau_object_create(parent, engine, oclass, 0, pobject); } void @@ -91,42 +82,24 @@ nouveau_object_destroy(struct nouveau_object *object) kfree(object); } -static void -_nouveau_object_dtor(struct nouveau_object *object) -{ - nouveau_object_destroy(object); -} - int nouveau_object_init(struct nouveau_object *object) { return 0; } -static int -_nouveau_object_init(struct nouveau_object *object) -{ - return nouveau_object_init(object); -} - int nouveau_object_fini(struct nouveau_object *object, bool suspend) { return 0; } -static int -_nouveau_object_fini(struct nouveau_object *object, bool suspend) -{ - return nouveau_object_fini(object, suspend); -} - struct nouveau_ofuncs nouveau_object_ofuncs = { .ctor = _nouveau_object_ctor, - .dtor = _nouveau_object_dtor, - .init = _nouveau_object_init, - .fini = _nouveau_object_fini, + .dtor = nouveau_object_destroy, + .init = nouveau_object_init, + .fini = nouveau_object_fini, }; int @@ -189,119 +162,6 @@ nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref) } int -nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle, - u16 _oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nouveau_object *parent = NULL; - struct nouveau_object *engctx = NULL; - struct nouveau_object *object = NULL; - struct nouveau_object *engine; - struct nouveau_oclass *oclass; - struct nouveau_handle *handle; - int ret; - - /* lookup parent object and ensure it *is* a parent */ - parent = nouveau_handle_ref(client, _parent); - if (!parent) { - nv_error(client, "parent 0x%08x not found\n", _parent); - return -ENOENT; - } - - if (!nv_iclass(parent, NV_PARENT_CLASS)) { - nv_error(parent, "cannot have children\n"); - ret = -EINVAL; - goto fail_class; - } - - /* check that parent supports the requested subclass */ - ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass); - if (ret) { - nv_debug(parent, "illegal class 0x%04x\n", _oclass); - goto fail_class; - } - - /* make sure engine init has been completed *before* any objects - * it controls are created - the constructors may depend on - * state calculated at init (ie. 
default context construction) - */ - if (engine) { - ret = nouveau_object_inc(engine); - if (ret) - goto fail_class; - } - - /* if engine requires it, create a context object to insert - * between the parent and its children (eg. PGRAPH context) - */ - if (engine && nv_engine(engine)->cclass) { - ret = nouveau_object_ctor(parent, engine, - nv_engine(engine)->cclass, - data, size, &engctx); - if (ret) - goto fail_engctx; - } else { - nouveau_object_ref(parent, &engctx); - } - - /* finally, create new object and bind it to its handle */ - ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object); - *pobject = object; - if (ret) - goto fail_ctor; - - ret = nouveau_object_inc(object); - if (ret) - goto fail_init; - - ret = nouveau_handle_create(parent, _parent, _handle, object, &handle); - if (ret) - goto fail_handle; - - ret = nouveau_handle_init(handle); - if (ret) - nouveau_handle_destroy(handle); - -fail_handle: - nouveau_object_dec(object, false); -fail_init: - nouveau_object_ref(NULL, &object); -fail_ctor: - nouveau_object_ref(NULL, &engctx); -fail_engctx: - if (engine) - nouveau_object_dec(engine, false); -fail_class: - nouveau_object_ref(NULL, &parent); - return ret; -} - -int -nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) -{ - struct nouveau_object *parent = NULL; - struct nouveau_object *namedb = NULL; - struct nouveau_handle *handle = NULL; - - parent = nouveau_handle_ref(client, _parent); - if (!parent) - return -ENOENT; - - namedb = nv_pclass(parent, NV_NAMEDB_CLASS); - if (namedb) { - handle = nouveau_namedb_get(nv_namedb(namedb), _handle); - if (handle) { - nouveau_namedb_put(handle); - nouveau_handle_fini(handle, false); - nouveau_handle_destroy(handle); - } - } - - nouveau_object_ref(NULL, &parent); - return handle ? 
0 : -EINVAL; -} - -int nouveau_object_inc(struct nouveau_object *object) { int ref = atomic_add_return(1, &object->usecount); diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c index dee5d1235e9b..30a2911878f8 100644 --- a/drivers/gpu/drm/nouveau/core/core/parent.c +++ b/drivers/gpu/drm/nouveau/core/core/parent.c @@ -75,6 +75,39 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle, } int +nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size) +{ + struct nouveau_sclass *sclass; + struct nouveau_engine *engine; + struct nouveau_oclass *oclass; + int nr = -1, i; + u64 mask; + + sclass = nv_parent(parent)->sclass; + while (sclass) { + if (++nr < size) + lclass[nr] = sclass->oclass->handle & 0xffff; + sclass = sclass->sclass; + } + + mask = nv_parent(parent)->engine; + while (i = __ffs64(mask), mask) { + engine = nouveau_engine(parent, i); + if (engine && (oclass = engine->sclass)) { + while (oclass->ofuncs) { + if (++nr < size) + lclass[nr] = oclass->handle & 0xffff; + oclass++; + } + } + + mask &= ~(1ULL << i); + } + + return nr + 1; +} + +int nouveau_parent_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, u32 pclass, diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c index f31527733e00..abb410ef09ea 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c @@ -30,7 +30,6 @@ #include <subdev/vm.h> #include <core/client.h> -#include <core/class.h> #include <core/enum.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c index ac3291f781f6..9261694d0d35 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c @@ -26,9 +26,7 @@ #include <engine/fifo.h> #include <engine/copy.h> -#include <core/class.h> #include <core/enum.h> -#include <core/class.h> #include <core/enum.h> #include "fuc/nvc0.fuc.h" diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c index 748a61eb3c6f..c7194b354605 100644 --- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c @@ -24,7 +24,6 @@ #include <core/os.h> #include <core/enum.h> -#include <core/class.h> #include <core/engctx.h> #include <engine/copy.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c index 2551dafbec73..ea5c42f31791 100644 --- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c @@ -25,7 +25,6 @@ #include <core/client.h> #include <core/os.h> #include <core/enum.h> -#include <core/class.h> #include <core/engctx.h> #include <core/gpuobj.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c index c7082377ec76..5571c09534cb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c +++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c @@ -25,7 +25,6 @@ #include <core/client.h> #include <core/os.h> #include <core/enum.h> -#include <core/class.h> #include <core/engctx.h> #include <subdev/timer.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.c b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c new file mode 100644 index 000000000000..4dbf0ba89e5c --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c @@ -0,0 +1,59 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "acpi.h" + +#ifdef CONFIG_ACPI +static int +nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data) +{ + struct nouveau_device *device = + container_of(nb, typeof(*device), acpi.nb); + struct acpi_bus_event *info = data; + + if (!strcmp(info->device_class, "ac_adapter")) + nvkm_event_send(&device->event, 1, 0, NULL, 0); + + return NOTIFY_DONE; +} +#endif + +int +nvkm_acpi_fini(struct nouveau_device *device, bool suspend) +{ +#ifdef CONFIG_ACPI + unregister_acpi_notifier(&device->acpi.nb); +#endif + return 0; +} + +int +nvkm_acpi_init(struct nouveau_device *device) +{ +#ifdef CONFIG_ACPI + device->acpi.nb.notifier_call = nvkm_acpi_ntfy; + register_acpi_notifier(&device->acpi.nb); +#endif + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.h b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h new file mode 100644 index 000000000000..cc49f4f568cd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h @@ -0,0 +1,9 @@ +#ifndef __NVKM_DEVICE_ACPI_H__ +#define __NVKM_DEVICE_ACPI_H__ + +#include <engine/device.h> + +int nvkm_acpi_init(struct nouveau_device *); +int nvkm_acpi_fini(struct nouveau_device *, bool); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c index 18c8c7245b73..8928f7981d4a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c @@ -26,10 +26,14 @@ #include <core/device.h> #include <core/client.h> #include <core/option.h> +#include <nvif/unpack.h> +#include <nvif/class.h> -#include <core/class.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> #include "priv.h" +#include "acpi.h" static DEFINE_MUTEX(nv_devices_mutex); static LIST_HEAD(nv_devices); @@ -49,74 +53,258 @@ nouveau_device_find(u64 name) return match; } +int +nouveau_device_list(u64 *name, int size) +{ + struct nouveau_device *device; + int nr = 0; + mutex_lock(&nv_devices_mutex); + list_for_each_entry(device, &nv_devices, head) { + if (nr++ < size) + name[nr - 1] = device->handle; + } + mutex_unlock(&nv_devices_mutex); + return nr; +} + /****************************************************************************** * nouveau_devobj (0x0080): class implementation 
*****************************************************************************/ + struct nouveau_devobj { struct nouveau_parent base; struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; }; +static int +nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size) +{ + struct nouveau_device *device = nv_device(object); + struct nouveau_fb *pfb = nouveau_fb(device); + struct nouveau_instmem *imem = nouveau_instmem(device); + union { + struct nv_device_info_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "device info size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "device info vers %d\n", args->v0.version); + } else + return ret; + + switch (device->chipset) { + case 0x01a: + case 0x01f: + case 0x04c: + case 0x04e: + case 0x063: + case 0x067: + case 0x068: + case 0x0aa: + case 0x0ac: + case 0x0af: + args->v0.platform = NV_DEVICE_INFO_V0_IGP; + break; + default: + if (device->pdev) { + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP)) + args->v0.platform = NV_DEVICE_INFO_V0_AGP; + else + if (pci_is_pcie(device->pdev)) + args->v0.platform = NV_DEVICE_INFO_V0_PCIE; + else + args->v0.platform = NV_DEVICE_INFO_V0_PCI; + } else { + args->v0.platform = NV_DEVICE_INFO_V0_SOC; + } + break; + } + + switch (device->card_type) { + case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break; + case NV_10: + case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break; + case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break; + case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break; + case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break; + case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break; + case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break; + case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break; + case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break; + default: + args->v0.family = 0; + break; + } + + args->v0.chipset = device->chipset; + args->v0.revision = device->chipset >= 0x10 ? 
nv_rd32(device, 0) : 0x00; + if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size; + else args->v0.ram_size = args->v0.ram_user = 0; + if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved; + return 0; +} + +static int +nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd, + void *data, u32 size) +{ + switch (mthd) { + case NV_DEVICE_V0_INFO: + return nouveau_devobj_info(object, data, size); + default: + break; + } + return -EINVAL; +} + +static u8 +nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) +{ + return nv_rd08(object->engine, addr); +} + +static u16 +nouveau_devobj_rd16(struct nouveau_object *object, u64 addr) +{ + return nv_rd16(object->engine, addr); +} + +static u32 +nouveau_devobj_rd32(struct nouveau_object *object, u64 addr) +{ + return nv_rd32(object->engine, addr); +} + +static void +nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data) +{ + nv_wr08(object->engine, addr, data); +} + +static void +nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data) +{ + nv_wr16(object->engine, addr, data); +} + +static void +nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data) +{ + nv_wr32(object->engine, addr, data); +} + +static int +nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size) +{ + struct nouveau_device *device = nv_device(object); + *addr = nv_device_resource_start(device, 0); + *size = nv_device_resource_len(device, 0); + return 0; +} + static const u64 disable_map[] = { - [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS, - [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, - [NVDEV_SUBDEV_PWR] = NV_DEVICE_DISABLE_CORE, - [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, - [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE, - [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, - [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO, - [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, - [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG, - [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME, - [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP, - [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT, - [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP, - [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP, - [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, - [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1, - [NVDEV_ENGINE_VIC] = NV_DEVICE_DISABLE_VIC, - [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC, - [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, + [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_V0_DISABLE_VBIOS, + [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE, + 
[NVDEV_SUBDEV_TIMER] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_FB] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_VM] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE, + [NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO, + [NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO, + [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GRAPH, + [NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG, + [NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME, + [NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP, + [NVDEV_ENGINE_CRYPT] = NV_DEVICE_V0_DISABLE_CRYPT, + [NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP, + [NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP, + [NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0, + [NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1, + [NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC, + [NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC, + [NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP, [NVDEV_SUBDEV_NR] = 0, }; +static void +nouveau_devobj_dtor(struct nouveau_object *object) +{ + struct nouveau_devobj *devobj = (void *)object; + int i; + + for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) + nouveau_object_ref(NULL, &devobj->subdev[i]); + + nouveau_parent_destroy(&devobj->base); +} + +static struct nouveau_oclass +nouveau_devobj_oclass_super = { + .handle = NV_DEVICE, + .ofuncs = &(struct nouveau_ofuncs) { + .dtor = nouveau_devobj_dtor, + .init = _nouveau_parent_init, + .fini = _nouveau_parent_fini, + .mthd = nouveau_devobj_mthd, + .map = nouveau_devobj_map, + .rd08 = nouveau_devobj_rd08, + .rd16 = nouveau_devobj_rd16, + .rd32 = nouveau_devobj_rd32, + .wr08 = nouveau_devobj_wr08, + .wr16 = nouveau_devobj_wr16, + .wr32 = nouveau_devobj_wr32, + } +}; + static int nouveau_devobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv_device_v0 v0; + } *args = data; struct nouveau_client *client = nv_client(parent); struct nouveau_device *device; struct nouveau_devobj *devobj; - struct nv_device_class *args = data; u32 boot0, strap; u64 disable, mmio_base, mmio_size; void __iomem *map; int ret, i, c; - if (size < sizeof(struct nv_device_class)) - return -EINVAL; + nv_ioctl(parent, "create device size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create device v%d device %016llx " + "disable %016llx debug0 %016llx\n", + args->v0.version, args->v0.device, + args->v0.disable, args->v0.debug0); + } else + return ret; + + /* give priviledged clients register access */ + if (client->super) + oclass = &nouveau_devobj_oclass_super; /* find the device subdev that matches what the client requested */ device = nv_device(client->device); - if (args->device != ~0) { - device = nouveau_device_find(args->device); + if (args->v0.device != ~0) { + device = nouveau_device_find(args->v0.device); if (!device) return -ENODEV; } @@ -135,14 +323,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent, mmio_size = nv_device_resource_len(device, 0); /* translate api disable mask into internal mapping */ - disable = args->debug0; + disable = args->v0.debug0; for (i = 
0; i < NVDEV_SUBDEV_NR; i++) { - if (args->disable & disable_map[i]) + if (args->v0.disable & disable_map[i]) disable |= (1ULL << i); } /* identify the chipset, and determine classes of subdev/engines */ - if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) && + if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) && !device->card_type) { map = ioremap(mmio_base, 0x102000); if (map == NULL) @@ -180,8 +368,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent, case 0x080: case 0x090: case 0x0a0: device->card_type = NV_50; break; - case 0x0c0: device->card_type = NV_C0; break; - case 0x0d0: device->card_type = NV_D0; break; + case 0x0c0: + case 0x0d0: device->card_type = NV_C0; break; case 0x0e0: case 0x0f0: case 0x100: device->card_type = NV_E0; break; @@ -206,8 +394,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent, case NV_30: ret = nv30_identify(device); break; case NV_40: ret = nv40_identify(device); break; case NV_50: ret = nv50_identify(device); break; - case NV_C0: - case NV_D0: ret = nvc0_identify(device); break; + case NV_C0: ret = nvc0_identify(device); break; case NV_E0: ret = nve0_identify(device); break; case GM100: ret = gm100_identify(device); break; default: @@ -242,7 +429,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent, nv_debug(device, "crystal freq: %dKHz\n", device->crystal); } - if (!(args->disable & NV_DEVICE_DISABLE_MMIO) && + if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) && !nv_subdev(device)->mmio) { nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size); if (!nv_subdev(device)->mmio) { @@ -298,71 +485,19 @@ nouveau_devobj_ctor(struct nouveau_object *parent, return 0; } -static void -nouveau_devobj_dtor(struct nouveau_object *object) -{ - struct nouveau_devobj *devobj = (void *)object; - int i; - - for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) - nouveau_object_ref(NULL, &devobj->subdev[i]); - - nouveau_parent_destroy(&devobj->base); -} - -static u8 -nouveau_devobj_rd08(struct nouveau_object *object, u64 addr) -{ - return nv_rd08(object->engine, addr); -} - -static u16 -nouveau_devobj_rd16(struct nouveau_object *object, u64 addr) -{ - return nv_rd16(object->engine, addr); -} - -static u32 -nouveau_devobj_rd32(struct nouveau_object *object, u64 addr) -{ - return nv_rd32(object->engine, addr); -} - -static void -nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data) -{ - nv_wr08(object->engine, addr, data); -} - -static void -nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data) -{ - nv_wr16(object->engine, addr, data); -} - -static void -nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data) -{ - nv_wr32(object->engine, addr, data); -} - static struct nouveau_ofuncs nouveau_devobj_ofuncs = { .ctor = nouveau_devobj_ctor, .dtor = nouveau_devobj_dtor, .init = _nouveau_parent_init, .fini = _nouveau_parent_fini, - .rd08 = nouveau_devobj_rd08, - .rd16 = nouveau_devobj_rd16, - .rd32 = nouveau_devobj_rd32, - .wr08 = nouveau_devobj_wr08, - .wr16 = nouveau_devobj_wr16, - .wr32 = nouveau_devobj_wr32, + .mthd = nouveau_devobj_mthd, }; /****************************************************************************** * nouveau_device: engine functions *****************************************************************************/ + static struct nouveau_oclass nouveau_device_sclass[] = { { 0x0080, &nouveau_devobj_ofuncs }, @@ -370,6 +505,23 @@ nouveau_device_sclass[] = { }; static int +nouveau_device_event_ctor(void *data, u32 size, struct nvkm_notify *notify) +{ + if (!WARN_ON(size != 0)) { + notify->size = 0; + 
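
The constructor above translates the client-visible NV_DEVICE_V0_DISABLE_* mask into the driver's internal per-subdev bitmap through disable_map. Here is a small stand-alone sketch of that same translation loop, with hypothetical unit names in place of the NVDEV_* enums and made-up DEMO_DISABLE_* bits.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical API-level disable bits (stand-ins for NV_DEVICE_V0_DISABLE_*). */
#define DEMO_DISABLE_CORE	(1ULL << 0)
#define DEMO_DISABLE_FIFO	(1ULL << 1)
#define DEMO_DISABLE_GR		(1ULL << 2)

/* Internal unit indices (stand-ins for NVDEV_SUBDEV_* / NVDEV_ENGINE_*). */
enum { UNIT_CLOCK, UNIT_FB, UNIT_FIFO, UNIT_GR, UNIT_NR };

/* Per-unit mapping from internal index to the API bit that disables it. */
static const uint64_t demo_disable_map[UNIT_NR] = {
	[UNIT_CLOCK] = DEMO_DISABLE_CORE,
	[UNIT_FB]    = DEMO_DISABLE_CORE,
	[UNIT_FIFO]  = DEMO_DISABLE_FIFO,
	[UNIT_GR]    = DEMO_DISABLE_GR,
};

/* Same shape as the ctor's loop: any unit whose API bit is set in the
 * client-supplied mask gets its own bit set in the internal mask. */
static uint64_t translate_disable(uint64_t api_mask, uint64_t debug_mask)
{
	uint64_t disable = debug_mask;
	int i;

	for (i = 0; i < UNIT_NR; i++) {
		if (api_mask & demo_disable_map[i])
			disable |= 1ULL << i;
	}
	return disable;
}

int main(void)
{
	uint64_t mask = translate_disable(DEMO_DISABLE_FIFO, 0);

	printf("internal disable mask: 0x%llx\n", (unsigned long long)mask);
	return 0;
}
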
notify->types = 1; + notify->index = 0; + return 0; + } + return -EINVAL; +} + +static const struct nvkm_event_func +nouveau_device_event_func = { + .ctor = nouveau_device_event_ctor, +}; + +static int nouveau_device_fini(struct nouveau_object *object, bool suspend) { struct nouveau_device *device = (void *)object; @@ -386,7 +538,7 @@ nouveau_device_fini(struct nouveau_object *object, bool suspend) } } - ret = 0; + ret = nvkm_acpi_fini(device, suspend); fail: for (; ret && i < NVDEV_SUBDEV_NR; i++) { if ((subdev = device->subdev[i])) { @@ -407,7 +559,11 @@ nouveau_device_init(struct nouveau_object *object) { struct nouveau_device *device = (void *)object; struct nouveau_object *subdev; - int ret, i; + int ret, i = 0; + + ret = nvkm_acpi_init(device); + if (ret) + goto fail; for (i = 0; i < NVDEV_SUBDEV_NR; i++) { if ((subdev = device->subdev[i])) { @@ -430,6 +586,8 @@ fail: } } + if (ret) + nvkm_acpi_fini(device, false); return ret; } @@ -438,6 +596,8 @@ nouveau_device_dtor(struct nouveau_object *object) { struct nouveau_device *device = (void *)object; + nvkm_event_fini(&device->event); + mutex_lock(&nv_devices_mutex); list_del(&device->head); mutex_unlock(&nv_devices_mutex); @@ -478,31 +638,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar) } } -dma_addr_t -nv_device_map_page(struct nouveau_device *device, struct page *page) -{ - dma_addr_t ret; - - if (nv_device_is_pci(device)) { - ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(device->pdev, ret)) - ret = 0; - } else { - ret = page_to_phys(page); - } - - return ret; -} - -void -nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr) -{ - if (nv_device_is_pci(device)) - pci_unmap_page(device->pdev, addr, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); -} - int nv_device_get_irq(struct nouveau_device *device, bool stall) { @@ -560,6 +695,9 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name, nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE"); nv_engine(device)->sclass = nouveau_device_sclass; list_add(&device->head, &nv_devices); + + ret = nvkm_event_init(&nouveau_device_event_func, 1, 1, + &device->event); done: mutex_unlock(&nv_devices_mutex); return ret; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c index 4b69bf56ed01..e34101a3490e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c @@ -22,55 +22,82 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ +#include <core/client.h> #include <core/object.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> +#include <nvif/ioctl.h> #include <subdev/clock.h> #include "priv.h" static int -nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nouveau_control_mthd_pstate_info(struct nouveau_object *object, + void *data, u32 size) { + union { + struct nvif_control_pstate_info_v0 v0; + } *args = data; struct nouveau_clock *clk = nouveau_clock(object); - struct nv_control_pstate_info *args = data; + int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(object, "control pstate info size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "control pstate info vers %d\n", + args->v0.version); + } else + return ret; if (clk) { - args->count = clk->state_nr; - args->ustate = clk->ustate; - args->pstate = clk->pstate; + args->v0.count = 
clk->state_nr; + args->v0.ustate_ac = clk->ustate_ac; + args->v0.ustate_dc = clk->ustate_dc; + args->v0.pwrsrc = clk->pwrsrc; + args->v0.pstate = clk->pstate; } else { - args->count = 0; - args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE; - args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN; + args->v0.count = 0; + args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; + args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; + args->v0.pwrsrc = -ENOSYS; + args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN; } return 0; } static int -nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nouveau_control_mthd_pstate_attr(struct nouveau_object *object, + void *data, u32 size) { + union { + struct nvif_control_pstate_attr_v0 v0; + } *args = data; struct nouveau_clock *clk = nouveau_clock(object); - struct nv_control_pstate_attr *args = data; struct nouveau_clocks *domain; struct nouveau_pstate *pstate; struct nouveau_cstate *cstate; int i = 0, j = -1; u32 lo, hi; - - if ((size < sizeof(*args)) || !clk || - (args->state >= 0 && args->state >= clk->state_nr)) - return -EINVAL; + int ret; + + nv_ioctl(object, "control pstate attr size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "control pstate attr vers %d state %d " + "index %d\n", + args->v0.version, args->v0.state, args->v0.index); + if (!clk) + return -ENODEV; + if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) + return -EINVAL; + if (args->v0.state >= clk->state_nr) + return -EINVAL; + } else + return ret; domain = clk->domains; while (domain->name != nv_clk_src_max) { - if (domain->mname && ++j == args->index) + if (domain->mname && ++j == args->v0.index) break; domain++; } @@ -78,9 +105,9 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, if (domain->name == nv_clk_src_max) return -EINVAL; - if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) { + if (args->v0.state != NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) { list_for_each_entry(pstate, &clk->states, head) { - if (i++ == args->state) + if (i++ == args->v0.state) break; } @@ -91,21 +118,21 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, hi = max(hi, cstate->domain[domain->name]); } - args->state = pstate->pstate; + args->v0.state = pstate->pstate; } else { lo = max(clk->read(clk, domain->name), 0); hi = lo; } - snprintf(args->name, sizeof(args->name), "%s", domain->mname); - snprintf(args->unit, sizeof(args->unit), "MHz"); - args->min = lo / domain->mdiv; - args->max = hi / domain->mdiv; + snprintf(args->v0.name, sizeof(args->v0.name), "%s", domain->mname); + snprintf(args->v0.unit, sizeof(args->v0.unit), "MHz"); + args->v0.min = lo / domain->mdiv; + args->v0.max = hi / domain->mdiv; - args->index = 0; + args->v0.index = 0; while ((++domain)->name != nv_clk_src_max) { if (domain->mname) { - args->index = ++j; + args->v0.index = ++j; break; } } @@ -114,31 +141,65 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd, } static int -nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nouveau_control_mthd_pstate_user(struct nouveau_object *object, + void *data, u32 size) { + union { + struct nvif_control_pstate_user_v0 v0; + } *args = data; struct nouveau_clock *clk = nouveau_clock(object); - struct nv_control_pstate_user *args = data; + int ret; + + nv_ioctl(object, "control pstate user size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, 
false)) { + nv_ioctl(object, "control pstate user vers %d ustate %d " + "pwrsrc %d\n", args->v0.version, + args->v0.ustate, args->v0.pwrsrc); + if (!clk) + return -ENODEV; + } else + return ret; + + if (args->v0.pwrsrc >= 0) { + ret |= nouveau_clock_ustate(clk, args->v0.ustate, args->v0.pwrsrc); + } else { + ret |= nouveau_clock_ustate(clk, args->v0.ustate, 0); + ret |= nouveau_clock_ustate(clk, args->v0.ustate, 1); + } - if (size < sizeof(*args) || !clk) - return -EINVAL; + return ret; +} - return nouveau_clock_ustate(clk, args->state); +static int +nouveau_control_mthd(struct nouveau_object *object, u32 mthd, + void *data, u32 size) +{ + switch (mthd) { + case NVIF_CONTROL_PSTATE_INFO: + return nouveau_control_mthd_pstate_info(object, data, size); + case NVIF_CONTROL_PSTATE_ATTR: + return nouveau_control_mthd_pstate_attr(object, data, size); + case NVIF_CONTROL_PSTATE_USER: + return nouveau_control_mthd_pstate_user(object, data, size); + default: + break; + } + return -EINVAL; } +static struct nouveau_ofuncs +nouveau_control_ofuncs = { + .ctor = _nouveau_object_ctor, + .dtor = nouveau_object_destroy, + .init = nouveau_object_init, + .fini = nouveau_object_fini, + .mthd = nouveau_control_mthd, +}; + struct nouveau_oclass nouveau_control_oclass[] = { - { .handle = NV_CONTROL_CLASS, - .ofuncs = &nouveau_object_ofuncs, - .omthds = (struct nouveau_omthds[]) { - { NV_CONTROL_PSTATE_INFO, - NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info }, - { NV_CONTROL_PSTATE_ATTR, - NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr }, - { NV_CONTROL_PSTATE_USER, - NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user }, - {}, - }, + { .handle = NVIF_IOCTL_NEW_V0_CONTROL, + .ofuncs = &nouveau_control_ofuncs }, {} }; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c index a520029e25d9..377ec0b8851e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c @@ -33,7 +33,7 @@ #include <subdev/mc.h> #include <subdev/timer.h> #include <subdev/fb.h> -#include <subdev/ltcg.h> +#include <subdev/ltc.h> #include <subdev/ibus.h> #include <subdev/instmem.h> #include <subdev/vm.h> @@ -68,20 +68,20 @@ gm100_identify(struct nouveau_device *device) #endif device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; #if 0 - device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; #endif - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass; diff --git 
a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c index 40b29d0214cb..573b55f5c2f9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c @@ -56,7 +56,7 @@ nv04_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; @@ -74,7 +74,7 @@ nv04_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c index 5f7c25ff523d..183a85a6204e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c @@ -58,7 +58,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass; break; @@ -75,7 +75,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; @@ -94,7 +94,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; @@ -113,7 +113,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; 
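
The identify hunks in this and the following nv04/nv10/nv20/nv30/nv40/nv50 files are mechanical: each NVDEV_ENGINE_DMAOBJ (and later PWR) assignment drops the leading '&', presumably because those classes are now exported as pointers rather than as struct objects. A tiny sketch of why the table assignment changes shape, using invented demo_* symbols rather than the real nouveau ones:

#include <stdio.h>

struct demo_oclass { unsigned handle; };

/* Old style: the class is exported as a struct, so tables take its address. */
static struct demo_oclass old_dmaeng_oclass = { 0x0002 };

/* New style: the class is exported as a pointer (for example built by a
 * helper at init time), so tables assign it directly, without the '&'. */
static struct demo_oclass new_dmaeng_impl = { 0x0002 };
static struct demo_oclass *new_dmaeng_oclass = &new_dmaeng_impl;

int main(void)
{
	struct demo_oclass *table[2];

	table[0] = &old_dmaeng_oclass;	/* matches the removed lines */
	table[1] = new_dmaeng_oclass;	/* matches the added lines */
	printf("%x %x\n", table[0]->handle, table[1]->handle);
	return 0;
}
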
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; @@ -132,7 +132,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; @@ -151,7 +151,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; @@ -170,7 +170,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; @@ -189,7 +189,7 @@ nv10_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c index 75fed11bba0a..aa564c68a920 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c @@ -59,7 +59,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass; @@ -78,7 +78,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; @@ -97,7 +97,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = 
nv25_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass; @@ -116,7 +116,7 @@ nv20_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c index 36919d7db7cc..11bd31da82ab 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c @@ -59,7 +59,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; @@ -78,7 +78,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; @@ -97,7 +97,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass; @@ -117,7 +117,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass; @@ -137,7 +137,7 @@ nv30_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ 
] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c index 1130a62be2c7..e96c223cb797 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c @@ -65,7 +65,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -88,7 +88,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -111,7 +111,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -134,7 +134,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -157,7 +157,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -180,7 +180,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = 
nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -203,7 +203,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -226,7 +226,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -249,7 +249,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -272,7 +272,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -295,7 +295,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -318,7 +318,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -341,7 +341,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - 
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -364,7 +364,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -387,7 +387,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; @@ -410,7 +410,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c index ef0b0bde1a91..932f84fae459 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c @@ -74,7 +74,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -99,7 +99,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -127,7 +127,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO 
] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -155,7 +155,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -183,7 +183,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -211,7 +211,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -267,7 +267,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -295,7 +295,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -323,7 +323,7 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + 
device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -350,9 +350,9 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -380,9 +380,9 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -409,9 +409,9 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; @@ -438,9 +438,9 @@ nv50_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index 8d55ed633b19..b4a2917ce555 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -33,7 +33,7 @@ #include <subdev/mc.h> #include <subdev/timer.h> #include <subdev/fb.h> -#include <subdev/ltcg.h> +#include <subdev/ltc.h> #include <subdev/ibus.h> #include <subdev/instmem.h> #include <subdev/vm.h> @@ -70,14 +70,14 @@ nvc0_identify(struct nouveau_device *device) 
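
The *_identify() routines touched in these hunks pick per-chipset subdev and engine implementations by switching on the chipset id. A compact, stand-alone illustration of that chipset-keyed dispatch follows; the family names and the 0x1f0 grouping mask are chosen only for the example, not taken verbatim from the driver.

#include <stdio.h>

enum demo_family { FAMILY_KEPLER, FAMILY_MAXWELL, FAMILY_UNKNOWN };

/* Group chipsets into families the same way the identify code groups
 * them by high bits before filling the per-device class table. */
static enum demo_family demo_identify(unsigned chipset)
{
	switch (chipset & 0x1f0) {
	case 0x0e0:
	case 0x0f0:
	case 0x100: return FAMILY_KEPLER;
	case 0x110: return FAMILY_MAXWELL;
	default:    return FAMILY_UNKNOWN;
	}
}

int main(void)
{
	printf("chipset 0x%03x -> family %d\n", 0x0eau, demo_identify(0x0ea));
	return 0;
}
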
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass; @@ -102,14 +102,14 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; @@ -134,14 +134,14 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; @@ -165,14 +165,14 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = 
&nvc0_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; @@ -197,14 +197,14 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass; @@ -229,14 +229,14 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass; @@ -260,14 +260,14 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; - device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass; device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass; device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; - device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass; device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; - device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass; device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass; device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass; @@ 
-292,14 +292,14 @@ nvc0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
@@ -323,12 +323,12 @@ nvc0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 2d1e97d4264f..cdf9147f32a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -33,7 +33,7 @@
 #include <subdev/mc.h>
 #include <subdev/timer.h>
 #include <subdev/fb.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
 #include <subdev/ibus.h>
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
@@ -70,14 +70,14 @@ nve0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -103,14 +103,14 @@ nve0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -136,14 +136,14 @@ nve0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -158,15 +158,17 @@ nve0_identify(struct nouveau_device *device)
 break;
 case 0xea:
 device->cname = "GK20A";
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
@@ -186,14 +188,14 @@ nve0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
@@ -219,17 +221,17 @@ nve0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk110b_graph_oclass;
 device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
@@ -248,18 +250,18 @@ nve0_identify(struct nouveau_device *device)
 device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
 device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
 device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
 device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
 device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
 device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
 device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
 device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index 9c38c5e40500..22d55f6cde50 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -22,23 +22,93 @@
 * Authors: Ben Skeggs
 */
+#include <core/os.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/event.h>
+
 #include "priv.h"
 #include "outp.h"
 #include "conn.h"
+int
+nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{ + struct nouveau_disp *disp = + container_of(notify->event, typeof(*disp), vblank); + union { + struct nvif_notify_head_req_v0 v0; + } *req = data; + int ret; + + if (nvif_unpack(req->v0, 0, 0, false)) { + notify->size = sizeof(struct nvif_notify_head_rep_v0); + if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) { + notify->types = 1; + notify->index = req->v0.head; + return 0; + } + } + + return ret; +} + +void +nouveau_disp_vblank(struct nouveau_disp *disp, int head) +{ + struct nvif_notify_head_rep_v0 rep = {}; + nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep)); +} + static int -nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index) +nouveau_disp_hpd_ctor(void *data, u32 size, struct nvkm_notify *notify) { - struct nouveau_disp *disp = event->priv; + struct nouveau_disp *disp = + container_of(notify->event, typeof(*disp), hpd); + union { + struct nvif_notify_conn_req_v0 v0; + } *req = data; struct nvkm_output *outp; - list_for_each_entry(outp, &disp->outp, head) { - if (outp->conn->index == index) { - if (outp->conn->hpd.event) - return 0; - break; + int ret; + + if (nvif_unpack(req->v0, 0, 0, false)) { + notify->size = sizeof(struct nvif_notify_conn_rep_v0); + list_for_each_entry(outp, &disp->outp, head) { + if (ret = -ENXIO, outp->conn->index == req->v0.conn) { + if (ret = -ENODEV, outp->conn->hpd.event) { + notify->types = req->v0.mask; + notify->index = req->v0.conn; + ret = 0; + } + break; + } } } - return -ENOSYS; + + return ret; +} + +static const struct nvkm_event_func +nouveau_disp_hpd_func = { + .ctor = nouveau_disp_hpd_ctor +}; + +int +nouveau_disp_ntfy(struct nouveau_object *object, u32 type, + struct nvkm_event **event) +{ + struct nouveau_disp *disp = (void *)object->engine; + switch (type) { + case NV04_DISP_NTFY_VBLANK: + *event = &disp->vblank; + return 0; + case NV04_DISP_NTFY_CONN: + *event = &disp->hpd; + return 0; + default: + break; + } + return -EINVAL; } int @@ -97,7 +167,8 @@ _nouveau_disp_dtor(struct nouveau_object *object) struct nouveau_disp *disp = (void *)object; struct nvkm_output *outp, *outt; - nouveau_event_destroy(&disp->vblank); + nvkm_event_fini(&disp->vblank); + nvkm_event_fini(&disp->hpd); if (disp->outp.next) { list_for_each_entry_safe(outp, outt, &disp->outp, head) { @@ -157,14 +228,11 @@ nouveau_disp_create_(struct nouveau_object *parent, hpd = max(hpd, (u8)(dcbE.connector + 1)); } - ret = nouveau_event_create(3, hpd, &disp->hpd); + ret = nvkm_event_init(&nouveau_disp_hpd_func, 3, hpd, &disp->hpd); if (ret) return ret; - disp->hpd->priv = disp; - disp->hpd->check = nouveau_disp_hpd_check; - - ret = nouveau_event_create(1, heads, &disp->vblank); + ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c index 4ffbc70ecf5a..3d1070228977 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c @@ -22,39 +22,41 @@ * Authors: Ben Skeggs */ +#include <core/os.h> +#include <nvif/event.h> + #include <subdev/gpio.h> #include "conn.h" #include "outp.h" -static void -nvkm_connector_hpd_work(struct work_struct *w) +static int +nvkm_connector_hpd(struct nvkm_notify *notify) { - struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work); + struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd); struct nouveau_disp *disp = nouveau_disp(conn); struct nouveau_gpio *gpio = nouveau_gpio(conn); - u32 send = 
NVKM_HPD_UNPLUG; - if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index)) - send = NVKM_HPD_PLUG; - nouveau_event_trigger(disp->hpd, send, conn->index); - nouveau_event_get(conn->hpd.event); -} + const struct nvkm_gpio_ntfy_rep *line = notify->data; + struct nvif_notify_conn_rep_v0 rep; + int index = conn->index; -static int -nvkm_connector_hpd(void *data, u32 type, int index) -{ - struct nvkm_connector *conn = data; - DBG("HPD: %d\n", type); - schedule_work(&conn->hpd.work); - return NVKM_EVENT_DROP; + DBG("HPD: %d\n", line->mask); + + if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index)) + rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG; + else + rep.mask = NVIF_NOTIFY_CONN_V0_PLUG; + rep.version = 0; + + nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep)); + return NVKM_NOTIFY_KEEP; } int _nvkm_connector_fini(struct nouveau_object *object, bool suspend) { struct nvkm_connector *conn = (void *)object; - if (conn->hpd.event) - nouveau_event_put(conn->hpd.event); + nvkm_notify_put(&conn->hpd); return nouveau_object_fini(&conn->base, suspend); } @@ -63,10 +65,8 @@ _nvkm_connector_init(struct nouveau_object *object) { struct nvkm_connector *conn = (void *)object; int ret = nouveau_object_init(&conn->base); - if (ret == 0) { - if (conn->hpd.event) - nouveau_event_get(conn->hpd.event); - } + if (ret == 0) + nvkm_notify_get(&conn->hpd); return ret; } @@ -74,7 +74,7 @@ void _nvkm_connector_dtor(struct nouveau_object *object) { struct nvkm_connector *conn = (void *)object; - nouveau_event_ref(NULL, &conn->hpd.event); + nvkm_notify_fini(&conn->hpd); nouveau_object_destroy(&conn->base); } @@ -116,19 +116,24 @@ nvkm_connector_create_(struct nouveau_object *parent, if ((info->hpd = ffs(info->hpd))) { if (--info->hpd >= ARRAY_SIZE(hpd)) { ERR("hpd %02x unknown\n", info->hpd); - goto done; + return 0; } info->hpd = hpd[info->hpd]; ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func); if (ret) { ERR("func %02x lookup failed, %d\n", info->hpd, ret); - goto done; + return 0; } - ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED, - func.line, nvkm_connector_hpd, - conn, &conn->hpd.event); + ret = nvkm_notify_init(&gpio->event, nvkm_connector_hpd, true, + &(struct nvkm_gpio_ntfy_req) { + .mask = NVKM_GPIO_TOGGLED, + .line = func.line, + }, + sizeof(struct nvkm_gpio_ntfy_req), + sizeof(struct nvkm_gpio_ntfy_rep), + &conn->hpd); if (ret) { ERR("func %02x failed, %d\n", info->hpd, ret); } else { @@ -136,8 +141,6 @@ nvkm_connector_create_(struct nouveau_object *parent, } } -done: - INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h index 035ebeacbb1c..55e5f5c82c14 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h @@ -10,10 +10,7 @@ struct nvkm_connector { struct nvbios_connE info; int index; - struct { - struct nouveau_eventh *event; - struct work_struct work; - } hpd; + struct nvkm_notify hpd; }; #define nvkm_connector_create(p,e,c,b,i,d) \ diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c index a66b27c0fcab..b36addff06a9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include 
<subdev/bios.h> #include <subdev/bios/dcb.h> @@ -32,13 +33,28 @@ #include "nv50.h" int -nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) +nv50_dac_power(NV50_DISP_MTHD_V1) { - const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) | - (data & NV50_DISP_DAC_PWR_VSYNC) | - (data & NV50_DISP_DAC_PWR_DATA) | - (data & NV50_DISP_DAC_PWR_STATE); - const u32 doff = (or * 0x800); + const u32 doff = outp->or * 0x800; + union { + struct nv50_disp_dac_pwr_v0 v0; + } *args = data; + u32 stat; + int ret; + + nv_ioctl(object, "disp dac pwr size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp dac pwr vers %d state %d data %d " + "vsync %d hsync %d\n", + args->v0.version, args->v0.state, args->v0.data, + args->v0.vsync, args->v0.hsync); + stat = 0x00000040 * !args->v0.state; + stat |= 0x00000010 * !args->v0.data; + stat |= 0x00000004 * !args->v0.vsync; + stat |= 0x00000001 * !args->v0.hsync; + } else + return ret; + nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat); nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); @@ -46,9 +62,24 @@ nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data) } int -nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) +nv50_dac_sense(NV50_DISP_MTHD_V1) { - const u32 doff = (or * 0x800); + union { + struct nv50_disp_dac_load_v0 v0; + } *args = data; + const u32 doff = outp->or * 0x800; + u32 loadval; + int ret; + + nv_ioctl(object, "disp dac load size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp dac load vers %d data %08x\n", + args->v0.version, args->v0.data); + if (args->v0.data & 0xfff00000) + return -EINVAL; + loadval = args->v0.data; + } else + return ret; nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000); nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); @@ -61,38 +92,10 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval) nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000); nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000); - nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval); + nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval); if (!(loadval & 0x80000000)) return -ETIMEDOUT; - return (loadval & 0x38000000) >> 27; -} - -int -nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR); - u32 *data = args; - int ret; - - if (size < sizeof(u32)) - return -EINVAL; - - switch (mthd & ~0x3f) { - case NV50_DISP_DAC_PWR: - ret = priv->dac.power(priv, or, data[0]); - break; - case NV50_DISP_DAC_LOAD: - ret = priv->dac.sense(priv, or, data[0]); - if (ret >= 0) { - data[0] = ret; - ret = 0; - } - break; - default: - BUG_ON(1); - } - - return ret; + args->v0.load = (loadval & 0x38000000) >> 27; + return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c index 5a5b59b21130..39890221b91c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c @@ -30,7 +30,7 @@ #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "dport.h" #include "outpdp.h" @@ -335,7 +335,7 @@ nouveau_dp_train(struct work_struct *w) int ret; /* bring capabilities within encoder limits */ - if (nv_mclass(disp) < NVD0_DISP_CLASS) + if (nv_mclass(disp) < GF110_DISP) outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED; if ((outp->dpcd[2] 
& 0x1f) > outp->base.info.dpconf.link_nr) { outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT; @@ -354,7 +354,7 @@ nouveau_dp_train(struct work_struct *w) cfg--; /* disable link interrupt handling during link training */ - nouveau_event_put(outp->irq); + nvkm_notify_put(&outp->irq); /* enable down-spreading and execute pre-train script from vbios */ dp_link_train_init(dp, outp->dpcd[3] & 0x01); @@ -395,5 +395,5 @@ nouveau_dp_train(struct work_struct *w) DBG("training complete\n"); atomic_set(&outp->lt.done, 1); wake_up(&outp->lt.wait); - nouveau_event_get(outp->irq); + nvkm_notify_get(&outp->irq); } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c index 9fc7447fec90..d54da8b5f87e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c @@ -25,7 +25,7 @@ #include <engine/software.h> #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "nv50.h" @@ -35,17 +35,17 @@ static struct nouveau_oclass gm107_disp_sclass[] = { - { GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, + { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, + { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, + { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, + { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, + { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, {} }; static struct nouveau_oclass gm107_disp_base_oclass[] = { - { GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, + { GM107_DISP, &nvd0_disp_base_ofuncs }, {} }; @@ -93,9 +93,11 @@ gm107_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nvd0_disp_vblank_func, .base.outp = nvd0_disp_outp_sclass, .mthd.core = &nve0_disp_mast_mthd_chan, .mthd.base = &nvd0_disp_sync_mthd_chan, .mthd.ovly = &nve0_disp_ovly_mthd_chan, .mthd.prev = -0x020000, + .head.scanoutpos = nvd0_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c index a19e7d79b847..8b4e06abe533 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c @@ -22,25 +22,37 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include "nv50.h" int -nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) +nva3_hda_eld(NV50_DISP_MTHD_V1) { - const u32 soff = (or * 0x800); - int i; + union { + struct nv50_disp_sor_hda_eld_v0 v0; + } *args = data; + const u32 soff = outp->or * 0x800; + int ret, i; - if (data && data[0]) { + nv_ioctl(object, "disp sor hda eld size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version); + if (size > 0x60) + return -E2BIG; + } else + return ret; + + if (size && args->v0.data[0]) { for (i = 0; i < size; i++) - nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]); + nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]); for (; i < 0x60; i++) nv_wr32(priv, 0x61c440 + soff, (i << 8)); nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003); } else - if (data) { + if (size) { 
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001); } else { nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000); diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c index 717639386ced..baf558fc12fb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> @@ -33,19 +34,30 @@ #include "nv50.h" int -nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size) +nvd0_hda_eld(NV50_DISP_MTHD_V1) { - const u32 soff = (or * 0x030); - int i; + union { + struct nv50_disp_sor_hda_eld_v0 v0; + } *args = data; + const u32 soff = outp->or * 0x030; + int ret, i; - if (data && data[0]) { + nv_ioctl(object, "disp sor hda eld size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version); + if (size > 0x60) + return -E2BIG; + } else + return ret; + + if (size && args->v0.data[0]) { for (i = 0; i < size; i++) - nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]); + nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]); for (; i < 0x60; i++) nv_wr32(priv, 0x10ec00 + soff, (i << 8)); nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003); } else - if (data) { + if (size) { nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001); } else { nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000); diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c index 7fdade6e604d..fa276dede9cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c @@ -22,17 +22,38 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include "nv50.h" int -nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) +nv84_hdmi_ctrl(NV50_DISP_MTHD_V1) { const u32 hoff = (head * 0x800); + union { + struct nv50_disp_sor_hdmi_pwr_v0 v0; + } *args = data; + u32 ctrl; + int ret; - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { + nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " + "max_ac_packet %d rekey %d\n", + args->v0.version, args->v0.state, + args->v0.max_ac_packet, args->v0.rekey); + if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) + return -EINVAL; + ctrl = 0x40000000 * !!args->v0.state; + ctrl |= args->v0.max_ac_packet << 16; + ctrl |= args->v0.rekey; + ctrl |= 0x1f000000; /* ??? */ + } else + return ret; + + if (!(ctrl & 0x40000000)) { nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000); nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000); nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000); @@ -65,6 +86,6 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ /* HDMI_CTRL */ - nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? 
*/); + nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c index db8c6fd46278..57eeed1d1942 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c @@ -22,17 +22,38 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include "nv50.h" int -nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) +nva3_hdmi_ctrl(NV50_DISP_MTHD_V1) { - const u32 soff = (or * 0x800); + const u32 soff = outp->or * 0x800; + union { + struct nv50_disp_sor_hdmi_pwr_v0 v0; + } *args = data; + u32 ctrl; + int ret; - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { + nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " + "max_ac_packet %d rekey %d\n", + args->v0.version, args->v0.state, + args->v0.max_ac_packet, args->v0.rekey); + if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) + return -EINVAL; + ctrl = 0x40000000 * !!args->v0.state; + ctrl |= args->v0.max_ac_packet << 16; + ctrl |= args->v0.rekey; + ctrl |= 0x1f000000; /* ??? */ + } else + return ret; + + if (!(ctrl & 0x40000000)) { nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000); nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000); nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000); @@ -65,6 +86,6 @@ nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ /* HDMI_CTRL */ - nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? 
*/); + nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl); return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c index 5151bb261832..3106d295b48d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c @@ -22,17 +22,37 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include "nv50.h" int -nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) +nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1) { const u32 hoff = (head * 0x800); + union { + struct nv50_disp_sor_hdmi_pwr_v0 v0; + } *args = data; + u32 ctrl; + int ret; - if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) { + nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d " + "max_ac_packet %d rekey %d\n", + args->v0.version, args->v0.state, + args->v0.max_ac_packet, args->v0.rekey); + if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f) + return -EINVAL; + ctrl = 0x40000000 * !!args->v0.state; + ctrl |= args->v0.max_ac_packet << 16; + ctrl |= args->v0.rekey; + } else + return ret; + + if (!(ctrl & 0x40000000)) { nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000); nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000); nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000); @@ -54,7 +74,7 @@ nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data) nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001); /* HDMI_CTRL */ - nv_mask(priv, 0x616798 + hoff, 0x401f007f, data); + nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl); /* NFI, audio doesn't work without it though.. */ nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c index a32666ed0c47..366f315fc9a5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c @@ -24,60 +24,100 @@ #include "priv.h" +#include <core/client.h> #include <core/event.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> struct nv04_disp_priv { struct nouveau_disp base; }; static int -nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nv04_disp_scanoutpos(struct nouveau_object *object, struct nv04_disp_priv *priv, + void *data, u32 size, int head) { - struct nv04_disp_priv *priv = (void *)object->engine; - struct nv04_display_scanoutpos *args = data; - const int head = (mthd & NV04_DISP_MTHD_HEAD); + const u32 hoff = head * 0x2000; + union { + struct nv04_disp_scanoutpos_v0 v0; + } *args = data; u32 line; + int ret; + + nv_ioctl(object, "disp scanoutpos size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version); + args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff; + args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff; + args->v0.vblanke = args->v0.vtotal - 1; + + args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff; + args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff; + args->v0.hblanke = args->v0.htotal - 1; + + /* + * If output is vga instead of digital then vtotal/htotal is + * invalid so we have to give up and trigger the timestamping + * fallback in the drm core. 
+ */ + if (!args->v0.vtotal || !args->v0.htotal) + return -ENOTSUPP; + + args->v0.time[0] = ktime_to_ns(ktime_get()); + line = nv_rd32(priv, 0x600868 + hoff); + args->v0.time[1] = ktime_to_ns(ktime_get()); + args->v0.hline = (line & 0xffff0000) >> 16; + args->v0.vline = (line & 0x0000ffff); + } else + return ret; - if (size < sizeof(*args)) - return -EINVAL; - - args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff; - args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff; - args->vblanke = args->vtotal - 1; - - args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff; - args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff; - args->hblanke = args->htotal - 1; - - /* - * If output is vga instead of digital then vtotal/htotal is invalid - * so we have to give up and trigger the timestamping fallback in the - * drm core. - */ - if (!args->vtotal || !args->htotal) - return -ENOTSUPP; - - args->time[0] = ktime_to_ns(ktime_get()); - line = nv_rd32(priv, 0x600868 + (head * 0x2000)); - args->time[1] = ktime_to_ns(ktime_get()); - args->hline = (line & 0xffff0000) >> 16; - args->vline = (line & 0x0000ffff); return 0; } -#define HEAD_MTHD(n) (n), (n) + 0x01 +static int +nv04_disp_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size) +{ + union { + struct nv04_disp_mthd_v0 v0; + } *args = data; + struct nv04_disp_priv *priv = (void *)object->engine; + int head, ret; + + nv_ioctl(object, "disp mthd size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", + args->v0.version, args->v0.method, args->v0.head); + mthd = args->v0.method; + head = args->v0.head; + } else + return ret; -static struct nouveau_omthds -nv04_disp_omthds[] = { - { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos }, - {} + if (head < 0 || head >= 2) + return -ENXIO; + + switch (mthd) { + case NV04_DISP_SCANOUTPOS: + return nv04_disp_scanoutpos(object, priv, data, size, head); + default: + break; + } + + return -EINVAL; +} + +static struct nouveau_ofuncs +nv04_disp_ofuncs = { + .ctor = _nouveau_object_ctor, + .dtor = nouveau_object_destroy, + .init = nouveau_object_init, + .fini = nouveau_object_fini, + .mthd = nv04_disp_mthd, + .ntfy = nouveau_disp_ntfy, }; static struct nouveau_oclass nv04_disp_sclass[] = { - { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds }, + { NV04_DISP, &nv04_disp_ofuncs }, {}, }; @@ -86,17 +126,26 @@ nv04_disp_sclass[] = { ******************************************************************************/ static void -nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head) +nv04_disp_vblank_init(struct nvkm_event *event, int type, int head) { - nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001); + struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); + nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001); } static void -nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head) +nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head) { - nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000); + struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); + nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000); } +static const struct nvkm_event_func +nv04_disp_vblank_func = { + .ctor = nouveau_disp_vblank_ctor, + .init = nv04_disp_vblank_init, + .fini = nv04_disp_vblank_fini, +}; + static void nv04_disp_intr(struct nouveau_subdev *subdev) { @@ -106,12 +155,12 @@ 
nv04_disp_intr(struct nouveau_subdev *subdev) u32 pvideo; if (crtc0 & 0x00000001) { - nouveau_event_trigger(priv->base.vblank, 1, 0); + nouveau_disp_vblank(&priv->base, 0); nv_wr32(priv, 0x600100, 0x00000001); } if (crtc1 & 0x00000001) { - nouveau_event_trigger(priv->base.vblank, 1, 1); + nouveau_disp_vblank(&priv->base, 1); nv_wr32(priv, 0x602100, 0x00000001); } @@ -140,9 +189,6 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, nv_engine(priv)->sclass = nv04_disp_sclass; nv_subdev(priv)->intr = nv04_disp_intr; - priv->base.vblank->priv = priv; - priv->base.vblank->enable = nv04_disp_vblank_enable; - priv->base.vblank->disable = nv04_disp_vblank_disable; return 0; } @@ -155,4 +201,5 @@ nv04_disp_oclass = &(struct nouveau_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .vblank = &nv04_disp_vblank_func, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 2283c442a10d..f8cbb512132f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -23,10 +23,12 @@ */ #include <core/object.h> +#include <core/client.h> #include <core/parent.h> #include <core/handle.h> -#include <core/class.h> #include <core/enum.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> @@ -43,14 +45,16 @@ * EVO channel base class ******************************************************************************/ -int +static int nv50_disp_chan_create_(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, + struct nouveau_oclass *oclass, int head, int length, void **pobject) { + const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs; struct nv50_disp_base *base = (void *)parent; struct nv50_disp_chan *chan; + int chid = impl->chid + head; int ret; if (base->chan & (1 << chid)) @@ -63,12 +67,14 @@ nv50_disp_chan_create_(struct nouveau_object *parent, chan = *pobject; if (ret) return ret; - chan->chid = chid; + + nv_parent(chan)->object_attach = impl->attach; + nv_parent(chan)->object_detach = impl->detach; return 0; } -void +static void nv50_disp_chan_destroy(struct nv50_disp_chan *chan) { struct nv50_disp_base *base = (void *)nv_object(chan)->parent; @@ -76,6 +82,16 @@ nv50_disp_chan_destroy(struct nv50_disp_chan *chan) nouveau_namedb_destroy(&chan->base); } +int +nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size) +{ + struct nv50_disp_chan *chan = (void *)object; + *addr = nv_device_resource_start(nv_device(object), 0) + + 0x640000 + (chan->chid * 0x1000); + *size = 0x001000; + return 0; +} + u32 nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr) { @@ -115,16 +131,16 @@ nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie) nouveau_ramht_remove(base->ramht, cookie); } -int +static int nv50_disp_dmac_create_(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, u32 pushbuf, int chid, + struct nouveau_oclass *oclass, u32 pushbuf, int head, int length, void **pobject) { struct nv50_disp_dmac *dmac; int ret; - ret = nv50_disp_chan_create_(parent, engine, oclass, chid, + ret = nv50_disp_chan_create_(parent, engine, oclass, head, length, pobject); dmac = *pobject; if (ret) @@ -397,27 +413,32 @@ nv50_disp_mast_mthd_chan = { } }; -static int +int nv50_disp_mast_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass 
*oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nv50_display_mast_class *args = data; + union { + struct nv50_disp_core_channel_dma_v0 v0; + } *args = data; struct nv50_disp_dmac *mast; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create disp core channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create disp core channel dma vers %d " + "pushbuf %08x\n", + args->v0.version, args->v0.pushbuf); + } else + return ret; - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, 0, sizeof(*mast), (void **)&mast); *pobject = nv_object(mast); if (ret) return ret; - nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach; return 0; } @@ -479,14 +500,18 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend) return nv50_disp_chan_fini(&mast->base, suspend); } -struct nouveau_ofuncs +struct nv50_disp_chan_impl nv50_disp_mast_ofuncs = { - .ctor = nv50_disp_mast_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nv50_disp_mast_init, - .fini = nv50_disp_mast_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_mast_ctor, + .base.dtor = nv50_disp_dmac_dtor, + .base.init = nv50_disp_mast_init, + .base.fini = nv50_disp_mast_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 0, + .attach = nv50_disp_dmac_object_attach, + .detach = nv50_disp_dmac_object_detach, }; /******************************************************************************* @@ -543,39 +568,51 @@ nv50_disp_sync_mthd_chan = { } }; -static int +int nv50_disp_sync_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nv50_display_sync_class *args = data; + union { + struct nv50_disp_base_channel_dma_v0 v0; + } *args = data; + struct nv50_disp_priv *priv = (void *)engine; struct nv50_disp_dmac *dmac; int ret; - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; + nv_ioctl(parent, "create disp base channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create disp base channel dma vers %d " + "pushbuf %08x head %d\n", + args->v0.version, args->v0.pushbuf, args->v0.head); + if (args->v0.head > priv->head.nr) + return -EINVAL; + } else + return ret; - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 1 + args->head, sizeof(*dmac), + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, + args->v0.head, sizeof(*dmac), (void **)&dmac); *pobject = nv_object(dmac); if (ret) return ret; - nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; return 0; } -struct nouveau_ofuncs +struct nv50_disp_chan_impl nv50_disp_sync_ofuncs = { - .ctor = nv50_disp_sync_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nv50_disp_dmac_init, - .fini = nv50_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_sync_ctor, + .base.dtor = nv50_disp_dmac_dtor, + .base.init = nv50_disp_dmac_init, + .base.fini = nv50_disp_dmac_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 1, + .attach = nv50_disp_dmac_object_attach, + 
.detach = nv50_disp_dmac_object_detach, }; /******************************************************************************* @@ -620,39 +657,51 @@ nv50_disp_ovly_mthd_chan = { } }; -static int +int nv50_disp_ovly_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nv50_display_ovly_class *args = data; + union { + struct nv50_disp_overlay_channel_dma_v0 v0; + } *args = data; + struct nv50_disp_priv *priv = (void *)engine; struct nv50_disp_dmac *dmac; int ret; - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; + nv_ioctl(parent, "create disp overlay channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create disp overlay channel dma vers %d " + "pushbuf %08x head %d\n", + args->v0.version, args->v0.pushbuf, args->v0.head); + if (args->v0.head > priv->head.nr) + return -EINVAL; + } else + return ret; - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 3 + args->head, sizeof(*dmac), + ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf, + args->v0.head, sizeof(*dmac), (void **)&dmac); *pobject = nv_object(dmac); if (ret) return ret; - nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach; return 0; } -struct nouveau_ofuncs +struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs = { - .ctor = nv50_disp_ovly_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nv50_disp_dmac_init, - .fini = nv50_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_ovly_ctor, + .base.dtor = nv50_disp_dmac_dtor, + .base.init = nv50_disp_dmac_init, + .base.fini = nv50_disp_dmac_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 3, + .attach = nv50_disp_dmac_object_attach, + .detach = nv50_disp_dmac_object_detach, }; /******************************************************************************* @@ -662,14 +711,14 @@ nv50_disp_ovly_ofuncs = { static int nv50_disp_pioc_create_(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, + struct nouveau_oclass *oclass, int head, int length, void **pobject) { - return nv50_disp_chan_create_(parent, engine, oclass, chid, + return nv50_disp_chan_create_(parent, engine, oclass, head, length, pobject); } -static void +void nv50_disp_pioc_dtor(struct nouveau_object *object) { struct nv50_disp_pioc *pioc = (void *)object; @@ -727,20 +776,29 @@ nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend) * EVO immediate overlay channel objects ******************************************************************************/ -static int +int nv50_disp_oimm_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nv50_display_oimm_class *args = data; + union { + struct nv50_disp_overlay_v0 v0; + } *args = data; + struct nv50_disp_priv *priv = (void *)engine; struct nv50_disp_pioc *pioc; int ret; - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; + nv_ioctl(parent, "create disp overlay size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create disp overlay vers %d head %d\n", + args->v0.version, args->v0.head); + if (args->v0.head > priv->head.nr) + return -EINVAL; + } else + return ret; - ret = 
nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head, + ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, sizeof(*pioc), (void **)&pioc); *pobject = nv_object(pioc); if (ret) @@ -749,34 +807,45 @@ nv50_disp_oimm_ctor(struct nouveau_object *parent, return 0; } -struct nouveau_ofuncs +struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs = { - .ctor = nv50_disp_oimm_ctor, - .dtor = nv50_disp_pioc_dtor, - .init = nv50_disp_pioc_init, - .fini = nv50_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_oimm_ctor, + .base.dtor = nv50_disp_pioc_dtor, + .base.init = nv50_disp_pioc_init, + .base.fini = nv50_disp_pioc_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 5, }; /******************************************************************************* * EVO cursor channel objects ******************************************************************************/ -static int +int nv50_disp_curs_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nv50_display_curs_class *args = data; + union { + struct nv50_disp_cursor_v0 v0; + } *args = data; + struct nv50_disp_priv *priv = (void *)engine; struct nv50_disp_pioc *pioc; int ret; - if (size < sizeof(*args) || args->head > 1) - return -EINVAL; + nv_ioctl(parent, "create disp cursor size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create disp cursor vers %d head %d\n", + args->v0.version, args->v0.head); + if (args->v0.head > priv->head.nr) + return -EINVAL; + } else + return ret; - ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head, + ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head, sizeof(*pioc), (void **)&pioc); *pobject = nv_object(pioc); if (ret) @@ -785,14 +854,16 @@ nv50_disp_curs_ctor(struct nouveau_object *parent, return 0; } -struct nouveau_ofuncs +struct nv50_disp_chan_impl nv50_disp_curs_ofuncs = { - .ctor = nv50_disp_curs_ctor, - .dtor = nv50_disp_pioc_dtor, - .init = nv50_disp_pioc_init, - .fini = nv50_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_curs_ctor, + .base.dtor = nv50_disp_pioc_dtor, + .base.init = nv50_disp_pioc_init, + .base.fini = nv50_disp_pioc_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 7, }; /******************************************************************************* @@ -800,47 +871,162 @@ nv50_disp_curs_ofuncs = { ******************************************************************************/ int -nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0) { - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv04_display_scanoutpos *args = data; - const int head = (mthd & NV50_DISP_MTHD_HEAD); - u32 blanke, blanks, total; + const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540)); + const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540)); + const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540)); + union { + struct nv04_disp_scanoutpos_v0 v0; + } *args = data; + int ret; + + nv_ioctl(object, "disp scanoutpos size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version); + args->v0.vblanke = (blanke & 
0xffff0000) >> 16; + args->v0.hblanke = (blanke & 0x0000ffff); + args->v0.vblanks = (blanks & 0xffff0000) >> 16; + args->v0.hblanks = (blanks & 0x0000ffff); + args->v0.vtotal = ( total & 0xffff0000) >> 16; + args->v0.htotal = ( total & 0x0000ffff); + args->v0.time[0] = ktime_to_ns(ktime_get()); + args->v0.vline = /* vline read locks hline */ + nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; + args->v0.time[1] = ktime_to_ns(ktime_get()); + args->v0.hline = + nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; + } else + return ret; - if (size < sizeof(*args) || head >= priv->head.nr) - return -EINVAL; - blanke = nv_rd32(priv, 0x610aec + (head * 0x540)); - blanks = nv_rd32(priv, 0x610af4 + (head * 0x540)); - total = nv_rd32(priv, 0x610afc + (head * 0x540)); - - args->vblanke = (blanke & 0xffff0000) >> 16; - args->hblanke = (blanke & 0x0000ffff); - args->vblanks = (blanks & 0xffff0000) >> 16; - args->hblanks = (blanks & 0x0000ffff); - args->vtotal = ( total & 0xffff0000) >> 16; - args->htotal = ( total & 0x0000ffff); - - args->time[0] = ktime_to_ns(ktime_get()); - args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; - args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */ - args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; return 0; } -static void -nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head) +int +nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd, + void *data, u32 size) { - nv_mask(event->priv, 0x61002c, (4 << head), (4 << head)); -} + const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine); + union { + struct nv50_disp_mthd_v0 v0; + struct nv50_disp_mthd_v1 v1; + } *args = data; + struct nv50_disp_priv *priv = (void *)object->engine; + struct nvkm_output *outp = NULL; + struct nvkm_output *temp; + u16 type, mask = 0; + int head, ret; -static void -nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head) -{ - nv_mask(event->priv, 0x61002c, (4 << head), 0); + if (mthd != NV50_DISP_MTHD) + return -EINVAL; + + nv_ioctl(object, "disp mthd size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n", + args->v0.version, args->v0.method, args->v0.head); + mthd = args->v0.method; + head = args->v0.head; + } else + if (nvif_unpack(args->v1, 1, 1, true)) { + nv_ioctl(object, "disp mthd vers %d mthd %02x " + "type %04x mask %04x\n", + args->v1.version, args->v1.method, + args->v1.hasht, args->v1.hashm); + mthd = args->v1.method; + type = args->v1.hasht; + mask = args->v1.hashm; + head = ffs((mask >> 8) & 0x0f) - 1; + } else + return ret; + + if (head < 0 || head >= priv->head.nr) + return -ENXIO; + + if (mask) { + list_for_each_entry(temp, &priv->base.outp, head) { + if ((temp->info.hasht == type) && + (temp->info.hashm & mask) == mask) { + outp = temp; + break; + } + } + if (outp == NULL) + return -ENXIO; + } + + switch (mthd) { + case NV50_DISP_SCANOUTPOS: + return impl->head.scanoutpos(object, priv, data, size, head); + default: + break; + } + + switch (mthd * !!outp) { + case NV50_DISP_MTHD_V1_DAC_PWR: + return priv->dac.power(object, priv, data, size, head, outp); + case NV50_DISP_MTHD_V1_DAC_LOAD: + return priv->dac.sense(object, priv, data, size, head, outp); + case NV50_DISP_MTHD_V1_SOR_PWR: + return priv->sor.power(object, priv, data, size, head, outp); + case NV50_DISP_MTHD_V1_SOR_HDA_ELD: + if (!priv->sor.hda_eld) + return -ENODEV; + return priv->sor.hda_eld(object, priv, data, size, head, outp); + case 
NV50_DISP_MTHD_V1_SOR_HDMI_PWR: + if (!priv->sor.hdmi) + return -ENODEV; + return priv->sor.hdmi(object, priv, data, size, head, outp); + case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: { + union { + struct nv50_disp_sor_lvds_script_v0 v0; + } *args = data; + nv_ioctl(object, "disp sor lvds script size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp sor lvds script " + "vers %d name %04x\n", + args->v0.version, args->v0.script); + priv->sor.lvdsconf = args->v0.script; + return 0; + } else + return ret; + } + break; + case NV50_DISP_MTHD_V1_SOR_DP_PWR: { + struct nvkm_output_dp *outpdp = (void *)outp; + union { + struct nv50_disp_sor_dp_pwr_v0 v0; + } *args = data; + nv_ioctl(object, "disp sor dp pwr size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp sor dp pwr vers %d state %d\n", + args->v0.version, args->v0.state); + if (args->v0.state == 0) { + nvkm_notify_put(&outpdp->irq); + ((struct nvkm_output_dp_impl *)nv_oclass(outp)) + ->lnk_pwr(outpdp, 0); + atomic_set(&outpdp->lt.done, 0); + return 0; + } else + if (args->v0.state != 0) { + nvkm_output_dp_train(&outpdp->base, 0, true); + return 0; + } + } else + return ret; + } + break; + case NV50_DISP_MTHD_V1_PIOR_PWR: + if (!priv->pior.power) + return -ENODEV; + return priv->pior.power(object, priv, data, size, head, outp); + default: + break; + } + + return -EINVAL; } -static int +int nv50_disp_base_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -856,14 +1042,11 @@ nv50_disp_base_ctor(struct nouveau_object *parent, if (ret) return ret; - priv->base.vblank->priv = priv; - priv->base.vblank->enable = nv50_disp_base_vblank_enable; - priv->base.vblank->disable = nv50_disp_base_vblank_disable; return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, &base->ramht); } -static void +void nv50_disp_base_dtor(struct nouveau_object *object) { struct nv50_disp_base *base = (void *)object; @@ -958,34 +1141,23 @@ nv50_disp_base_ofuncs = { .dtor = nv50_disp_base_dtor, .init = nv50_disp_base_init, .fini = nv50_disp_base_fini, -}; - -static struct nouveau_omthds -nv50_disp_base_omthds[] = { - { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, - {}, + .mthd = nv50_disp_base_mthd, + .ntfy = nouveau_disp_ntfy, }; static struct nouveau_oclass nv50_disp_base_oclass[] = { - { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds }, + { NV50_DISP, &nv50_disp_base_ofuncs }, {} }; static struct nouveau_oclass nv50_disp_sclass[] = { - { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, + { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, + { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, + { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, + { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, {} }; @@ -1005,7 +1177,7 @@ 
nv50_disp_data_ctor(struct nouveau_object *parent, int ret = -EBUSY; /* no context needed for channel objects... */ - if (nv_mclass(parent) != NV_DEVICE_CLASS) { + if (nv_mclass(parent) != NV_DEVICE) { atomic_inc(&parent->refcount); *pobject = parent; return 1; @@ -1040,6 +1212,27 @@ nv50_disp_cclass = { * Display engine implementation ******************************************************************************/ +static void +nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head) +{ + struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); + nv_mask(disp, 0x61002c, (4 << head), 0); +} + +static void +nv50_disp_vblank_init(struct nvkm_event *event, int type, int head) +{ + struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); + nv_mask(disp, 0x61002c, (4 << head), (4 << head)); +} + +const struct nvkm_event_func +nv50_disp_vblank_func = { + .ctor = nouveau_disp_vblank_ctor, + .init = nv50_disp_vblank_init, + .fini = nv50_disp_vblank_fini, +}; + static const struct nouveau_enum nv50_disp_intr_error_type[] = { { 3, "ILLEGAL_MTHD" }, @@ -1381,7 +1574,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int TU, VTUi, VTUf, VTUa; u64 link_data_rate, link_ratio, unk; u32 best_diff = 64 * symbol; - u32 link_nr, link_bw, bits, r; + u32 link_nr, link_bw, bits; /* calculate packed data rate for each lane */ if (dpctrl > 0x00030000) link_nr = 4; @@ -1401,7 +1594,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, /* calculate ratio of packed data rate to link symbol rate */ link_ratio = link_data_rate * symbol; - r = do_div(link_ratio, link_bw); + do_div(link_ratio, link_bw); for (TU = 64; TU >= 32; TU--) { /* calculate average number of valid symbols in each TU */ @@ -1462,8 +1655,8 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, /* XXX close to vbios numbers, but not right */ unk = (symbol - link_ratio) * bestTU; unk *= link_ratio; - r = do_div(unk, symbol); - r = do_div(unk, symbol); + do_div(unk, symbol); + do_div(unk, symbol); unk += 6; nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2); @@ -1570,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp const int or = ffs(outp->or) - 1; const u32 loff = (or * 0x800) + (link * 0x80); const u16 mask = (outp->sorconf.link << 6) | outp->or; + struct dcb_output match; u8 ver, hdr; - if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) + if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match)) nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); } @@ -1654,13 +1848,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev) } if (intr1 & 0x00000004) { - nouveau_event_trigger(priv->base.vblank, 1, 0); + nouveau_disp_vblank(&priv->base, 0); nv_wr32(priv, 0x610024, 0x00000004); intr1 &= ~0x00000004; } if (intr1 & 0x00000008) { - nouveau_event_trigger(priv->base.vblank, 1, 1); + nouveau_disp_vblank(&priv->base, 1); nv_wr32(priv, 0x610024, 0x00000008); intr1 &= ~0x00000008; } @@ -1718,9 +1912,11 @@ nv50_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nv50_disp_vblank_func, .base.outp = nv50_disp_outp_sclass, .mthd.core = &nv50_disp_mast_mthd_chan, .mthd.base = &nv50_disp_sync_mthd_chan, .mthd.ovly = &nv50_disp_ovly_mthd_chan, .mthd.prev = 0x000004, + .head.scanoutpos = nv50_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h index 1a886472b6f5..8ab14461f70c 100644 
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h @@ -14,15 +14,10 @@ #include "outp.h" #include "outpdp.h" -struct nv50_disp_impl { - struct nouveau_disp_impl base; - struct { - const struct nv50_disp_mthd_chan *core; - const struct nv50_disp_mthd_chan *base; - const struct nv50_disp_mthd_chan *ovly; - int prev; - } mthd; -}; +#define NV50_DISP_MTHD_ struct nouveau_object *object, \ + struct nv50_disp_priv *priv, void *data, u32 size +#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head +#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp struct nv50_disp_priv { struct nouveau_disp base; @@ -36,44 +31,52 @@ struct nv50_disp_priv { } head; struct { int nr; - int (*power)(struct nv50_disp_priv *, int dac, u32 data); - int (*sense)(struct nv50_disp_priv *, int dac, u32 load); + int (*power)(NV50_DISP_MTHD_V1); + int (*sense)(NV50_DISP_MTHD_V1); } dac; struct { int nr; - int (*power)(struct nv50_disp_priv *, int sor, u32 data); - int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); - int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); + int (*power)(NV50_DISP_MTHD_V1); + int (*hda_eld)(NV50_DISP_MTHD_V1); + int (*hdmi)(NV50_DISP_MTHD_V1); u32 lvdsconf; } sor; struct { int nr; - int (*power)(struct nv50_disp_priv *, int ext, u32 data); + int (*power)(NV50_DISP_MTHD_V1); u8 type[3]; } pior; }; -#define HEAD_MTHD(n) (n), (n) + 0x03 - -int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32); +struct nv50_disp_impl { + struct nouveau_disp_impl base; + struct { + const struct nv50_disp_mthd_chan *core; + const struct nv50_disp_mthd_chan *base; + const struct nv50_disp_mthd_chan *ovly; + int prev; + } mthd; + struct { + int (*scanoutpos)(NV50_DISP_MTHD_V0); + } head; +}; -#define DAC_MTHD(n) (n), (n) + 0x03 +int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0); +int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_dac_power(struct nv50_disp_priv *, int, u32); -int nv50_dac_sense(struct nv50_disp_priv *, int, u32); +int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0); -#define SOR_MTHD(n) (n), (n) + 0x3f +int nv50_dac_power(NV50_DISP_MTHD_V1); +int nv50_dac_sense(NV50_DISP_MTHD_V1); -int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); -int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32); +int nva3_hda_eld(NV50_DISP_MTHD_V1); +int nvd0_hda_eld(NV50_DISP_MTHD_V1); -int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); -int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); -int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32); +int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1); +int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1); +int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1); -int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_sor_power(struct nv50_disp_priv *, int, u32); +int nv50_sor_power(NV50_DISP_MTHD_V1); int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16, u32, struct dcb_output *); @@ -93,10 +96,7 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, struct dcb_output *); -#define PIOR_MTHD(n) (n), (n) + 0x03 - -int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32); -int nv50_pior_power(struct nv50_disp_priv *, int, u32); +int nv50_pior_power(NV50_DISP_MTHD_V1); struct nv50_disp_base { struct nouveau_parent base; @@ -104,14 +104,19 @@ struct 
nv50_disp_base { u32 chan; }; +struct nv50_disp_chan_impl { + struct nouveau_ofuncs base; + int chid; + int (*attach)(struct nouveau_object *, struct nouveau_object *, u32); + void (*detach)(struct nouveau_object *, int); +}; + struct nv50_disp_chan { struct nouveau_namedb base; int chid; }; -int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, int, int, void **); -void nv50_disp_chan_destroy(struct nv50_disp_chan *); +int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *); u32 nv50_disp_chan_rd32(struct nouveau_object *, u64); void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); @@ -120,20 +125,20 @@ void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32); #define nv50_disp_chan_fini(a,b) \ nouveau_namedb_fini(&(a)->base, (b)) -int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, u32, int, int, void **); -void nv50_disp_dmac_dtor(struct nouveau_object *); - struct nv50_disp_dmac { struct nv50_disp_chan base; struct nouveau_dmaobj *pushdma; u32 push; }; +void nv50_disp_dmac_dtor(struct nouveau_object *); + struct nv50_disp_pioc { struct nv50_disp_chan base; }; +void nv50_disp_pioc_dtor(struct nouveau_object *); + struct nv50_disp_mthd_list { u32 mthd; u32 addr; @@ -154,47 +159,67 @@ struct nv50_disp_mthd_chan { } data[]; }; -extern struct nouveau_ofuncs nv50_disp_mast_ofuncs; +extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs; +int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base; extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor; extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior; -extern struct nouveau_ofuncs nv50_disp_sync_ofuncs; +extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs; +int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image; -extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs; +extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs; +int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base; -extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs; -extern struct nouveau_ofuncs nv50_disp_curs_ofuncs; +extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs; +int nv50_disp_oimm_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs; +int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); extern struct nouveau_ofuncs nv50_disp_base_ofuncs; +int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +void nv50_disp_base_dtor(struct nouveau_object *); +extern struct nouveau_omthds nv50_disp_base_omthds[]; extern struct nouveau_oclass nv50_disp_cclass; void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head, const struct nv50_disp_mthd_chan *); void nv50_disp_intr_supervisor(struct work_struct *); void nv50_disp_intr(struct nouveau_subdev *); +extern const struct 
nvkm_event_func nv50_disp_vblank_func; extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan; extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac; extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head; extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan; extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan; -extern struct nouveau_omthds nv84_disp_base_omthds[]; extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan; -extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs; +extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs; extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base; extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac; extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor; extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior; -extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs; +extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs; +extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs; extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan; -extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs; -extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; -extern struct nouveau_omthds nvd0_disp_base_omthds[]; +extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs; +extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs; extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; extern struct nouveau_oclass nvd0_disp_cclass; void nvd0_disp_intr_supervisor(struct work_struct *); void nvd0_disp_intr(struct nouveau_subdev *); +extern const struct nvkm_event_func nvd0_disp_vblank_func; extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan; extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c index 1cc62e434683..788ced1b6182 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c @@ -25,7 +25,7 @@ #include <engine/software.h> #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "nv50.h" @@ -204,31 +204,17 @@ nv84_disp_ovly_mthd_chan = { static struct nouveau_oclass nv84_disp_sclass[] = { - { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, + { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, + { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, + { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, + { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, {} }; -struct nouveau_omthds -nv84_disp_base_omthds[] = { - { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, - {}, -}; - static struct nouveau_oclass nv84_disp_base_oclass[] = { - { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, 
nv84_disp_base_omthds }, + { G82_DISP, &nv50_disp_base_ofuncs }, {} }; @@ -276,9 +262,11 @@ nv84_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nv50_disp_vblank_func, .base.outp = nv50_disp_outp_sclass, .mthd.core = &nv84_disp_mast_mthd_chan, .mthd.base = &nv84_disp_sync_mthd_chan, .mthd.ovly = &nv84_disp_ovly_mthd_chan, .mthd.prev = 0x000004, + .head.scanoutpos = nv50_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c index 4f718a9f5aef..fa79de906eae 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c @@ -25,7 +25,7 @@ #include <engine/software.h> #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "nv50.h" @@ -63,32 +63,17 @@ nv94_disp_mast_mthd_chan = { static struct nouveau_oclass nv94_disp_sclass[] = { - { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, + { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, + { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, + { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, + { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, {} }; -static struct nouveau_omthds -nv94_disp_base_omthds[] = { - { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, - {}, -}; - static struct nouveau_oclass nv94_disp_base_oclass[] = { - { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds }, + { GT206_DISP, &nv50_disp_base_ofuncs }, {} }; @@ -143,9 +128,11 @@ nv94_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nv50_disp_vblank_func, .base.outp = nv94_disp_outp_sclass, .mthd.core = &nv94_disp_mast_mthd_chan, .mthd.base = &nv84_disp_sync_mthd_chan, .mthd.ovly = &nv84_disp_ovly_mthd_chan, .mthd.prev = 0x000004, + .head.scanoutpos = nv50_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c index 6237a9a36f70..7af15f5d48dc 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c @@ -25,7 +25,7 @@ #include <engine/software.h> #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "nv50.h" @@ -80,17 +80,17 @@ nva0_disp_ovly_mthd_chan = { static struct nouveau_oclass nva0_disp_sclass[] = { - { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + { GT200_DISP_CORE_CHANNEL_DMA, 
&nv50_disp_mast_ofuncs.base }, + { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, + { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, + { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, + { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, {} }; static struct nouveau_oclass nva0_disp_base_oclass[] = { - { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds }, + { GT200_DISP, &nv50_disp_base_ofuncs }, {} }; @@ -138,9 +138,11 @@ nva0_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nv50_disp_vblank_func, .base.outp = nv50_disp_outp_sclass, .mthd.core = &nv84_disp_mast_mthd_chan, .mthd.base = &nv84_disp_sync_mthd_chan, .mthd.ovly = &nva0_disp_ovly_mthd_chan, .mthd.prev = 0x000004, + .head.scanoutpos = nv50_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c index 019124d4782b..6bd39448f8da 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c @@ -25,7 +25,7 @@ #include <engine/software.h> #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "nv50.h" @@ -35,33 +35,17 @@ static struct nouveau_oclass nva3_disp_sclass[] = { - { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs }, - { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs }, - { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs }, - { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs }, - { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs }, + { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base }, + { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base }, + { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base }, + { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base }, + { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base }, {} }; -static struct nouveau_omthds -nva3_disp_base_omthds[] = { - { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos }, - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, - {}, -}; - static struct nouveau_oclass nva3_disp_base_oclass[] = { - { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds }, + { GT214_DISP, &nv50_disp_base_ofuncs }, {} }; @@ -110,9 +94,11 @@ nva3_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nv50_disp_vblank_func, .base.outp = nv94_disp_outp_sclass, .mthd.core = &nv94_disp_mast_mthd_chan, .mthd.base = &nv84_disp_sync_mthd_chan, .mthd.ovly = &nv84_disp_ovly_mthd_chan, .mthd.prev = 0x000004, + .head.scanoutpos = nv50_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index fa30d8196f35..a4bb3c774ee1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -23,9 +23,11 @@ */ #include <core/object.h> +#include <core/client.h> #include <core/parent.h> #include <core/handle.h> -#include 
<core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <engine/disp.h> @@ -265,30 +267,6 @@ nvd0_disp_mast_mthd_chan = { }; static int -nvd0_disp_mast_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_mast_class *args = data; - struct nv50_disp_dmac *mast; - int ret; - - if (size < sizeof(*args)) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 0, sizeof(*mast), (void **)&mast); - *pobject = nv_object(mast); - if (ret) - return ret; - - nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -static int nvd0_disp_mast_init(struct nouveau_object *object) { struct nv50_disp_priv *priv = (void *)object->engine; @@ -342,14 +320,18 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend) return nv50_disp_chan_fini(&mast->base, suspend); } -struct nouveau_ofuncs +struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs = { - .ctor = nvd0_disp_mast_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_mast_init, - .fini = nvd0_disp_mast_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_mast_ctor, + .base.dtor = nv50_disp_dmac_dtor, + .base.init = nvd0_disp_mast_init, + .base.fini = nvd0_disp_mast_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 0, + .attach = nvd0_disp_dmac_object_attach, + .detach = nvd0_disp_dmac_object_detach, }; /******************************************************************************* @@ -431,40 +413,18 @@ nvd0_disp_sync_mthd_chan = { } }; -static int -nvd0_disp_sync_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_sync_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 1 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs +struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs = { - .ctor = nvd0_disp_sync_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_dmac_init, - .fini = nvd0_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_sync_ctor, + .base.dtor = nv50_disp_dmac_dtor, + .base.init = nvd0_disp_dmac_init, + .base.fini = nvd0_disp_dmac_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 1, + .attach = nvd0_disp_dmac_object_attach, + .detach = nvd0_disp_dmac_object_detach, }; /******************************************************************************* @@ -533,40 +493,18 @@ nvd0_disp_ovly_mthd_chan = { } }; -static int -nvd0_disp_ovly_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_ovly_class *args = data; - struct nv50_disp_priv *priv = (void 
*)engine; - struct nv50_disp_dmac *dmac; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, - 5 + args->head, sizeof(*dmac), - (void **)&dmac); - *pobject = nv_object(dmac); - if (ret) - return ret; - - nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach; - nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach; - return 0; -} - -struct nouveau_ofuncs +struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs = { - .ctor = nvd0_disp_ovly_ctor, - .dtor = nv50_disp_dmac_dtor, - .init = nvd0_disp_dmac_init, - .fini = nvd0_disp_dmac_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_ovly_ctor, + .base.dtor = nv50_disp_dmac_dtor, + .base.init = nvd0_disp_dmac_init, + .base.fini = nvd0_disp_dmac_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 5, + .attach = nvd0_disp_dmac_object_attach, + .detach = nvd0_disp_dmac_object_detach, }; /******************************************************************************* @@ -574,23 +512,6 @@ nvd0_disp_ovly_ofuncs = { ******************************************************************************/ static int -nvd0_disp_pioc_create_(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, int chid, - int length, void **pobject) -{ - return nv50_disp_chan_create_(parent, engine, oclass, chid, - length, pobject); -} - -static void -nvd0_disp_pioc_dtor(struct nouveau_object *object) -{ - struct nv50_disp_pioc *pioc = (void *)object; - nv50_disp_chan_destroy(&pioc->base); -} - -static int nvd0_disp_pioc_init(struct nouveau_object *object) { struct nv50_disp_priv *priv = (void *)object->engine; @@ -643,152 +564,68 @@ nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend) * EVO immediate overlay channel objects ******************************************************************************/ -static int -nvd0_disp_oimm_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_display_oimm_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs +struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs = { - .ctor = nvd0_disp_oimm_ctor, - .dtor = nvd0_disp_pioc_dtor, - .init = nvd0_disp_pioc_init, - .fini = nvd0_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_oimm_ctor, + .base.dtor = nv50_disp_pioc_dtor, + .base.init = nvd0_disp_pioc_init, + .base.fini = nvd0_disp_pioc_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 9, }; /******************************************************************************* * EVO cursor channel objects ******************************************************************************/ -static int -nvd0_disp_curs_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct 
nv50_display_curs_class *args = data; - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_pioc *pioc; - int ret; - - if (size < sizeof(*args) || args->head >= priv->head.nr) - return -EINVAL; - - ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head, - sizeof(*pioc), (void **)&pioc); - *pobject = nv_object(pioc); - if (ret) - return ret; - - return 0; -} - -struct nouveau_ofuncs +struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs = { - .ctor = nvd0_disp_curs_ctor, - .dtor = nvd0_disp_pioc_dtor, - .init = nvd0_disp_pioc_init, - .fini = nvd0_disp_pioc_fini, - .rd32 = nv50_disp_chan_rd32, - .wr32 = nv50_disp_chan_wr32, + .base.ctor = nv50_disp_curs_ctor, + .base.dtor = nv50_disp_pioc_dtor, + .base.init = nvd0_disp_pioc_init, + .base.fini = nvd0_disp_pioc_fini, + .base.map = nv50_disp_chan_map, + .base.rd32 = nv50_disp_chan_rd32, + .base.wr32 = nv50_disp_chan_wr32, + .chid = 13, }; /******************************************************************************* * Base display object ******************************************************************************/ -static int -nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd, - void *data, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - struct nv04_display_scanoutpos *args = data; - const int head = (mthd & NV50_DISP_MTHD_HEAD); - u32 blanke, blanks, total; - - if (size < sizeof(*args) || head >= priv->head.nr) - return -EINVAL; - - total = nv_rd32(priv, 0x640414 + (head * 0x300)); - blanke = nv_rd32(priv, 0x64041c + (head * 0x300)); - blanks = nv_rd32(priv, 0x640420 + (head * 0x300)); - - args->vblanke = (blanke & 0xffff0000) >> 16; - args->hblanke = (blanke & 0x0000ffff); - args->vblanks = (blanks & 0xffff0000) >> 16; - args->hblanks = (blanks & 0x0000ffff); - args->vtotal = ( total & 0xffff0000) >> 16; - args->htotal = ( total & 0x0000ffff); - - args->time[0] = ktime_to_ns(ktime_get()); - args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; - args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */ - args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; - return 0; -} - -static void -nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head) -{ - nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); -} - -static void -nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head) +int +nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0) { - nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); -} - -static int -nvd0_disp_base_ctor(struct nouveau_object *parent, - struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_disp_priv *priv = (void *)engine; - struct nv50_disp_base *base; + const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300)); + const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300)); + const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300)); + union { + struct nv04_disp_scanoutpos_v0 v0; + } *args = data; int ret; - ret = nouveau_parent_create(parent, engine, oclass, 0, - priv->sclass, 0, &base); - *pobject = nv_object(base); - if (ret) + nv_ioctl(object, "disp scanoutpos size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version); + args->v0.vblanke = (blanke & 0xffff0000) >> 16; + args->v0.hblanke = (blanke & 0x0000ffff); + args->v0.vblanks = (blanks & 0xffff0000) >> 16; + args->v0.hblanks = (blanks & 
0x0000ffff); + args->v0.vtotal = ( total & 0xffff0000) >> 16; + args->v0.htotal = ( total & 0x0000ffff); + args->v0.time[0] = ktime_to_ns(ktime_get()); + args->v0.vline = /* vline read locks hline */ + nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff; + args->v0.time[1] = ktime_to_ns(ktime_get()); + args->v0.hline = + nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff; + } else return ret; - priv->base.vblank->priv = priv; - priv->base.vblank->enable = nvd0_disp_base_vblank_enable; - priv->base.vblank->disable = nvd0_disp_base_vblank_disable; - - return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0, - &base->ramht); -} - -static void -nvd0_disp_base_dtor(struct nouveau_object *object) -{ - struct nv50_disp_base *base = (void *)object; - nouveau_ramht_ref(NULL, &base->ramht); - nouveau_parent_destroy(&base->base); + return 0; } static int @@ -874,41 +711,27 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend) struct nouveau_ofuncs nvd0_disp_base_ofuncs = { - .ctor = nvd0_disp_base_ctor, - .dtor = nvd0_disp_base_dtor, + .ctor = nv50_disp_base_ctor, + .dtor = nv50_disp_base_dtor, .init = nvd0_disp_base_init, .fini = nvd0_disp_base_fini, -}; - -struct nouveau_omthds -nvd0_disp_base_omthds[] = { - { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos }, - { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, - { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, - { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, - { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd }, - { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, - { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd }, - { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd }, - {}, + .mthd = nv50_disp_base_mthd, + .ntfy = nouveau_disp_ntfy, }; static struct nouveau_oclass nvd0_disp_base_oclass[] = { - { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, + { GF110_DISP, &nvd0_disp_base_ofuncs }, {} }; static struct nouveau_oclass nvd0_disp_sclass[] = { - { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, + { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, + { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, + { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, + { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, + { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, {} }; @@ -916,6 +739,27 @@ nvd0_disp_sclass[] = { * Display engine implementation ******************************************************************************/ +static void +nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head) +{ + struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); + nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001); +} + +static void +nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head) +{ + struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank); + nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000); +} + +const struct nvkm_event_func +nvd0_disp_vblank_func = { + .ctor = nouveau_disp_vblank_ctor, + .init = nvd0_disp_vblank_init, + .fini = nvd0_disp_vblank_fini, +}; + static struct nvkm_output * exec_lookup(struct 
nv50_disp_priv *priv, int head, int or, u32 ctrl, u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, @@ -1343,7 +1187,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev) if (mask & intr) { u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); if (stat & 0x00000001) - nouveau_event_trigger(priv->base.vblank, 1, i); + nouveau_disp_vblank(&priv->base, i); nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0); nv_rd32(priv, 0x6100c0 + (i * 0x800)); } @@ -1396,9 +1240,11 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nvd0_disp_vblank_func, .base.outp = nvd0_disp_outp_sclass, .mthd.core = &nvd0_disp_mast_mthd_chan, .mthd.base = &nvd0_disp_sync_mthd_chan, .mthd.ovly = &nvd0_disp_ovly_mthd_chan, .mthd.prev = -0x020000, + .head.scanoutpos = nvd0_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c index 11328e3f5df1..47fef1e398c4 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c @@ -25,7 +25,7 @@ #include <engine/software.h> #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "nv50.h" @@ -200,17 +200,17 @@ nve0_disp_ovly_mthd_chan = { static struct nouveau_oclass nve0_disp_sclass[] = { - { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, + { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, + { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, + { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, + { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, + { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, {} }; static struct nouveau_oclass nve0_disp_base_oclass[] = { - { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, + { GK104_DISP, &nvd0_disp_base_ofuncs }, {} }; @@ -258,9 +258,11 @@ nve0_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nvd0_disp_vblank_func, .base.outp = nvd0_disp_outp_sclass, .mthd.core = &nve0_disp_mast_mthd_chan, .mthd.base = &nvd0_disp_sync_mthd_chan, .mthd.ovly = &nve0_disp_ovly_mthd_chan, .mthd.prev = -0x020000, + .head.scanoutpos = nvd0_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c index 104388081d73..04bda4ac4ed3 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c @@ -25,7 +25,7 @@ #include <engine/software.h> #include <engine/disp.h> -#include <core/class.h> +#include <nvif/class.h> #include "nv50.h" @@ -35,17 +35,17 @@ static struct nouveau_oclass nvf0_disp_sclass[] = { - { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs }, - { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs }, - { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs }, - { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs }, - { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs }, + { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base }, + { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base }, + { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base }, + { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base }, + { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base }, {} }; static struct 
nouveau_oclass nvf0_disp_base_oclass[] = { - { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds }, + { GK110_DISP, &nvd0_disp_base_ofuncs }, {} }; @@ -93,9 +93,11 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) { .init = _nouveau_disp_init, .fini = _nouveau_disp_fini, }, + .base.vblank = &nvd0_disp_vblank_func, .base.outp = nvd0_disp_outp_sclass, .mthd.core = &nve0_disp_mast_mthd_chan, .mthd.base = &nvd0_disp_sync_mthd_chan, .mthd.ovly = &nve0_disp_ovly_mthd_chan, .mthd.prev = -0x020000, + .head.scanoutpos = nvd0_disp_base_scanoutpos, }.base.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c index ad9ba7ccec7f..a5ff00a9cedc 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c @@ -78,6 +78,7 @@ nvkm_output_create_(struct nouveau_object *parent, outp->info = *dcbE; outp->index = index; + outp->or = ffs(outp->info.or) - 1; DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n", dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ? diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h index bc76fbf85710..187f435ad0e2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h @@ -9,6 +9,7 @@ struct nvkm_output { struct dcb_output info; int index; + int or; struct nouveau_i2c_port *port; struct nouveau_i2c_port *edid; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c index eb2d7789555d..6f6e2a898270 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c @@ -22,6 +22,9 @@ * Authors: Ben Skeggs */ +#include <core/os.h> +#include <nvif/event.h> + #include <subdev/i2c.h> #include "outpdp.h" @@ -86,7 +89,7 @@ done: atomic_set(&outp->lt.done, 0); schedule_work(&outp->lt.work); } else { - nouveau_event_get(outp->irq); + nvkm_notify_get(&outp->irq); } if (wait) { @@ -133,46 +136,59 @@ nvkm_output_dp_detect(struct nvkm_output_dp *outp) } } -static void -nvkm_output_dp_service_work(struct work_struct *work) +static int +nvkm_output_dp_hpd(struct nvkm_notify *notify) { - struct nvkm_output_dp *outp = container_of(work, typeof(*outp), work); - struct nouveau_disp *disp = nouveau_disp(outp); - int type = atomic_xchg(&outp->pending, 0); - u32 send = 0; - - if (type & (NVKM_I2C_PLUG | NVKM_I2C_UNPLUG)) { - nvkm_output_dp_detect(outp); - if (type & NVKM_I2C_UNPLUG) - send |= NVKM_HPD_UNPLUG; - if (type & NVKM_I2C_PLUG) - send |= NVKM_HPD_PLUG; - nouveau_event_get(outp->base.conn->hpd.event); - } - - if (type & NVKM_I2C_IRQ) { - nvkm_output_dp_train(&outp->base, 0, true); - send |= NVKM_HPD_IRQ; + struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd); + struct nvkm_output_dp *outp; + struct nouveau_disp *disp = nouveau_disp(conn); + const struct nvkm_i2c_ntfy_rep *line = notify->data; + struct nvif_notify_conn_rep_v0 rep = {}; + + list_for_each_entry(outp, &disp->outp, base.head) { + if (outp->base.conn == conn && + outp->info.type == DCB_OUTPUT_DP) { + DBG("HPD: %d\n", line->mask); + nvkm_output_dp_detect(outp); + + if (line->mask & NVKM_I2C_UNPLUG) + rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG; + if (line->mask & NVKM_I2C_PLUG) + rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG; + + nvkm_event_send(&disp->hpd, rep.mask, conn->index, + &rep, sizeof(rep)); + return NVKM_NOTIFY_KEEP; + } } - 
nouveau_event_trigger(disp->hpd, send, outp->base.info.connector); + WARN_ON(1); + return NVKM_NOTIFY_DROP; } static int -nvkm_output_dp_service(void *data, u32 type, int index) +nvkm_output_dp_irq(struct nvkm_notify *notify) { - struct nvkm_output_dp *outp = data; - DBG("HPD: %d\n", type); - atomic_or(type, &outp->pending); - schedule_work(&outp->work); - return NVKM_EVENT_DROP; + struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq); + struct nouveau_disp *disp = nouveau_disp(outp); + const struct nvkm_i2c_ntfy_rep *line = notify->data; + struct nvif_notify_conn_rep_v0 rep = { + .mask = NVIF_NOTIFY_CONN_V0_IRQ, + }; + int index = outp->base.info.connector; + + DBG("IRQ: %d\n", line->mask); + nvkm_output_dp_train(&outp->base, 0, true); + + nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep)); + return NVKM_NOTIFY_DROP; } int _nvkm_output_dp_fini(struct nouveau_object *object, bool suspend) { struct nvkm_output_dp *outp = (void *)object; - nouveau_event_put(outp->irq); + nvkm_notify_put(&outp->irq); nvkm_output_dp_enable(outp, false); return nvkm_output_fini(&outp->base, suspend); } @@ -189,7 +205,7 @@ void _nvkm_output_dp_dtor(struct nouveau_object *object) { struct nvkm_output_dp *outp = (void *)object; - nouveau_event_ref(NULL, &outp->irq); + nvkm_notify_fini(&outp->irq); nvkm_output_destroy(&outp->base); } @@ -213,7 +229,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent, if (ret) return ret; - nouveau_event_ref(NULL, &outp->base.conn->hpd.event); + nvkm_notify_fini(&outp->base.conn->hpd); /* access to the aux channel is not optional... */ if (!outp->base.edid) { @@ -238,20 +254,28 @@ nvkm_output_dp_create_(struct nouveau_object *parent, atomic_set(&outp->lt.done, 0); /* link maintenance */ - ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_IRQ, outp->base.edid->index, - nvkm_output_dp_service, outp, &outp->irq); + ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_irq, true, + &(struct nvkm_i2c_ntfy_req) { + .mask = NVKM_I2C_IRQ, + .port = outp->base.edid->index, + }, + sizeof(struct nvkm_i2c_ntfy_req), + sizeof(struct nvkm_i2c_ntfy_rep), + &outp->irq); if (ret) { ERR("error monitoring aux irq event: %d\n", ret); return ret; } - INIT_WORK(&outp->work, nvkm_output_dp_service_work); - /* hotplug detect, replaces gpio-based mechanism with aux events */ - ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_PLUG | NVKM_I2C_UNPLUG, - outp->base.edid->index, - nvkm_output_dp_service, outp, - &outp->base.conn->hpd.event); + ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_hpd, true, + &(struct nvkm_i2c_ntfy_req) { + .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG, + .port = outp->base.edid->index, + }, + sizeof(struct nvkm_i2c_ntfy_req), + sizeof(struct nvkm_i2c_ntfy_rep), + &outp->base.conn->hpd); if (ret) { ERR("error monitoring aux hpd events: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h index ff33ba12cb67..1fac367cc867 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h @@ -12,10 +12,7 @@ struct nvkm_output_dp { struct nvbios_dpout info; u8 version; - struct nouveau_eventh *irq; - struct nouveau_eventh *hpd; - struct work_struct work; - atomic_t pending; + struct nvkm_notify irq; bool present; u8 dpcd[16]; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c index fe0f256f11bf..d00f89a468a7 100644 --- 
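The reworked hotplug and link-maintenance paths above use notify handlers that return a keep-or-drop verdict instead of manually re-arming an event. The self-contained sketch below models only that convention; the demo_* dispatcher is invented and is not the nvkm_notify implementation.

/*
 * Toy model of a notifier whose handler returns a keep-or-drop verdict:
 * KEEP stays armed for the next event, DROP disarms it.
 */
#include <stdio.h>

enum { DEMO_NOTIFY_DROP, DEMO_NOTIFY_KEEP };

struct demo_notify {
        int armed;
        int (*func)(struct demo_notify *, unsigned int mask);
};

static void demo_event_send(struct demo_notify *n, unsigned int mask)
{
        if (n->armed && n->func(n, mask) == DEMO_NOTIFY_DROP)
                n->armed = 0;           /* handler asked to be disarmed */
}

static int demo_hpd(struct demo_notify *n, unsigned int mask)
{
        (void)n;
        printf("hotplug event, mask %#x\n", mask);
        return DEMO_NOTIFY_KEEP;        /* keep listening for further plugs */
}

int main(void)
{
        struct demo_notify hpd = { .armed = 1, .func = demo_hpd };

        demo_event_send(&hpd, 0x1);
        demo_event_send(&hpd, 0x2);     /* still armed, fires again */
        return 0;
}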
a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> @@ -143,38 +144,29 @@ nv50_pior_dp_impl = { *****************************************************************************/ int -nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data) +nv50_pior_power(NV50_DISP_MTHD_V1) { - const u32 stat = data & NV50_DISP_PIOR_PWR_STATE; - const u32 soff = (or * 0x800); + const u32 soff = outp->or * 0x800; + union { + struct nv50_disp_pior_pwr_v0 v0; + } *args = data; + u32 ctrl, type; + int ret; + + nv_ioctl(object, "disp pior pwr size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n", + args->v0.version, args->v0.state, args->v0.type); + if (args->v0.type > 0x0f) + return -EINVAL; + ctrl = !!args->v0.state; + type = args->v0.type; + } else + return ret; + nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); - nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat); + nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl); nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000); + priv->pior.type[outp->or] = type; return 0; } - -int -nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12; - const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR); - u32 *data = args; - int ret; - - if (size < sizeof(u32)) - return -EINVAL; - - mthd &= ~NV50_DISP_PIOR_MTHD_TYPE; - mthd &= ~NV50_DISP_PIOR_MTHD_OR; - switch (mthd) { - case NV50_DISP_PIOR_PWR: - ret = priv->pior.power(priv, or, data[0]); - priv->pior.type[or] = type; - break; - default: - return -EINVAL; - } - - return ret; -} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h index 26e9a42569c7..dbd43ae9df81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h +++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h @@ -11,6 +11,7 @@ struct nouveau_disp_impl { struct nouveau_oclass base; struct nouveau_oclass **outp; struct nouveau_oclass **conn; + const struct nvkm_event_func *vblank; }; #define nouveau_disp_create(p,e,c,h,i,x,d) \ @@ -39,4 +40,8 @@ int _nouveau_disp_fini(struct nouveau_object *, bool); extern struct nouveau_oclass *nvkm_output_oclass; extern struct nouveau_oclass *nvkm_connector_oclass; +int nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *); +void nouveau_disp_vblank(struct nouveau_disp *, int head); +int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **); + #endif diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c index 7a1ebdfa9e1b..ddf1760c4400 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> @@ -32,77 +33,26 @@ #include "nv50.h" int -nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data) +nv50_sor_power(NV50_DISP_MTHD_V1) { - const u32 stat = data & NV50_DISP_SOR_PWR_STATE; - 
const u32 soff = (or * 0x800); + union { + struct nv50_disp_sor_pwr_v0 v0; + } *args = data; + const u32 soff = outp->or * 0x800; + u32 stat; + int ret; + + nv_ioctl(object, "disp sor pwr size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "disp sor pwr vers %d state %d\n", + args->v0.version, args->v0.state); + stat = !!args->v0.state; + } else + return ret; + nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat); nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000); nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000); return 0; } - -int -nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size) -{ - struct nv50_disp_priv *priv = (void *)object->engine; - const u8 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12; - const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3; - const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2; - const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR); - const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or); - struct nvkm_output *outp = NULL, *temp; - u32 data; - int ret = -EINVAL; - - if (size < sizeof(u32)) - return -EINVAL; - data = *(u32 *)args; - - list_for_each_entry(temp, &priv->base.outp, head) { - if ((temp->info.hasht & 0xff) == type && - (temp->info.hashm & mask) == mask) { - outp = temp; - break; - } - } - - switch (mthd & ~0x3f) { - case NV50_DISP_SOR_PWR: - ret = priv->sor.power(priv, or, data); - break; - case NVA3_DISP_SOR_HDA_ELD: - ret = priv->sor.hda_eld(priv, or, args, size); - break; - case NV84_DISP_SOR_HDMI_PWR: - ret = priv->sor.hdmi(priv, head, or, data); - break; - case NV50_DISP_SOR_LVDS_SCRIPT: - priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID; - ret = 0; - break; - case NV94_DISP_SOR_DP_PWR: - if (outp) { - struct nvkm_output_dp *outpdp = (void *)outp; - switch (data) { - case NV94_DISP_SOR_DP_PWR_STATE_OFF: - nouveau_event_put(outpdp->irq); - ((struct nvkm_output_dp_impl *)nv_oclass(outp)) - ->lnk_pwr(outpdp, 0); - atomic_set(&outpdp->lt.done, 0); - break; - case NV94_DISP_SOR_DP_PWR_STATE_ON: - nvkm_output_dp_train(&outpdp->base, 0, true); - break; - default: - return -EINVAL; - } - } - break; - default: - BUG_ON(1); - } - - return ret; -} diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c index 05487cda84a8..39f85d627336 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c index 97f0e9cd3d40..7b7bbc3e459e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c index 5103e88d1877..e1500f77a56a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c @@ -23,98 +23,143 @@ */ #include <core/object.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/fb.h> -#include <engine/dmaobj.h> +#include <subdev/instmem.h> + 
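The DMA-object code that follows unpacks its creation arguments from a versioned union via nvif_unpack(). The sketch below is a rough userspace model of that pattern under invented names; demo_dma_v0 and demo_unpack() do not match the real nv_dma_v0 layout or the actual macro.

/*
 * Userspace model of a versioned method argument: the blob starts with a
 * version byte and the handler accepts only versions it understands.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_dma_v0 {
        uint8_t  version;
        uint8_t  target;
        uint8_t  access;
        uint8_t  pad;
        uint64_t start;
        uint64_t limit;
};

/* accept the blob if it is large enough and the version is in [vmin, vmax] */
static int demo_unpack(const void *data, size_t size,
                       struct demo_dma_v0 *out, int vmin, int vmax)
{
        if (size < sizeof(*out))
                return -1;
        memcpy(out, data, sizeof(*out));
        if (out->version < vmin || out->version > vmax)
                return -1;
        return 0;
}

int main(void)
{
        struct demo_dma_v0 args = { .version = 0, .limit = 0xfff };
        struct demo_dma_v0 parsed;

        if (demo_unpack(&args, sizeof(args), &parsed, 0, 0) == 0)
                printf("dma vers %u start %llx limit %llx\n", parsed.version,
                       (unsigned long long)parsed.start,
                       (unsigned long long)parsed.limit);
        return 0;
}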
+#include "priv.h" static int -nouveau_dmaobj_ctor(struct nouveau_object *parent, +nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent, + struct nouveau_gpuobj **pgpuobj) +{ + const struct nvkm_dmaeng_impl *impl = (void *) + nv_oclass(nv_object(dmaobj)->engine); + int ret = 0; + + if (nv_object(dmaobj) == parent) { /* ctor bind */ + if (nv_mclass(parent->parent) == NV_DEVICE) { + /* delayed, or no, binding */ + return 0; + } + ret = impl->bind(dmaobj, parent, pgpuobj); + if (ret == 0) + nouveau_object_ref(NULL, &parent); + return ret; + } + + return impl->bind(dmaobj, parent, pgpuobj); +} + +int +nvkm_dmaobj_create_(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) + struct nouveau_oclass *oclass, void **pdata, u32 *psize, + int length, void **pobject) { - struct nouveau_dmaeng *dmaeng = (void *)engine; + union { + struct nv_dma_v0 v0; + } *args = *pdata; + struct nouveau_instmem *instmem = nouveau_instmem(parent); + struct nouveau_client *client = nouveau_client(parent); + struct nouveau_device *device = nv_device(parent); + struct nouveau_fb *pfb = nouveau_fb(parent); struct nouveau_dmaobj *dmaobj; - struct nouveau_gpuobj *gpuobj; - struct nv_dma_class *args = data; + void *data = *pdata; + u32 size = *psize; int ret; - if (size < sizeof(*args)) - return -EINVAL; - - ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj); - *pobject = nv_object(dmaobj); + ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject); + dmaobj = *pobject; if (ret) return ret; - switch (args->flags & NV_DMA_TARGET_MASK) { - case NV_DMA_TARGET_VM: + nv_ioctl(parent, "create dma size %d\n", *psize); + if (nvif_unpack(args->v0, 0, 0, true)) { + nv_ioctl(parent, "create dma vers %d target %d access %d " + "start %016llx limit %016llx\n", + args->v0.version, args->v0.target, args->v0.access, + args->v0.start, args->v0.limit); + dmaobj->target = args->v0.target; + dmaobj->access = args->v0.access; + dmaobj->start = args->v0.start; + dmaobj->limit = args->v0.limit; + } else + return ret; + + *pdata = data; + *psize = size; + + if (dmaobj->start > dmaobj->limit) + return -EINVAL; + + switch (dmaobj->target) { + case NV_DMA_V0_TARGET_VM: dmaobj->target = NV_MEM_TARGET_VM; break; - case NV_DMA_TARGET_VRAM: + case NV_DMA_V0_TARGET_VRAM: + if (!client->super) { + if (dmaobj->limit >= pfb->ram->size - instmem->reserved) + return -EACCES; + if (device->card_type >= NV_50) + return -EACCES; + } dmaobj->target = NV_MEM_TARGET_VRAM; break; - case NV_DMA_TARGET_PCI: + case NV_DMA_V0_TARGET_PCI: + if (!client->super) + return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI; break; - case NV_DMA_TARGET_PCI_US: - case NV_DMA_TARGET_AGP: + case NV_DMA_V0_TARGET_PCI_US: + case NV_DMA_V0_TARGET_AGP: + if (!client->super) + return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; break; default: return -EINVAL; } - switch (args->flags & NV_DMA_ACCESS_MASK) { - case NV_DMA_ACCESS_VM: + switch (dmaobj->access) { + case NV_DMA_V0_ACCESS_VM: dmaobj->access = NV_MEM_ACCESS_VM; break; - case NV_DMA_ACCESS_RD: + case NV_DMA_V0_ACCESS_RD: dmaobj->access = NV_MEM_ACCESS_RO; break; - case NV_DMA_ACCESS_WR: + case NV_DMA_V0_ACCESS_WR: dmaobj->access = NV_MEM_ACCESS_WO; break; - case NV_DMA_ACCESS_RDWR: + case NV_DMA_V0_ACCESS_RDWR: dmaobj->access = NV_MEM_ACCESS_RW; break; default: return -EINVAL; } - dmaobj->start = args->start; - dmaobj->limit = args->limit; - dmaobj->conf0 = args->conf0; - - 
switch (nv_mclass(parent)) { - case NV_DEVICE_CLASS: - /* delayed, or no, binding */ - break; - default: - ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj); - if (ret == 0) { - nouveau_object_ref(NULL, pobject); - *pobject = nv_object(gpuobj); - } - break; - } - return ret; } -static struct nouveau_ofuncs -nouveau_dmaobj_ofuncs = { - .ctor = nouveau_dmaobj_ctor, - .dtor = nouveau_object_destroy, - .init = nouveau_object_init, - .fini = nouveau_object_fini, -}; - -struct nouveau_oclass -nouveau_dmaobj_sclass[] = { - { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs }, - {} -}; +int +_nvkm_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + const struct nvkm_dmaeng_impl *impl = (void *)oclass; + struct nouveau_dmaeng *dmaeng; + int ret; + + ret = nouveau_engine_create(parent, engine, oclass, true, "DMAOBJ", + "dmaobj", &dmaeng); + *pobject = nv_object(dmaeng); + if (ret) + return ret; + + nv_engine(dmaeng)->sclass = impl->sclass; + dmaeng->bind = nvkm_dmaobj_bind; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c index 027d8217c0fa..20c9dbfe3b2e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c @@ -23,121 +23,143 @@ */ #include <core/gpuobj.h> -#include <core/class.h> +#include <nvif/class.h> #include <subdev/fb.h> #include <subdev/vm/nv04.h> -#include <engine/dmaobj.h> +#include "priv.h" -struct nv04_dmaeng_priv { - struct nouveau_dmaeng base; +struct nv04_dmaobj_priv { + struct nouveau_dmaobj base; + bool clone; + u32 flags0; + u32 flags2; }; static int -nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, +nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { - struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng); + struct nv04_dmaobj_priv *priv = (void *)dmaobj; struct nouveau_gpuobj *gpuobj; - u32 flags0 = nv_mclass(dmaobj); - u32 flags2 = 0x00000000; - u64 offset = dmaobj->start & 0xfffff000; - u64 adjust = dmaobj->start & 0x00000fff; - u32 length = dmaobj->limit - dmaobj->start; + u64 offset = priv->base.start & 0xfffff000; + u64 adjust = priv->base.start & 0x00000fff; + u32 length = priv->base.limit - priv->base.start; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { - case NV03_CHANNEL_DMA_CLASS: - case NV10_CHANNEL_DMA_CLASS: - case NV17_CHANNEL_DMA_CLASS: - case NV40_CHANNEL_DMA_CLASS: + case NV03_CHANNEL_DMA: + case NV10_CHANNEL_DMA: + case NV17_CHANNEL_DMA: + case NV40_CHANNEL_DMA: break; default: return -EINVAL; } } - if (dmaobj->target == NV_MEM_TARGET_VM) { - if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { - struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; - if (!dmaobj->start) - return nouveau_gpuobj_dup(parent, pgt, pgpuobj); - offset = nv_ro32(pgt, 8 + (offset >> 10)); - offset &= 0xfffff000; - } + if (priv->clone) { + struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaobj); + struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; + if (!dmaobj->start) + return nouveau_gpuobj_dup(parent, pgt, pgpuobj); + offset = nv_ro32(pgt, 8 + (offset >> 10)); + offset &= 0xfffff000; + } + + ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj); + *pgpuobj = gpuobj; + if (ret == 0) { + 
nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20)); + nv_wo32(*pgpuobj, 0x04, length); + nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset); + nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset); + } + + return ret; +} + +static int +nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + struct nv04_vmmgr_priv *vmm = nv04_vmmgr(engine); + struct nv04_dmaobj_priv *priv; + int ret; + + ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); + *pobject = nv_object(priv); + if (ret || (ret = -ENOSYS, size)) + return ret; - dmaobj->target = NV_MEM_TARGET_PCI; - dmaobj->access = NV_MEM_ACCESS_RW; + if (priv->base.target == NV_MEM_TARGET_VM) { + if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) + priv->clone = true; + priv->base.target = NV_MEM_TARGET_PCI; + priv->base.access = NV_MEM_ACCESS_RW; } - switch (dmaobj->target) { + priv->flags0 = nv_mclass(priv); + switch (priv->base.target) { case NV_MEM_TARGET_VRAM: - flags0 |= 0x00003000; + priv->flags0 |= 0x00003000; break; case NV_MEM_TARGET_PCI: - flags0 |= 0x00023000; + priv->flags0 |= 0x00023000; break; case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00033000; + priv->flags0 |= 0x00033000; break; default: return -EINVAL; } - switch (dmaobj->access) { + switch (priv->base.access) { case NV_MEM_ACCESS_RO: - flags0 |= 0x00004000; + priv->flags0 |= 0x00004000; break; case NV_MEM_ACCESS_WO: - flags0 |= 0x00008000; + priv->flags0 |= 0x00008000; case NV_MEM_ACCESS_RW: - flags2 |= 0x00000002; + priv->flags2 |= 0x00000002; break; default: return -EINVAL; } - ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj); - *pgpuobj = gpuobj; - if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20)); - nv_wo32(*pgpuobj, 0x04, length); - nv_wo32(*pgpuobj, 0x08, flags2 | offset); - nv_wo32(*pgpuobj, 0x0c, flags2 | offset); - } - - return ret; + return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); } -static int -nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv04_dmaeng_priv *priv; - int ret; - - ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; +static struct nouveau_ofuncs +nv04_dmaobj_ofuncs = { + .ctor = nv04_dmaobj_ctor, + .dtor = _nvkm_dmaobj_dtor, + .init = _nvkm_dmaobj_init, + .fini = _nvkm_dmaobj_fini, +}; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nv04_dmaobj_bind; - return 0; -} +static struct nouveau_oclass +nv04_dmaeng_sclass[] = { + { NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs }, + { NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs }, + { NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs }, + {} +}; -struct nouveau_oclass -nv04_dmaeng_oclass = { - .handle = NV_ENGINE(DMAOBJ, 0x04), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv04_dmaeng_ctor, - .dtor = _nouveau_dmaeng_dtor, - .init = _nouveau_dmaeng_init, - .fini = _nouveau_dmaeng_fini, +struct nouveau_oclass * +nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) { + .base.handle = NV_ENGINE(DMAOBJ, 0x04), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nvkm_dmaeng_ctor, + .dtor = _nvkm_dmaeng_dtor, + .init = _nvkm_dmaeng_init, + .fini = _nvkm_dmaeng_fini, }, -}; + .sclass = nv04_dmaeng_sclass, + .bind = nv04_dmaobj_bind, +}.base; diff --git 
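The nv04 bind path above emits the DMA object as four 32-bit words. The following sketch mirrors those nv_wo32() writes but only fills an array for inspection; demo_pack_nv04_dma() is an illustrative helper, not driver code.

/*
 * Sketch of the four-word layout the nv04 bind above writes: flags plus
 * the in-page adjust, the length, and the page-aligned offset twice.
 */
#include <stdint.h>
#include <stdio.h>

static void demo_pack_nv04_dma(uint32_t flags0, uint32_t flags2,
                               uint64_t start, uint64_t limit, uint32_t w[4])
{
        uint64_t offset = start & 0xfffff000;   /* page-aligned base */
        uint64_t adjust = start & 0x00000fff;   /* offset within the page */
        uint32_t length = (uint32_t)(limit - start);

        w[0] = flags0 | (uint32_t)(adjust << 20);
        w[1] = length;
        w[2] = flags2 | (uint32_t)offset;
        w[3] = flags2 | (uint32_t)offset;
}

int main(void)
{
        uint32_t w[4];

        /* VRAM target flags and read/write access, as in the ctor above */
        demo_pack_nv04_dma(0x00003000, 0x00000002, 0x1000, 0x1fff, w);
        printf("%08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
        return 0;
}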
a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c index 750183f7c057..a740ddba2ee2 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c @@ -22,140 +22,176 @@ * Authors: Ben Skeggs */ +#include <core/client.h> #include <core/gpuobj.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/fb.h> -#include <engine/dmaobj.h> -struct nv50_dmaeng_priv { - struct nouveau_dmaeng base; +#include "priv.h" + +struct nv50_dmaobj_priv { + struct nouveau_dmaobj base; + u32 flags0; + u32 flags5; }; static int -nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, +nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { - u32 flags0 = nv_mclass(dmaobj); - u32 flags5 = 0x00000000; + struct nv50_dmaobj_priv *priv = (void *)dmaobj; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { - case NV50_CHANNEL_DMA_CLASS: - case NV84_CHANNEL_DMA_CLASS: - case NV50_CHANNEL_IND_CLASS: - case NV84_CHANNEL_IND_CLASS: - case NV50_DISP_MAST_CLASS: - case NV84_DISP_MAST_CLASS: - case NV94_DISP_MAST_CLASS: - case NVA0_DISP_MAST_CLASS: - case NVA3_DISP_MAST_CLASS: - case NV50_DISP_SYNC_CLASS: - case NV84_DISP_SYNC_CLASS: - case NV94_DISP_SYNC_CLASS: - case NVA0_DISP_SYNC_CLASS: - case NVA3_DISP_SYNC_CLASS: - case NV50_DISP_OVLY_CLASS: - case NV84_DISP_OVLY_CLASS: - case NV94_DISP_OVLY_CLASS: - case NVA0_DISP_OVLY_CLASS: - case NVA3_DISP_OVLY_CLASS: + case NV40_CHANNEL_DMA: + case NV50_CHANNEL_GPFIFO: + case G82_CHANNEL_GPFIFO: + case NV50_DISP_CORE_CHANNEL_DMA: + case G82_DISP_CORE_CHANNEL_DMA: + case GT206_DISP_CORE_CHANNEL_DMA: + case GT200_DISP_CORE_CHANNEL_DMA: + case GT214_DISP_CORE_CHANNEL_DMA: + case NV50_DISP_BASE_CHANNEL_DMA: + case G82_DISP_BASE_CHANNEL_DMA: + case GT200_DISP_BASE_CHANNEL_DMA: + case GT214_DISP_BASE_CHANNEL_DMA: + case NV50_DISP_OVERLAY_CHANNEL_DMA: + case G82_DISP_OVERLAY_CHANNEL_DMA: + case GT200_DISP_OVERLAY_CHANNEL_DMA: + case GT214_DISP_OVERLAY_CHANNEL_DMA: break; default: return -EINVAL; } } - if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM; - dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM; - dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM; - dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM; + ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); + if (ret == 0) { + nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj)); + nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit)); + nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start)); + nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 | + upper_32_bits(priv->base.start)); + nv_wo32(*pgpuobj, 0x10, 0x00000000); + nv_wo32(*pgpuobj, 0x14, priv->flags5); + } + + return ret; +} + +static int +nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + union { + struct nv50_dma_v0 v0; + } *args; + struct nv50_dmaobj_priv *priv; + u32 user, part, comp, kind; + int ret; + + ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + args = data; + + nv_ioctl(parent, "create nv50 dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + 
nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d " + "comp %d kind %02x\n", args->v0.version, + args->v0.priv, args->v0.part, args->v0.comp, + args->v0.kind); + user = args->v0.priv; + part = args->v0.part; + comp = args->v0.comp; + kind = args->v0.kind; + } else + if (size == 0) { + if (priv->base.target != NV_MEM_TARGET_VM) { + user = NV50_DMA_V0_PRIV_US; + part = NV50_DMA_V0_PART_256; + comp = NV50_DMA_V0_COMP_NONE; + kind = NV50_DMA_V0_KIND_PITCH; } else { - dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US; - dmaobj->conf0 |= NV50_DMA_CONF0_PART_256; - dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE; - dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR; + user = NV50_DMA_V0_PRIV_VM; + part = NV50_DMA_V0_PART_VM; + comp = NV50_DMA_V0_COMP_VM; + kind = NV50_DMA_V0_KIND_VM; } - } + } else + return ret; - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22; - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22; - flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV); - flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART); + if (user > 2 || part > 2 || comp > 3 || kind > 0x7f) + return -EINVAL; + priv->flags0 = (comp << 29) | (kind << 22) | (user << 20); + priv->flags5 = (part << 16); - switch (dmaobj->target) { + switch (priv->base.target) { case NV_MEM_TARGET_VM: - flags0 |= 0x00000000; + priv->flags0 |= 0x00000000; break; case NV_MEM_TARGET_VRAM: - flags0 |= 0x00010000; + priv->flags0 |= 0x00010000; break; case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; + priv->flags0 |= 0x00020000; break; case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; + priv->flags0 |= 0x00030000; break; default: return -EINVAL; } - switch (dmaobj->access) { + switch (priv->base.access) { case NV_MEM_ACCESS_VM: break; case NV_MEM_ACCESS_RO: - flags0 |= 0x00040000; + priv->flags0 |= 0x00040000; break; case NV_MEM_ACCESS_WO: case NV_MEM_ACCESS_RW: - flags0 |= 0x00080000; + priv->flags0 |= 0x00080000; break; + default: + return -EINVAL; } - ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); - if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); - nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); - nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | - upper_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, flags5); - } - - return ret; + return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); } -static int -nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv50_dmaeng_priv *priv; - int ret; - - ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; +static struct nouveau_ofuncs +nv50_dmaobj_ofuncs = { + .ctor = nv50_dmaobj_ctor, + .dtor = _nvkm_dmaobj_dtor, + .init = _nvkm_dmaobj_init, + .fini = _nvkm_dmaobj_fini, +}; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nv50_dmaobj_bind; - return 0; -} +static struct nouveau_oclass +nv50_dmaeng_sclass[] = { + { NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs }, + { NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs }, + { NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs }, + {} +}; -struct nouveau_oclass -nv50_dmaeng_oclass = { - .handle = NV_ENGINE(DMAOBJ, 0x50), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv50_dmaeng_ctor, - .dtor = _nouveau_dmaeng_dtor, - .init = _nouveau_dmaeng_init, - .fini = _nouveau_dmaeng_fini, +struct nouveau_oclass * +nv50_dmaeng_oclass = &(struct 
nvkm_dmaeng_impl) { + .base.handle = NV_ENGINE(DMAOBJ, 0x50), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nvkm_dmaeng_ctor, + .dtor = _nvkm_dmaeng_dtor, + .init = _nvkm_dmaeng_init, + .fini = _nvkm_dmaeng_fini, }, -}; + .sclass = nv50_dmaeng_sclass, + .bind = nv50_dmaobj_bind, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c index cd3970d03b80..88ec33b20048 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c @@ -22,32 +22,35 @@ * Authors: Ben Skeggs */ +#include <core/client.h> #include <core/device.h> #include <core/gpuobj.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/fb.h> -#include <engine/dmaobj.h> -struct nvc0_dmaeng_priv { - struct nouveau_dmaeng base; +#include "priv.h" + +struct nvc0_dmaobj_priv { + struct nouveau_dmaobj base; + u32 flags0; + u32 flags5; }; static int -nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, +nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { - u32 flags0 = nv_mclass(dmaobj); - u32 flags5 = 0x00000000; + struct nvc0_dmaobj_priv *priv = (void *)dmaobj; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { - case NVA3_DISP_MAST_CLASS: - case NVA3_DISP_SYNC_CLASS: - case NVA3_DISP_OVLY_CLASS: + case GT214_DISP_CORE_CHANNEL_DMA: + case GT214_DISP_BASE_CHANNEL_DMA: + case GT214_DISP_OVERLAY_CHANNEL_DMA: break; default: return -EINVAL; @@ -55,89 +58,122 @@ nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, } else return 0; - if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM; - dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM; + ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); + if (ret == 0) { + nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj)); + nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit)); + nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start)); + nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 | + upper_32_bits(priv->base.start)); + nv_wo32(*pgpuobj, 0x10, 0x00000000); + nv_wo32(*pgpuobj, 0x14, priv->flags5); + } + + return ret; +} + +static int +nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_dmaeng *dmaeng = (void *)engine; + union { + struct gf100_dma_v0 v0; + } *args; + struct nvc0_dmaobj_priv *priv; + u32 kind, user, unkn; + int ret; + + ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + args = data; + + nv_ioctl(parent, "create gf100 dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n", + args->v0.version, args->v0.priv, args->v0.kind); + kind = args->v0.kind; + user = args->v0.priv; + unkn = 0; + } else + if (size == 0) { + if (priv->base.target != NV_MEM_TARGET_VM) { + kind = GF100_DMA_V0_KIND_PITCH; + user = GF100_DMA_V0_PRIV_US; + unkn = 2; } else { - dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US; - dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR; - dmaobj->conf0 |= 0x00020000; + kind = GF100_DMA_V0_KIND_VM; + user = GF100_DMA_V0_PRIV_VM; + unkn = 0; } - } + } else + return ret; - flags0 |= (dmaobj->conf0 & 
NVC0_DMA_CONF0_TYPE) << 22; - flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV); - flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN); + if (user > 2) + return -EINVAL; + priv->flags0 |= (kind << 22) | (user << 20); + priv->flags5 |= (unkn << 16); - switch (dmaobj->target) { + switch (priv->base.target) { case NV_MEM_TARGET_VM: - flags0 |= 0x00000000; + priv->flags0 |= 0x00000000; break; case NV_MEM_TARGET_VRAM: - flags0 |= 0x00010000; + priv->flags0 |= 0x00010000; break; case NV_MEM_TARGET_PCI: - flags0 |= 0x00020000; + priv->flags0 |= 0x00020000; break; case NV_MEM_TARGET_PCI_NOSNOOP: - flags0 |= 0x00030000; + priv->flags0 |= 0x00030000; break; default: return -EINVAL; } - switch (dmaobj->access) { + switch (priv->base.access) { case NV_MEM_ACCESS_VM: break; case NV_MEM_ACCESS_RO: - flags0 |= 0x00040000; + priv->flags0 |= 0x00040000; break; case NV_MEM_ACCESS_WO: case NV_MEM_ACCESS_RW: - flags0 |= 0x00080000; + priv->flags0 |= 0x00080000; break; } - ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); - if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); - nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); - nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | - upper_32_bits(dmaobj->start)); - nv_wo32(*pgpuobj, 0x10, 0x00000000); - nv_wo32(*pgpuobj, 0x14, flags5); - } - - return ret; + return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); } -static int -nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_dmaeng_priv *priv; - int ret; - - ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; +static struct nouveau_ofuncs +nvc0_dmaobj_ofuncs = { + .ctor = nvc0_dmaobj_ctor, + .dtor = _nvkm_dmaobj_dtor, + .init = _nvkm_dmaobj_init, + .fini = _nvkm_dmaobj_fini, +}; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nvc0_dmaobj_bind; - return 0; -} +static struct nouveau_oclass +nvc0_dmaeng_sclass[] = { + { NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs }, + { NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs }, + { NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs }, + {} +}; -struct nouveau_oclass -nvc0_dmaeng_oclass = { - .handle = NV_ENGINE(DMAOBJ, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_dmaeng_ctor, - .dtor = _nouveau_dmaeng_dtor, - .init = _nouveau_dmaeng_init, - .fini = _nouveau_dmaeng_fini, +struct nouveau_oclass * +nvc0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) { + .base.handle = NV_ENGINE(DMAOBJ, 0xc0), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nvkm_dmaeng_ctor, + .dtor = _nvkm_dmaeng_dtor, + .init = _nvkm_dmaeng_init, + .fini = _nvkm_dmaeng_fini, }, -}; + .sclass = nvc0_dmaeng_sclass, + .bind = nvc0_dmaobj_bind, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c index 1cfb3bb90131..3fc4f0b0eaca 100644 --- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c @@ -22,40 +22,40 @@ * Authors: Ben Skeggs */ +#include <core/client.h> #include <core/device.h> #include <core/gpuobj.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/fb.h> -#include <engine/dmaobj.h> -struct nvd0_dmaeng_priv { - struct nouveau_dmaeng base; +#include "priv.h" + +struct nvd0_dmaobj_priv { + struct nouveau_dmaobj base; + u32 flags0; }; static 
int -nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, +nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **pgpuobj) { - u32 flags0 = 0x00000000; + struct nvd0_dmaobj_priv *priv = (void *)dmaobj; int ret; if (!nv_iclass(parent, NV_ENGCTX_CLASS)) { switch (nv_mclass(parent->parent)) { - case NVD0_DISP_MAST_CLASS: - case NVD0_DISP_SYNC_CLASS: - case NVD0_DISP_OVLY_CLASS: - case NVE0_DISP_MAST_CLASS: - case NVE0_DISP_SYNC_CLASS: - case NVE0_DISP_OVLY_CLASS: - case NVF0_DISP_MAST_CLASS: - case NVF0_DISP_SYNC_CLASS: - case NVF0_DISP_OVLY_CLASS: - case GM107_DISP_MAST_CLASS: - case GM107_DISP_SYNC_CLASS: - case GM107_DISP_OVLY_CLASS: + case GF110_DISP_CORE_CHANNEL_DMA: + case GK104_DISP_CORE_CHANNEL_DMA: + case GK110_DISP_CORE_CHANNEL_DMA: + case GM107_DISP_CORE_CHANNEL_DMA: + case GF110_DISP_BASE_CHANNEL_DMA: + case GK104_DISP_BASE_CHANNEL_DMA: + case GK110_DISP_BASE_CHANNEL_DMA: + case GF110_DISP_OVERLAY_CONTROL_DMA: + case GK104_DISP_OVERLAY_CONTROL_DMA: break; default: return -EINVAL; @@ -63,33 +63,11 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, } else return 0; - if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) { - if (dmaobj->target == NV_MEM_TARGET_VM) { - dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM; - dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP; - } else { - dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR; - dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP; - } - } - - flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20; - flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4; - - switch (dmaobj->target) { - case NV_MEM_TARGET_VRAM: - flags0 |= 0x00000009; - break; - default: - return -EINVAL; - break; - } - ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); if (ret == 0) { - nv_wo32(*pgpuobj, 0x00, flags0); - nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8); - nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8); + nv_wo32(*pgpuobj, 0x00, priv->flags0); + nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8); + nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8); nv_wo32(*pgpuobj, 0x0c, 0x00000000); nv_wo32(*pgpuobj, 0x10, 0x00000000); nv_wo32(*pgpuobj, 0x14, 0x00000000); @@ -99,30 +77,91 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng, } static int -nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, +nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nvd0_dmaeng_priv *priv; + struct nouveau_dmaeng *dmaeng = (void *)engine; + union { + struct gf110_dma_v0 v0; + } *args; + struct nvd0_dmaobj_priv *priv; + u32 kind, page; int ret; - ret = nouveau_dmaeng_create(parent, engine, oclass, &priv); + ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv); *pobject = nv_object(priv); if (ret) return ret; + args = data; - nv_engine(priv)->sclass = nouveau_dmaobj_sclass; - priv->base.bind = nvd0_dmaobj_bind; - return 0; + nv_ioctl(parent, "create gf110 dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n", + args->v0.version, args->v0.page, args->v0.kind); + kind = args->v0.kind; + page = args->v0.page; + } else + if (size == 0) { + if (priv->base.target != NV_MEM_TARGET_VM) { + kind = GF110_DMA_V0_KIND_PITCH; + page = GF110_DMA_V0_PAGE_SP; + } else { + kind = GF110_DMA_V0_KIND_VM; + page = GF110_DMA_V0_PAGE_LP; + } + } else + return ret; + + if (page > 1) + return -EINVAL; + priv->flags0 = (kind << 20) | 
(page << 6); + + switch (priv->base.target) { + case NV_MEM_TARGET_VRAM: + priv->flags0 |= 0x00000009; + break; + case NV_MEM_TARGET_VM: + case NV_MEM_TARGET_PCI: + case NV_MEM_TARGET_PCI_NOSNOOP: + /* XXX: don't currently know how to construct a real one + * of these. we only use them to represent pushbufs + * on these chipsets, and the classes that use them + * deal with the target themselves. + */ + break; + default: + return -EINVAL; + } + + return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject); } -struct nouveau_oclass -nvd0_dmaeng_oclass = { - .handle = NV_ENGINE(DMAOBJ, 0xd0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvd0_dmaeng_ctor, - .dtor = _nouveau_dmaeng_dtor, - .init = _nouveau_dmaeng_init, - .fini = _nouveau_dmaeng_fini, - }, +static struct nouveau_ofuncs +nvd0_dmaobj_ofuncs = { + .ctor = nvd0_dmaobj_ctor, + .dtor = _nvkm_dmaobj_dtor, + .init = _nvkm_dmaobj_init, + .fini = _nvkm_dmaobj_fini, }; + +static struct nouveau_oclass +nvd0_dmaeng_sclass[] = { + { NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs }, + { NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs }, + { NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs }, + {} +}; + +struct nouveau_oclass * +nvd0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) { + .base.handle = NV_ENGINE(DMAOBJ, 0xd0), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nvkm_dmaeng_ctor, + .dtor = _nvkm_dmaeng_dtor, + .init = _nvkm_dmaeng_init, + .fini = _nvkm_dmaeng_fini, + }, + .sclass = nvd0_dmaeng_sclass, + .bind = nvd0_dmaobj_bind, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h new file mode 100644 index 000000000000..36f743866937 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h @@ -0,0 +1,30 @@ +#ifndef __NVKM_DMAOBJ_PRIV_H__ +#define __NVKM_DMAOBJ_PRIV_H__ + +#include <engine/dmaobj.h> + +#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \ + nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d) + +int nvkm_dmaobj_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void **, u32 *, + int, void **); +#define _nvkm_dmaobj_dtor nouveau_object_destroy +#define _nvkm_dmaobj_init nouveau_object_init +#define _nvkm_dmaobj_fini nouveau_object_fini + +int _nvkm_dmaeng_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +#define _nvkm_dmaeng_dtor _nouveau_engine_dtor +#define _nvkm_dmaeng_init _nouveau_engine_init +#define _nvkm_dmaeng_fini _nouveau_engine_fini + +struct nvkm_dmaeng_impl { + struct nouveau_oclass base; + struct nouveau_oclass *sclass; + int (*bind)(struct nouveau_dmaobj *, struct nouveau_object *, + struct nouveau_gpuobj **); +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c index 56ed3d73bf8e..0f999fc45ab9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c @@ -26,11 +26,30 @@ #include <core/object.h> #include <core/handle.h> #include <core/event.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> +#include <nvif/event.h> #include <engine/dmaobj.h> #include <engine/fifo.h> +static int +nouveau_fifo_event_ctor(void *data, u32 size, struct nvkm_notify *notify) +{ + if (size == 0) { + notify->size = 0; + notify->types = 1; + notify->index = 0; + return 0; + } + return -ENOSYS; +} + +static const struct nvkm_event_func +nouveau_fifo_event_func = { + .ctor = 
nouveau_fifo_event_ctor, +}; + int nouveau_fifo_channel_create_(struct nouveau_object *parent, struct nouveau_object *engine, @@ -59,14 +78,14 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, dmaeng = (void *)chan->pushdma->base.engine; switch (chan->pushdma->base.oclass->handle) { - case NV_DMA_FROM_MEMORY_CLASS: - case NV_DMA_IN_MEMORY_CLASS: + case NV_DMA_FROM_MEMORY: + case NV_DMA_IN_MEMORY: break; default: return -EINVAL; } - ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); + ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu); if (ret) return ret; @@ -85,15 +104,10 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent, return -ENOSPC; } - /* map fifo control registers */ - chan->user = ioremap(nv_device_resource_start(device, bar) + addr + - (chan->chid * size), size); - if (!chan->user) - return -EFAULT; - - nouveau_event_trigger(priv->cevent, 1, 0); - + chan->addr = nv_device_resource_start(device, bar) + + addr + size * chan->chid; chan->size = size; + nvkm_event_send(&priv->cevent, 1, 0, NULL, 0); return 0; } @@ -103,7 +117,8 @@ nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan) struct nouveau_fifo *priv = (void *)nv_object(chan)->engine; unsigned long flags; - iounmap(chan->user); + if (chan->user) + iounmap(chan->user); spin_lock_irqsave(&priv->lock, flags); priv->channel[chan->chid] = NULL; @@ -121,10 +136,24 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object) nouveau_fifo_channel_destroy(chan); } +int +_nouveau_fifo_channel_map(struct nouveau_object *object, u64 *addr, u32 *size) +{ + struct nouveau_fifo_chan *chan = (void *)object; + *addr = chan->addr; + *size = chan->size; + return 0; +} + u32 _nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr) { struct nouveau_fifo_chan *chan = (void *)object; + if (unlikely(!chan->user)) { + chan->user = ioremap(chan->addr, chan->size); + if (WARN_ON_ONCE(chan->user == NULL)) + return 0; + } return ioread32_native(chan->user + addr); } @@ -132,9 +161,57 @@ void _nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data) { struct nouveau_fifo_chan *chan = (void *)object; + if (unlikely(!chan->user)) { + chan->user = ioremap(chan->addr, chan->size); + if (WARN_ON_ONCE(chan->user == NULL)) + return; + } iowrite32_native(data, chan->user + addr); } +int +nouveau_fifo_uevent_ctor(void *data, u32 size, struct nvkm_notify *notify) +{ + union { + struct nvif_notify_uevent_req none; + } *req = data; + int ret; + + if (nvif_unvers(req->none)) { + notify->size = sizeof(struct nvif_notify_uevent_rep); + notify->types = 1; + notify->index = 0; + } + + return ret; +} + +void +nouveau_fifo_uevent(struct nouveau_fifo *fifo) +{ + struct nvif_notify_uevent_rep rep = { + }; + nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep)); +} + +int +_nouveau_fifo_channel_ntfy(struct nouveau_object *object, u32 type, + struct nvkm_event **event) +{ + struct nouveau_fifo *fifo = (void *)object->engine; + switch (type) { + case G82_CHANNEL_DMA_V0_NTFY_UEVENT: + if (nv_mclass(object) >= G82_CHANNEL_DMA) { + *event = &fifo->uevent; + return 0; + } + break; + default: + break; + } + return -EINVAL; +} + static int nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object) { @@ -168,8 +245,8 @@ void nouveau_fifo_destroy(struct nouveau_fifo *priv) { kfree(priv->channel); - nouveau_event_destroy(&priv->uevent); - nouveau_event_destroy(&priv->cevent); + nvkm_event_fini(&priv->uevent); + nvkm_event_fini(&priv->cevent); nouveau_engine_destroy(&priv->base); } @@ 
-194,11 +271,7 @@ nouveau_fifo_create_(struct nouveau_object *parent, if (!priv->channel) return -ENOMEM; - ret = nouveau_event_create(1, 1, &priv->cevent); - if (ret) - return ret; - - ret = nouveau_event_create(1, 1, &priv->uevent); + ret = nvkm_event_init(&nouveau_fifo_event_func, 1, 1, &priv->cevent); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c index c61b16a63884..5ae6a43893b5 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <core/engctx.h> #include <core/namedb.h> #include <core/handle.h> @@ -117,16 +118,23 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv03_channel_dma_v0 v0; + } *args = data; struct nv04_fifo_priv *priv = (void *)engine; struct nv04_fifo_chan *chan; - struct nv03_channel_dma_class *args = data; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " + "offset %016llx\n", args->v0.version, + args->v0.pushbuf, args->v0.offset); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, - 0x10000, args->pushbuf, + 0x10000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR), &chan); @@ -134,13 +142,15 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + nv_parent(chan)->object_attach = nv04_fifo_object_attach; nv_parent(chan)->object_detach = nv04_fifo_object_detach; nv_parent(chan)->context_attach = nv04_fifo_context_attach; chan->ramfc = chan->base.chid * 32; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); nv_wo32(priv->ramfc, chan->ramfc + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | @@ -242,13 +252,15 @@ nv04_fifo_ofuncs = { .dtor = nv04_fifo_chan_dtor, .init = nv04_fifo_chan_init, .fini = nv04_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nv04_fifo_sclass[] = { - { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs }, + { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs }, {} }; @@ -539,7 +551,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev) } if (status & 0x40000000) { - nouveau_event_trigger(priv->base.uevent, 1, 0); + nouveau_fifo_uevent(&priv->base); nv_wr32(priv, 0x002100, 0x40000000); status &= ~0x40000000; } diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c index 571a22aa1ae5..2a32add51c81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> 
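The channel constructors rewritten in this series of hunks stop taking a fixed-size class struct and instead unpack a versioned blob with nvif_unpack(), log the request through nv_ioctl(), and write the allocated channel id back into args->v0.chid. nvif_unpack() is a macro from the nvif headers included above; what follows is only a rough standalone model of the shape of that check (chan_dma_v0 and unpack_v0() are illustrative, and the real nv03_channel_dma_v0 may lay out its fields differently):

/* Toy model of the versioned-argument unpack used by the new constructors. */
#include <stdint.h>
#include <stdio.h>

struct chan_dma_v0 {			/* stand-in for nv03_channel_dma_v0 */
	uint8_t  version;
	uint8_t  chid;			/* filled in on the way back out */
	uint8_t  pad02[2];
	uint32_t pushbuf;
	uint64_t offset;
};

static int unpack_v0(void *data, uint32_t size, uint8_t vmin, uint8_t vmax,
		     struct chan_dma_v0 **out)
{
	struct chan_dma_v0 *args = data;
	if (size < sizeof(*args))
		return -22;		/* blob too small for this revision */
	if (args->version < vmin || args->version > vmax)
		return -38;		/* revision this build doesn't know */
	*out = args;
	return 0;
}

int main(void)
{
	struct chan_dma_v0 req = { .version = 0, .pushbuf = 0xbeef0001,
				   .offset = 0x100000 };
	struct chan_dma_v0 *v0;

	if (unpack_v0(&req, sizeof(req), 0, 0, &v0) == 0) {
		v0->chid = 3;		/* ctor reports the channel it picked */
		printf("pushbuf %08x offset %016llx chid %u\n",
		       (unsigned)v0->pushbuf,
		       (unsigned long long)v0->offset, v0->chid);
	}
	return 0;
}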
#include <core/engctx.h> #include <core/ramht.h> @@ -59,16 +60,23 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv03_channel_dma_v0 v0; + } *args = data; struct nv04_fifo_priv *priv = (void *)engine; struct nv04_fifo_chan *chan; - struct nv03_channel_dma_class *args = data; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " + "offset %016llx\n", args->v0.version, + args->v0.pushbuf, args->v0.offset); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, - 0x10000, args->pushbuf, + 0x10000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR), &chan); @@ -76,13 +84,15 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + nv_parent(chan)->object_attach = nv04_fifo_object_attach; nv_parent(chan)->object_detach = nv04_fifo_object_detach; nv_parent(chan)->context_attach = nv04_fifo_context_attach; chan->ramfc = chan->base.chid * 32; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); nv_wo32(priv->ramfc, chan->ramfc + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | @@ -100,13 +110,15 @@ nv10_fifo_ofuncs = { .dtor = nv04_fifo_chan_dtor, .init = nv04_fifo_chan_init, .fini = nv04_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nv10_fifo_sclass[] = { - { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs }, + { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs }, {} }; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c index f25760209316..12d76c8adb23 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <core/engctx.h> #include <core/ramht.h> @@ -64,16 +65,23 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv03_channel_dma_v0 v0; + } *args = data; struct nv04_fifo_priv *priv = (void *)engine; struct nv04_fifo_chan *chan; - struct nv03_channel_dma_class *args = data; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " + "offset %016llx\n", args->v0.version, + args->v0.pushbuf, args->v0.offset); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, - 0x10000, args->pushbuf, + 0x10000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | @@ -83,13 +91,15 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = 
chan->base.chid; + nv_parent(chan)->object_attach = nv04_fifo_object_attach; nv_parent(chan)->object_detach = nv04_fifo_object_detach; nv_parent(chan)->context_attach = nv04_fifo_context_attach; chan->ramfc = chan->base.chid * 64; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); nv_wo32(priv->ramfc, chan->ramfc + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | @@ -107,13 +117,15 @@ nv17_fifo_ofuncs = { .dtor = nv04_fifo_chan_dtor, .init = nv04_fifo_chan_init, .fini = nv04_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nv17_fifo_sclass[] = { - { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs }, + { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs }, {} }; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c index 343487ed2238..9f49c3a24dc6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c @@ -22,8 +22,9 @@ * Authors: Ben Skeggs */ -#include <core/os.h> -#include <core/class.h> +#include <core/client.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <core/engctx.h> #include <core/ramht.h> @@ -182,16 +183,23 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv03_channel_dma_v0 v0; + } *args = data; struct nv04_fifo_priv *priv = (void *)engine; struct nv04_fifo_chan *chan; - struct nv03_channel_dma_class *args = data; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " + "offset %016llx\n", args->v0.version, + args->v0.pushbuf, args->v0.offset); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, - 0x1000, args->pushbuf, + 0x1000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | @@ -200,14 +208,16 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + nv_parent(chan)->context_attach = nv40_fifo_context_attach; nv_parent(chan)->context_detach = nv40_fifo_context_detach; nv_parent(chan)->object_attach = nv40_fifo_object_attach; nv_parent(chan)->object_detach = nv04_fifo_object_detach; chan->ramfc = chan->base.chid * 128; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | @@ -226,13 +236,15 @@ nv40_fifo_ofuncs = { .dtor = nv04_fifo_chan_dtor, .init = nv04_fifo_chan_init, .fini = nv04_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nv40_fifo_sclass[] = { - { 
NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs }, + { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs }, {} }; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index e6352bd5b4ff..5d1e86bc244c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -25,7 +25,8 @@ #include <core/client.h> #include <core/engctx.h> #include <core/ramht.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/timer.h> #include <subdev/bar.h> @@ -194,17 +195,24 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv03_channel_dma_v0 v0; + } *args = data; struct nouveau_bar *bar = nouveau_bar(parent); struct nv50_fifo_base *base = (void *)parent; struct nv50_fifo_chan *chan; - struct nv03_channel_dma_class *args = data; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " + "offset %016llx\n", args->v0.version, + args->v0.pushbuf, args->v0.offset); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, - 0x2000, args->pushbuf, + 0x2000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | @@ -213,6 +221,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + nv_parent(chan)->context_attach = nv50_fifo_context_attach; nv_parent(chan)->context_detach = nv50_fifo_context_detach; nv_parent(chan)->object_attach = nv50_fifo_object_attach; @@ -223,10 +233,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent, if (ret) return ret; - nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); - nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); - nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); - nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); + nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset)); + nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); + nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset)); + nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset)); nv_wo32(base->ramfc, 0x3c, 0x003f6078); nv_wo32(base->ramfc, 0x44, 0x01003fff); nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); @@ -247,18 +257,26 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct nv50_channel_ind_class *args = data; + union { + struct nv50_channel_gpfifo_v0 v0; + } *args = data; struct nouveau_bar *bar = nouveau_bar(parent); struct nv50_fifo_base *base = (void *)parent; struct nv50_fifo_chan *chan; u64 ioffset, ilength; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel gpfifo size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " + "ioffset %016llx ilength %08x\n", + args->v0.version, args->v0.pushbuf, args->v0.ioffset, + args->v0.ilength); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, - 0x2000, args->pushbuf, + 0x2000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | @@ -267,6 +285,8 @@ 
nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + nv_parent(chan)->context_attach = nv50_fifo_context_attach; nv_parent(chan)->context_detach = nv50_fifo_context_detach; nv_parent(chan)->object_attach = nv50_fifo_object_attach; @@ -277,8 +297,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent, if (ret) return ret; - ioffset = args->ioffset; - ilength = order_base_2(args->ilength / 8); + ioffset = args->v0.ioffset; + ilength = order_base_2(args->v0.ilength / 8); nv_wo32(base->ramfc, 0x3c, 0x403f6078); nv_wo32(base->ramfc, 0x44, 0x01003fff); @@ -343,8 +363,10 @@ nv50_fifo_ofuncs_dma = { .dtor = nv50_fifo_chan_dtor, .init = nv50_fifo_chan_init, .fini = nv50_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_ofuncs @@ -353,14 +375,16 @@ nv50_fifo_ofuncs_ind = { .dtor = nv50_fifo_chan_dtor, .init = nv50_fifo_chan_init, .fini = nv50_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nv50_fifo_sclass[] = { - { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma }, - { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind }, + { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma }, + { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind }, {} }; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c index 6e5ac16e5460..1f42996b354a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c @@ -27,7 +27,8 @@ #include <core/engctx.h> #include <core/ramht.h> #include <core/event.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <subdev/timer.h> #include <subdev/bar.h> @@ -160,17 +161,24 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv03_channel_dma_v0 v0; + } *args = data; struct nouveau_bar *bar = nouveau_bar(parent); struct nv50_fifo_base *base = (void *)parent; struct nv50_fifo_chan *chan; - struct nv03_channel_dma_class *args = data; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel dma size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " + "offset %016llx\n", args->v0.version, + args->v0.pushbuf, args->v0.offset); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, - 0x2000, args->pushbuf, + 0x2000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | @@ -186,6 +194,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, &chan->ramht); if (ret) @@ -196,10 +206,10 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent, nv_parent(chan)->object_attach = nv84_fifo_object_attach; nv_parent(chan)->object_detach = nv50_fifo_object_detach; - nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset)); - nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset)); - nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset)); - nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset)); + nv_wo32(base->ramfc, 
0x08, lower_32_bits(args->v0.offset)); + nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset)); + nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset)); + nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset)); nv_wo32(base->ramfc, 0x3c, 0x003f6078); nv_wo32(base->ramfc, 0x44, 0x01003fff); nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4); @@ -222,18 +232,26 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv50_channel_gpfifo_v0 v0; + } *args = data; struct nouveau_bar *bar = nouveau_bar(parent); struct nv50_fifo_base *base = (void *)parent; struct nv50_fifo_chan *chan; - struct nv50_channel_ind_class *args = data; u64 ioffset, ilength; int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel gpfifo size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " + "ioffset %016llx ilength %08x\n", + args->v0.version, args->v0.pushbuf, args->v0.ioffset, + args->v0.ilength); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, - 0x2000, args->pushbuf, + 0x2000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | @@ -249,6 +267,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16, &chan->ramht); if (ret) @@ -259,8 +279,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent, nv_parent(chan)->object_attach = nv84_fifo_object_attach; nv_parent(chan)->object_detach = nv50_fifo_object_detach; - ioffset = args->ioffset; - ilength = order_base_2(args->ilength / 8); + ioffset = args->v0.ioffset; + ilength = order_base_2(args->v0.ilength / 8); nv_wo32(base->ramfc, 0x3c, 0x403f6078); nv_wo32(base->ramfc, 0x44, 0x01003fff); @@ -304,8 +324,10 @@ nv84_fifo_ofuncs_dma = { .dtor = nv50_fifo_chan_dtor, .init = nv84_fifo_chan_init, .fini = nv50_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_ofuncs @@ -314,14 +336,16 @@ nv84_fifo_ofuncs_ind = { .dtor = nv50_fifo_chan_dtor, .init = nv84_fifo_chan_init, .fini = nv50_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nv84_fifo_sclass[] = { - { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma }, - { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind }, + { G82_CHANNEL_DMA, &nv84_fifo_ofuncs_dma }, + { G82_CHANNEL_GPFIFO, &nv84_fifo_ofuncs_ind }, {} }; @@ -389,19 +413,26 @@ nv84_fifo_cclass = { ******************************************************************************/ static void -nv84_fifo_uevent_enable(struct nouveau_event *event, int type, int index) +nv84_fifo_uevent_init(struct nvkm_event *event, int type, int index) { - struct nv84_fifo_priv *priv = event->priv; - nv_mask(priv, 0x002140, 0x40000000, 0x40000000); + struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); + nv_mask(fifo, 0x002140, 0x40000000, 0x40000000); } static void -nv84_fifo_uevent_disable(struct nouveau_event *event, int type, int index) +nv84_fifo_uevent_fini(struct nvkm_event *event, int type, int index) { - struct nv84_fifo_priv 
*priv = event->priv; - nv_mask(priv, 0x002140, 0x40000000, 0x00000000); + struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); + nv_mask(fifo, 0x002140, 0x40000000, 0x00000000); } +static const struct nvkm_event_func +nv84_fifo_uevent_func = { + .ctor = nouveau_fifo_uevent_ctor, + .init = nv84_fifo_uevent_init, + .fini = nv84_fifo_uevent_fini, +}; + static int nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -425,9 +456,9 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - priv->base.uevent->enable = nv84_fifo_uevent_enable; - priv->base.uevent->disable = nv84_fifo_uevent_disable; - priv->base.uevent->priv = priv; + ret = nvkm_event_init(&nv84_fifo_uevent_func, 1, 1, &priv->base.uevent); + if (ret) + return ret; nv_subdev(priv)->unit = 0x00000100; nv_subdev(priv)->intr = nv04_fifo_intr; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index ae4a4dc5642a..1fe1f8fbda0c 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -28,7 +28,8 @@ #include <core/gpuobj.h> #include <core/engctx.h> #include <core/event.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <core/enum.h> #include <subdev/timer.h> @@ -187,20 +188,28 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nv50_channel_gpfifo_v0 v0; + } *args = data; struct nouveau_bar *bar = nouveau_bar(parent); struct nvc0_fifo_priv *priv = (void *)engine; struct nvc0_fifo_base *base = (void *)parent; struct nvc0_fifo_chan *chan; - struct nv50_channel_ind_class *args = data; u64 usermem, ioffset, ilength; int ret, i; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel gpfifo size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " + "ioffset %016llx ilength %08x\n", + args->v0.version, args->v0.pushbuf, args->v0.ioffset, + args->v0.ilength); + } else + return ret; ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, priv->user.bar.offset, 0x1000, - args->pushbuf, + args->v0.pushbuf, (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | (1ULL << NVDEV_ENGINE_COPY0) | @@ -212,12 +221,14 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent, if (ret) return ret; + args->v0.chid = chan->base.chid; + nv_parent(chan)->context_attach = nvc0_fifo_context_attach; nv_parent(chan)->context_detach = nvc0_fifo_context_detach; usermem = chan->base.chid * 0x1000; - ioffset = args->ioffset; - ilength = order_base_2(args->ilength / 8); + ioffset = args->v0.ioffset; + ilength = order_base_2(args->v0.ilength / 8); for (i = 0; i < 0x1000; i += 4) nv_wo32(priv->user.mem, usermem + i, 0x00000000); @@ -291,13 +302,15 @@ nvc0_fifo_ofuncs = { .dtor = _nouveau_fifo_channel_dtor, .init = nvc0_fifo_chan_init, .fini = nvc0_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nvc0_fifo_sclass[] = { - { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs }, + { FERMI_CHANNEL_GPFIFO, &nvc0_fifo_ofuncs }, {} }; @@ -654,7 +667,7 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit) object = engctx; while (object) { 
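The reworked uevent init/fini callbacks above drop the event->priv back-pointer and recover the owning fifo with container_of() on the embedded uevent member instead. A minimal self-contained illustration of that idiom (struct event/struct fifo are cut down to the bare minimum, and container_of is spelled out via offsetof):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event {
	int enabled;
};

struct fifo {
	unsigned int intr_mask;
	struct event uevent;		/* embedded, like nouveau_fifo::uevent */
};

static void uevent_init(struct event *event)
{
	/* no ->priv needed: the member's address pins down its container */
	struct fifo *fifo = container_of(event, struct fifo, uevent);
	fifo->intr_mask |= 0x40000000;	/* cf. nv_mask(fifo, 0x002140, ...) */
	event->enabled = 1;
}

int main(void)
{
	struct fifo fifo = { .intr_mask = 0 };
	uevent_init(&fifo.uevent);
	printf("intr_mask=%08x\n", fifo.intr_mask);	/* 40000000 */
	return 0;
}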
switch (nv_mclass(object)) { - case NVC0_CHANNEL_IND_CLASS: + case FERMI_CHANNEL_GPFIFO: nvc0_fifo_recover(priv, engine, (void *)object); break; } @@ -730,7 +743,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn) for (unkn = 0; unkn < 8; unkn++) { u32 ints = (intr >> (unkn * 0x04)) & inte; if (ints & 0x1) { - nouveau_event_trigger(priv->base.uevent, 1, 0); + nouveau_fifo_uevent(&priv->base); ints &= ~1; } if (ints) { @@ -827,19 +840,26 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev) } static void -nvc0_fifo_uevent_enable(struct nouveau_event *event, int type, int index) +nvc0_fifo_uevent_init(struct nvkm_event *event, int type, int index) { - struct nvc0_fifo_priv *priv = event->priv; - nv_mask(priv, 0x002140, 0x80000000, 0x80000000); + struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); + nv_mask(fifo, 0x002140, 0x80000000, 0x80000000); } static void -nvc0_fifo_uevent_disable(struct nouveau_event *event, int type, int index) +nvc0_fifo_uevent_fini(struct nvkm_event *event, int type, int index) { - struct nvc0_fifo_priv *priv = event->priv; - nv_mask(priv, 0x002140, 0x80000000, 0x00000000); + struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); + nv_mask(fifo, 0x002140, 0x80000000, 0x00000000); } +static const struct nvkm_event_func +nvc0_fifo_uevent_func = { + .ctor = nouveau_fifo_uevent_ctor, + .init = nvc0_fifo_uevent_init, + .fini = nvc0_fifo_uevent_fini, +}; + static int nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -877,9 +897,9 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - priv->base.uevent->enable = nvc0_fifo_uevent_enable; - priv->base.uevent->disable = nvc0_fifo_uevent_disable; - priv->base.uevent->priv = priv; + ret = nvkm_event_init(&nvc0_fifo_uevent_func, 1, 1, &priv->base.uevent); + if (ret) + return ret; nv_subdev(priv)->unit = 0x00000100; nv_subdev(priv)->intr = nvc0_fifo_intr; diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 298063edb92d..d2f0fd39c145 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -28,7 +28,8 @@ #include <core/gpuobj.h> #include <core/engctx.h> #include <core/event.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> #include <core/enum.h> #include <subdev/timer.h> @@ -216,46 +217,56 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct kepler_channel_gpfifo_a_v0 v0; + } *args = data; struct nouveau_bar *bar = nouveau_bar(parent); struct nve0_fifo_priv *priv = (void *)engine; struct nve0_fifo_base *base = (void *)parent; struct nve0_fifo_chan *chan; - struct nve0_channel_ind_class *args = data; u64 usermem, ioffset, ilength; int ret, i; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create channel gpfifo size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x " + "ioffset %016llx ilength %08x engine %08x\n", + args->v0.version, args->v0.pushbuf, args->v0.ioffset, + args->v0.ilength, args->v0.engine); + } else + return ret; for (i = 0; i < FIFO_ENGINE_NR; i++) { - if (args->engine & (1 << i)) { + if (args->v0.engine & (1 << i)) { if (nouveau_engine(parent, fifo_engine[i].subdev)) { - args->engine = (1 
<< i); + args->v0.engine = (1 << i); break; } } } if (i == FIFO_ENGINE_NR) { - nv_error(priv, "unsupported engines 0x%08x\n", args->engine); + nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine); return -ENODEV; } ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, priv->user.bar.offset, 0x200, - args->pushbuf, + args->v0.pushbuf, fifo_engine[i].mask, &chan); *pobject = nv_object(chan); if (ret) return ret; + args->v0.chid = chan->base.chid; + nv_parent(chan)->context_attach = nve0_fifo_context_attach; nv_parent(chan)->context_detach = nve0_fifo_context_detach; chan->engine = i; usermem = chan->base.chid * 0x200; - ioffset = args->ioffset; - ilength = order_base_2(args->ilength / 8); + ioffset = args->v0.ioffset; + ilength = order_base_2(args->v0.ilength / 8); for (i = 0; i < 0x200; i += 4) nv_wo32(priv->user.mem, usermem + i, 0x00000000); @@ -325,13 +336,15 @@ nve0_fifo_ofuncs = { .dtor = _nouveau_fifo_channel_dtor, .init = nve0_fifo_chan_init, .fini = nve0_fifo_chan_fini, + .map = _nouveau_fifo_channel_map, .rd32 = _nouveau_fifo_channel_rd32, .wr32 = _nouveau_fifo_channel_wr32, + .ntfy = _nouveau_fifo_channel_ntfy }; static struct nouveau_oclass nve0_fifo_sclass[] = { - { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs }, + { KEPLER_CHANNEL_GPFIFO_A, &nve0_fifo_ofuncs }, {} }; @@ -769,7 +782,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit) object = engctx; while (object) { switch (nv_mclass(object)) { - case NVE0_CHANNEL_IND_CLASS: + case KEPLER_CHANNEL_GPFIFO_A: nve0_fifo_recover(priv, engine, (void *)object); break; } @@ -859,7 +872,7 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv) static void nve0_fifo_intr_engine(struct nve0_fifo_priv *priv) { - nouveau_event_trigger(priv->base.uevent, 1, 0); + nouveau_fifo_uevent(&priv->base); } static void @@ -952,19 +965,26 @@ nve0_fifo_intr(struct nouveau_subdev *subdev) } static void -nve0_fifo_uevent_enable(struct nouveau_event *event, int type, int index) +nve0_fifo_uevent_init(struct nvkm_event *event, int type, int index) { - struct nve0_fifo_priv *priv = event->priv; - nv_mask(priv, 0x002140, 0x80000000, 0x80000000); + struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); + nv_mask(fifo, 0x002140, 0x80000000, 0x80000000); } static void -nve0_fifo_uevent_disable(struct nouveau_event *event, int type, int index) +nve0_fifo_uevent_fini(struct nvkm_event *event, int type, int index) { - struct nve0_fifo_priv *priv = event->priv; - nv_mask(priv, 0x002140, 0x80000000, 0x00000000); + struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent); + nv_mask(fifo, 0x002140, 0x80000000, 0x00000000); } +static const struct nvkm_event_func +nve0_fifo_uevent_func = { + .ctor = nouveau_fifo_uevent_ctor, + .init = nve0_fifo_uevent_init, + .fini = nve0_fifo_uevent_fini, +}; + int nve0_fifo_fini(struct nouveau_object *object, bool suspend) { @@ -1067,9 +1087,9 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; - priv->base.uevent->enable = nve0_fifo_uevent_enable; - priv->base.uevent->disable = nve0_fifo_uevent_disable; - priv->base.uevent->priv = priv; + ret = nvkm_event_init(&nve0_fifo_uevent_func, 1, 1, &priv->base.uevent); + if (ret) + return ret; nv_subdev(priv)->unit = 0x00000100; nv_subdev(priv)->intr = nve0_fifo_intr; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c new file mode 100644 index 000000000000..3adb7fe91772 --- /dev/null +++ 
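Several of the converted gpfifo constructors above (nv50, nv84, nvc0, nve0) store the indirect pushbuf size as order_base_2(args->v0.ilength / 8): the ring holds 8-byte entries and the hardware takes a power-of-two entry count. A self-contained equivalent of that rounded-up base-2 order (the kernel helper lives in linux/log2.h; this loop version assumes n >= 1):

#include <stdint.h>
#include <stdio.h>

static unsigned int order_base_2(uint64_t n)
{
	unsigned int order = 0;
	while ((1ULL << order) < n)
		order++;			/* smallest order with 2^order >= n */
	return order;
}

int main(void)
{
	uint64_t ilength = 0x10000;		/* 64 KiB of ring requested */
	printf("entries=%llu order=%u\n",
	       (unsigned long long)(ilength / 8),
	       order_base_2(ilength / 8));	/* entries=8192 order=13 */
	return 0;
}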
b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c @@ -0,0 +1,104 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include "ctxnvc0.h" + +/******************************************************************************* + * PGRAPH context register lists + ******************************************************************************/ + +static const struct nvc0_graph_init +gk110b_grctx_init_sm_0[] = { + { 0x419e04, 1, 0x04, 0x00000000 }, + { 0x419e08, 1, 0x04, 0x0000001d }, + { 0x419e0c, 1, 0x04, 0x00000000 }, + { 0x419e10, 1, 0x04, 0x00001c02 }, + { 0x419e44, 1, 0x04, 0x0013eff2 }, + { 0x419e48, 1, 0x04, 0x00000000 }, + { 0x419e4c, 1, 0x04, 0x0000007f }, + { 0x419e50, 2, 0x04, 0x00000000 }, + { 0x419e58, 1, 0x04, 0x00000001 }, + { 0x419e5c, 3, 0x04, 0x00000000 }, + { 0x419e68, 1, 0x04, 0x00000002 }, + { 0x419e6c, 12, 0x04, 0x00000000 }, + { 0x419eac, 1, 0x04, 0x00001f8f }, + { 0x419eb0, 1, 0x04, 0x0db00d2f }, + { 0x419eb8, 1, 0x04, 0x00000000 }, + { 0x419ec8, 1, 0x04, 0x0001304f }, + { 0x419f30, 4, 0x04, 0x00000000 }, + { 0x419f40, 1, 0x04, 0x00000018 }, + { 0x419f44, 3, 0x04, 0x00000000 }, + { 0x419f58, 1, 0x04, 0x00000000 }, + { 0x419f70, 1, 0x04, 0x00006300 }, + { 0x419f78, 1, 0x04, 0x000000eb }, + { 0x419f7c, 1, 0x04, 0x00000404 }, + {} +}; + +static const struct nvc0_graph_pack +gk110b_grctx_pack_tpc[] = { + { nvd7_grctx_init_pe_0 }, + { nvf0_grctx_init_tex_0 }, + { nvf0_grctx_init_mpc_0 }, + { nvf0_grctx_init_l1c_0 }, + { gk110b_grctx_init_sm_0 }, + {} +}; + +/******************************************************************************* + * PGRAPH context implementation + ******************************************************************************/ + +struct nouveau_oclass * +gk110b_grctx_oclass = &(struct nvc0_grctx_oclass) { + .base.handle = NV_ENGCTX(GR, 0xf1), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_graph_context_ctor, + .dtor = nvc0_graph_context_dtor, + .init = _nouveau_graph_context_init, + .fini = _nouveau_graph_context_fini, + .rd32 = _nouveau_graph_context_rd32, + .wr32 = _nouveau_graph_context_wr32, + }, + .main = nve4_grctx_generate_main, + .unkn = nve4_grctx_generate_unkn, + .hub = nvf0_grctx_pack_hub, + .gpc = nvf0_grctx_pack_gpc, + .zcull = nvc0_grctx_pack_zcull, + .tpc = gk110b_grctx_pack_tpc, + .ppc = nvf0_grctx_pack_ppc, + .icmd = nvf0_grctx_pack_icmd, + .mthd = nvf0_grctx_pack_mthd, + .bundle = 
nve4_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0x600, + .pagepool = nve4_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvd7_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, + .alpha_nr_max = 0x7ff, + .alpha_nr = 0x648, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c index 224ee0287ab7..36fc9831cc93 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c @@ -41,7 +41,6 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nve4_grctx_generate_main, - .mods = nve4_grctx_generate_mods, .unkn = nve4_grctx_generate_unkn, .hub = nve4_grctx_pack_hub, .gpc = nve4_grctx_pack_gpc, @@ -50,4 +49,15 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) { .ppc = nve4_grctx_pack_ppc, .icmd = nve4_grctx_pack_icmd, .mthd = gk20a_grctx_pack_mthd, + .bundle = nve4_grctx_generate_bundle, + .bundle_size = 0x1800, + .bundle_min_gpm_fifo_depth = 0x62, + .bundle_token_limit = 0x100, + .pagepool = nve4_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvd7_grctx_generate_attrib, + .attrib_nr_max = 0x240, + .attrib_nr = 0x240, + .alpha_nr_max = 0x648 + (0x648 / 2), + .alpha_nr = 0x648, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c index b0d0fb2f4d08..62e918b9fa81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c @@ -859,45 +859,74 @@ gm107_grctx_pack_ppc[] = { ******************************************************************************/ static void -gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) +gm107_grctx_generate_bundle(struct nvc0_grctx *info) { - mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW); - - mmio_list(0x40800c, 0x00000000, 8, 1); - mmio_list(0x408010, 0x80000000, 0, 0); - mmio_list(0x419004, 0x00000000, 8, 1); - mmio_list(0x419008, 0x00000000, 0, 0); - mmio_list(0x4064cc, 0x80000000, 0, 0); - mmio_list(0x418e30, 0x80000000, 0, 0); - - mmio_list(0x408004, 0x00000000, 8, 0); - mmio_list(0x408008, 0x80000030, 0, 0); - mmio_list(0x418e24, 0x00000000, 8, 0); - mmio_list(0x418e28, 0x80000030, 0, 0); - - mmio_list(0x4064c8, 0x018002c0, 0, 0); - - mmio_list(0x418810, 0x80000000, 12, 2); - mmio_list(0x419848, 0x10000000, 12, 2); - mmio_list(0x419c2c, 0x10000000, 12, 2); - - mmio_list(0x405830, 0x0aa01000, 0, 0); - mmio_list(0x4064c4, 0x0400ffff, 0, 0); - - /*XXX*/ - mmio_list(0x5030c0, 0x00001540, 0, 0); - mmio_list(0x5030f4, 0x00000000, 0, 0); - mmio_list(0x5030e4, 0x00002000, 0, 0); - mmio_list(0x5030f8, 0x00003fc0, 0, 0); - mmio_list(0x418ea0, 0x07151540, 0, 0); - - mmio_list(0x5032c0, 0x00001540, 0, 0); - mmio_list(0x5032f4, 0x00001fe0, 0, 0); - mmio_list(0x5032e4, 0x00002000, 0, 0); - mmio_list(0x5032f8, 0x00006fc0, 0, 0); - mmio_list(0x418ea4, 0x07151540, 0, 0); + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); + const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth, + impl->bundle_size / 0x20); + const u32 token_limit = impl->bundle_token_limit; + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = 
mmio_vram(info, impl->bundle_size, (1 << s), access); + mmio_refn(info, 0x408004, 0x00000000, s, b); + mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_refn(info, 0x418e24, 0x00000000, s, b); + mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); +} + +static void +gm107_grctx_generate_pagepool(struct nvc0_grctx *info) +{ + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); + mmio_refn(info, 0x40800c, 0x00000000, s, b); + mmio_wr32(info, 0x408010, 0x80000000); + mmio_refn(info, 0x419004, 0x00000000, s, b); + mmio_wr32(info, 0x419008, 0x00000000); + mmio_wr32(info, 0x4064cc, 0x80000000); + mmio_wr32(info, 0x418e30, 0x80000000); /* guess at it being related */ +} + +static void +gm107_grctx_generate_attrib(struct nvc0_grctx *info) +{ + struct nvc0_graph_priv *priv = info->priv; + const struct nvc0_grctx_oclass *impl = (void *)nvc0_grctx_impl(priv); + const u32 alpha = impl->alpha_nr; + const u32 attrib = impl->attrib_nr; + const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); + const u32 access = NV_MEM_ACCESS_RW; + const int s = 12; + const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); + const int max_batches = 0xffff; + u32 bo = 0; + u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; + int gpc, ppc, n = 0; + + mmio_refn(info, 0x418810, 0x80000000, s, b); + mmio_refn(info, 0x419848, 0x10000000, s, b); + mmio_refn(info, 0x419c2c, 0x10000000, s, b); + mmio_wr32(info, 0x405830, (attrib << 16) | alpha); + mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) { + const u32 as = alpha * priv->ppc_tpc_nr[gpc][ppc]; + const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc]; + const u32 u = 0x418ea0 + (n * 0x04); + const u32 o = PPC_UNIT(gpc, ppc, 0); + mmio_wr32(info, o + 0xc0, bs); + mmio_wr32(info, o + 0xf4, bo); + bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; + mmio_wr32(info, o + 0xe4, as); + mmio_wr32(info, o + 0xf8, ao); + ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc]; + mmio_wr32(info, u, (0x715 /*XXX*/ << 16) | bs); + } + } } static void @@ -934,7 +963,9 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) nv_wr32(priv, 0x404154, 0x00000000); - oclass->mods(priv, info); + oclass->bundle(info); + oclass->pagepool(info); + oclass->attrib(info); oclass->unkn(priv); gm107_grctx_generate_tpcid(priv); @@ -979,7 +1010,6 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = gm107_grctx_generate_main, - .mods = gm107_grctx_generate_mods, .unkn = nve4_grctx_generate_unkn, .hub = gm107_grctx_pack_hub, .gpc = gm107_grctx_pack_gpc, @@ -988,4 +1018,15 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) { .ppc = gm107_grctx_pack_ppc, .icmd = gm107_grctx_pack_icmd, .mthd = gm107_grctx_pack_mthd, + .bundle = gm107_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0x2c0, + .pagepool = gm107_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = gm107_grctx_generate_attrib, + .attrib_nr_max = 0xff0, + .attrib_nr = 0xaa0, + .alpha_nr_max = 0x1800, + .alpha_nr = 0x1000, }.base; diff --git 
a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c index 8de4a4291548..ce252adbef81 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c @@ -531,50 +531,6 @@ nv108_grctx_pack_ppc[] = { * PGRAPH context implementation ******************************************************************************/ -static void -nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) -{ - u32 magic[GPC_MAX][2]; - u32 offset; - int gpc; - - mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); - mmio_list(0x40800c, 0x00000000, 8, 1); - mmio_list(0x408010, 0x80000000, 0, 0); - mmio_list(0x419004, 0x00000000, 8, 1); - mmio_list(0x419008, 0x00000000, 0, 0); - mmio_list(0x4064cc, 0x80000000, 0, 0); - mmio_list(0x408004, 0x00000000, 8, 0); - mmio_list(0x408008, 0x80000030, 0, 0); - mmio_list(0x418808, 0x00000000, 8, 0); - mmio_list(0x41880c, 0x80000030, 0, 0); - mmio_list(0x4064c8, 0x00c20200, 0, 0); - mmio_list(0x418810, 0x80000000, 12, 2); - mmio_list(0x419848, 0x10000000, 12, 2); - - mmio_list(0x405830, 0x02180648, 0, 0); - mmio_list(0x4064c4, 0x0192ffff, 0, 0); - - for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { - u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; - u16 magic1 = 0x0648 * priv->tpc_nr[gpc]; - magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; - magic[gpc][1] = 0x00000000 | (magic1 << 16); - offset += 0x0324 * priv->tpc_nr[gpc]; - } - - for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); - mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); - offset += 0x07ff * priv->tpc_nr[gpc]; - } - - mmio_list(0x17e91c, 0x0b040a0b, 0, 0); - mmio_list(0x17e920, 0x00090d08, 0, 0); -} - struct nouveau_oclass * nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { .base.handle = NV_ENGCTX(GR, 0x08), @@ -587,7 +543,6 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nve4_grctx_generate_main, - .mods = nv108_grctx_generate_mods, .unkn = nve4_grctx_generate_unkn, .hub = nv108_grctx_pack_hub, .gpc = nv108_grctx_pack_gpc, @@ -596,4 +551,15 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) { .ppc = nv108_grctx_pack_ppc, .icmd = nv108_grctx_pack_icmd, .mthd = nvf0_grctx_pack_mthd, + .bundle = nve4_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0xc2, + .bundle_token_limit = 0x200, + .pagepool = nve4_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvd7_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, + .alpha_nr_max = 0x7ff, + .alpha_nr = 0x648, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c index 833a96508c4e..b8e5fe60a1eb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c @@ -982,34 +982,93 @@ nvc0_grctx_pack_tpc[] = { * PGRAPH context implementation ******************************************************************************/ +int +nvc0_grctx_mmio_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access) +{ + if (info->data) { + info->buffer[info->buffer_nr] = round_up(info->addr, align); + info->addr = info->buffer[info->buffer_nr] + size; + info->data->size = size; + info->data->align = align; + 
info->data->access = access; + info->data++; + return info->buffer_nr++; + } + return -1; +} + +void +nvc0_grctx_mmio_item(struct nvc0_grctx *info, u32 addr, u32 data, + int shift, int buffer) +{ + if (info->data) { + if (shift >= 0) { + info->mmio->addr = addr; + info->mmio->data = data; + info->mmio->shift = shift; + info->mmio->buffer = buffer; + if (buffer >= 0) + data |= info->buffer[buffer] >> shift; + info->mmio++; + } else + return; + } else { + if (buffer >= 0) + return; + } + + nv_wr32(info->priv, addr, data); +} + +void +nvc0_grctx_generate_bundle(struct nvc0_grctx *info) +{ + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); + mmio_refn(info, 0x408004, 0x00000000, s, b); + mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_refn(info, 0x418808, 0x00000000, s, b); + mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); +} + void -nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) +nvc0_grctx_generate_pagepool(struct nvc0_grctx *info) { + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); + mmio_refn(info, 0x40800c, 0x00000000, s, b); + mmio_wr32(info, 0x408010, 0x80000000); + mmio_refn(info, 0x419004, 0x00000000, s, b); + mmio_wr32(info, 0x419008, 0x00000000); +} + +void +nvc0_grctx_generate_attrib(struct nvc0_grctx *info) +{ + struct nvc0_graph_priv *priv = info->priv; + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv); + const u32 attrib = impl->attrib_nr; + const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); + const u32 access = NV_MEM_ACCESS_RW; + const int s = 12; + const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); int gpc, tpc; - u32 offset; - - mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); - - mmio_list(0x408004, 0x00000000, 8, 0); - mmio_list(0x408008, 0x80000018, 0, 0); - mmio_list(0x40800c, 0x00000000, 8, 1); - mmio_list(0x408010, 0x80000000, 0, 0); - mmio_list(0x418810, 0x80000000, 12, 2); - mmio_list(0x419848, 0x10000000, 12, 2); - mmio_list(0x419004, 0x00000000, 8, 1); - mmio_list(0x419008, 0x00000000, 0, 0); - mmio_list(0x418808, 0x00000000, 8, 0); - mmio_list(0x41880c, 0x80000018, 0, 0); - - mmio_list(0x405830, 0x02180000, 0, 0); - - for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { + u32 bo = 0; + + mmio_refn(info, 0x418810, 0x80000000, s, b); + mmio_refn(info, 0x419848, 0x10000000, s, b); + mmio_wr32(info, 0x405830, (attrib << 16)); + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { - u32 addr = TPC_UNIT(gpc, tpc, 0x0520); - mmio_list(addr, 0x02180000 | offset, 0, 0); - offset += 0x0324; + const u32 o = TPC_UNIT(gpc, tpc, 0x0520); + mmio_skip(info, o, (attrib << 16) | ++bo); + mmio_wr32(info, o, (attrib << 16) | --bo); + bo += impl->attrib_nr_max; } } } @@ -1170,7 +1229,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) { struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; - nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); 
nvc0_graph_mmio(priv, oclass->hub); nvc0_graph_mmio(priv, oclass->gpc); @@ -1180,7 +1239,9 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) nv_wr32(priv, 0x404154, 0x00000000); - oclass->mods(priv, info); + oclass->bundle(info); + oclass->pagepool(info); + oclass->attrib(info); oclass->unkn(priv); nvc0_grctx_generate_tpcid(priv); @@ -1192,7 +1253,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) nvc0_graph_icmd(priv, oclass->icmd); nv_wr32(priv, 0x404154, 0x00000400); nvc0_graph_mthd(priv, oclass->mthd); - nv_mask(priv, 0x000260, 0x00000001, 0x00000001); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); } int @@ -1308,7 +1369,6 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nvc0_grctx_generate_main, - .mods = nvc0_grctx_generate_mods, .unkn = nvc0_grctx_generate_unkn, .hub = nvc0_grctx_pack_hub, .gpc = nvc0_grctx_pack_gpc, @@ -1316,4 +1376,11 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) { .tpc = nvc0_grctx_pack_tpc, .icmd = nvc0_grctx_pack_icmd, .mthd = nvc0_grctx_pack_mthd, + .bundle = nvc0_grctx_generate_bundle, + .bundle_size = 0x1800, + .pagepool = nvc0_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvc0_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h index 8da8b627b9d0..c776cd715e33 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h @@ -12,12 +12,19 @@ struct nvc0_grctx { u64 addr; }; +int nvc0_grctx_mmio_data(struct nvc0_grctx *, u32 size, u32 align, u32 access); +void nvc0_grctx_mmio_item(struct nvc0_grctx *, u32 addr, u32 data, int s, int); + +#define mmio_vram(a,b,c,d) nvc0_grctx_mmio_data((a), (b), (c), (d)) +#define mmio_refn(a,b,c,d,e) nvc0_grctx_mmio_item((a), (b), (c), (d), (e)) +#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1) +#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1) + struct nvc0_grctx_oclass { struct nouveau_oclass base; /* main context generation function */ void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *); /* context-specific modify-on-first-load list generation function */ - void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *); void (*unkn)(struct nvc0_graph_priv *); /* mmio context data */ const struct nvc0_graph_pack *hub; @@ -28,30 +35,34 @@ struct nvc0_grctx_oclass { /* indirect context data, generated with icmds/mthds */ const struct nvc0_graph_pack *icmd; const struct nvc0_graph_pack *mthd; + /* bundle circular buffer */ + void (*bundle)(struct nvc0_grctx *); + u32 bundle_size; + u32 bundle_min_gpm_fifo_depth; + u32 bundle_token_limit; + /* pagepool */ + void (*pagepool)(struct nvc0_grctx *); + u32 pagepool_size; + /* attribute(/alpha) circular buffer */ + void (*attrib)(struct nvc0_grctx *); + u32 attrib_nr_max; + u32 attrib_nr; + u32 alpha_nr_max; + u32 alpha_nr; }; -#define mmio_data(s,a,p) do { \ - info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \ - info->addr = info->buffer[info->buffer_nr++] + (s); \ - info->data->size = (s); \ - info->data->align = (a); \ - info->data->access = (p); \ - info->data++; \ -} while(0) - -#define mmio_list(r,d,s,b) do { \ - info->mmio->addr = (r); \ - info->mmio->data = (d); \ - info->mmio->shift = (s); \ - info->mmio->buffer = (b); \ - info->mmio++; \ - nv_wr32(priv, (r), (d) | ((s) ? 
(info->buffer[(b)] >> (s)) : 0)); \ -} while(0) +static inline const struct nvc0_grctx_oclass * +nvc0_grctx_impl(struct nvc0_graph_priv *priv) +{ + return (void *)nv_engine(priv)->cclass; +} extern struct nouveau_oclass *nvc0_grctx_oclass; int nvc0_grctx_generate(struct nvc0_graph_priv *); void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); -void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); +void nvc0_grctx_generate_bundle(struct nvc0_grctx *); +void nvc0_grctx_generate_pagepool(struct nvc0_grctx *); +void nvc0_grctx_generate_attrib(struct nvc0_grctx *); void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *); void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *); void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *); @@ -60,22 +71,27 @@ void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *); void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *); extern struct nouveau_oclass *nvc1_grctx_oclass; -void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); +void nvc1_grctx_generate_attrib(struct nvc0_grctx *); void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *); extern struct nouveau_oclass *nvc4_grctx_oclass; extern struct nouveau_oclass *nvc8_grctx_oclass; + extern struct nouveau_oclass *nvd7_grctx_oclass; +void nvd7_grctx_generate_attrib(struct nvc0_grctx *); + extern struct nouveau_oclass *nvd9_grctx_oclass; extern struct nouveau_oclass *nve4_grctx_oclass; extern struct nouveau_oclass *gk20a_grctx_oclass; void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *); -void nve4_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *); +void nve4_grctx_generate_bundle(struct nvc0_grctx *); +void nve4_grctx_generate_pagepool(struct nvc0_grctx *); void nve4_grctx_generate_unkn(struct nvc0_graph_priv *); void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *); extern struct nouveau_oclass *nvf0_grctx_oclass; +extern struct nouveau_oclass *gk110b_grctx_oclass; extern struct nouveau_oclass *nv108_grctx_oclass; extern struct nouveau_oclass *gm107_grctx_oclass; @@ -160,16 +176,23 @@ extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[]; extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[]; extern const struct nvc0_graph_init nve4_grctx_init_a097_0[]; +extern const struct nvc0_graph_pack nvf0_grctx_pack_icmd[]; + extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[]; +extern const struct nvc0_graph_pack nvf0_grctx_pack_hub[]; extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[]; extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[]; +extern const struct nvc0_graph_pack nvf0_grctx_pack_gpc[]; extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[]; +extern const struct nvc0_graph_init nvf0_grctx_init_tex_0[]; extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[]; extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[]; +extern const struct nvc0_graph_pack nvf0_grctx_pack_ppc[]; + extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[]; extern const struct nvc0_graph_init nv108_grctx_init_prop_0[]; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c index 24a92c569c0a..c6ba8fed18f1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c @@ -727,38 +727,38 @@ nvc1_grctx_pack_tpc[] = { ******************************************************************************/ void 
-nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) +nvc1_grctx_generate_attrib(struct nvc0_grctx *info) { + struct nvc0_graph_priv *priv = info->priv; + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv); + const u32 alpha = impl->alpha_nr; + const u32 beta = impl->attrib_nr; + const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); + const u32 access = NV_MEM_ACCESS_RW; + const int s = 12; + const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); + const int timeslice_mode = 1; + const int max_batches = 0xffff; + u32 bo = 0; + u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; int gpc, tpc; - u32 offset; - mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); - mmio_list(0x408004, 0x00000000, 8, 0); - mmio_list(0x408008, 0x80000018, 0, 0); - mmio_list(0x40800c, 0x00000000, 8, 1); - mmio_list(0x408010, 0x80000000, 0, 0); - mmio_list(0x418810, 0x80000000, 12, 2); - mmio_list(0x419848, 0x10000000, 12, 2); - mmio_list(0x419004, 0x00000000, 8, 1); - mmio_list(0x419008, 0x00000000, 0, 0); - mmio_list(0x418808, 0x00000000, 8, 0); - mmio_list(0x41880c, 0x80000018, 0, 0); + mmio_refn(info, 0x418810, 0x80000000, s, b); + mmio_refn(info, 0x419848, 0x10000000, s, b); + mmio_wr32(info, 0x405830, (beta << 16) | alpha); + mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); - mmio_list(0x405830, 0x02180218, 0, 0); - mmio_list(0x4064c4, 0x0086ffff, 0, 0); - - for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { - for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { - u32 addr = TPC_UNIT(gpc, tpc, 0x0520); - mmio_list(addr, 0x12180000 | offset, 0, 0); - offset += 0x0324; - } + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) { - u32 addr = TPC_UNIT(gpc, tpc, 0x0544); - mmio_list(addr, 0x02180000 | offset, 0, 0); - offset += 0x0324; + const u32 a = alpha; + const u32 b = beta; + const u32 t = timeslice_mode; + const u32 o = TPC_UNIT(gpc, tpc, 0x500); + mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo); + mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo); + bo += impl->attrib_nr_max; + mmio_wr32(info, o + 0x44, (a << 16) | ao); + ao += impl->alpha_nr_max; } } } @@ -786,7 +786,6 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nvc0_grctx_generate_main, - .mods = nvc1_grctx_generate_mods, .unkn = nvc1_grctx_generate_unkn, .hub = nvc1_grctx_pack_hub, .gpc = nvc1_grctx_pack_gpc, @@ -794,4 +793,13 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) { .tpc = nvc1_grctx_pack_tpc, .icmd = nvc1_grctx_pack_icmd, .mthd = nvc1_grctx_pack_mthd, + .bundle = nvc0_grctx_generate_bundle, + .bundle_size = 0x1800, + .pagepool = nvc0_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvc1_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, + .alpha_nr_max = 0x324, + .alpha_nr = 0x218, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c index e11ed5538193..41705c60cc47 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c @@ -92,7 +92,6 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nvc0_grctx_generate_main, - .mods = nvc0_grctx_generate_mods, .unkn = nvc0_grctx_generate_unkn, .hub = 
nvc0_grctx_pack_hub, .gpc = nvc0_grctx_pack_gpc, @@ -100,4 +99,11 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) { .tpc = nvc4_grctx_pack_tpc, .icmd = nvc0_grctx_pack_icmd, .mthd = nvc0_grctx_pack_mthd, + .bundle = nvc0_grctx_generate_bundle, + .bundle_size = 0x1800, + .pagepool = nvc0_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvc0_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c index feebd58dfe8d..8f804cd8f9c7 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c @@ -343,7 +343,6 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nvc0_grctx_generate_main, - .mods = nvc0_grctx_generate_mods, .unkn = nvc0_grctx_generate_unkn, .hub = nvc0_grctx_pack_hub, .gpc = nvc8_grctx_pack_gpc, @@ -351,4 +350,11 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) { .tpc = nvc0_grctx_pack_tpc, .icmd = nvc8_grctx_pack_icmd, .mthd = nvc8_grctx_pack_mthd, + .bundle = nvc0_grctx_generate_bundle, + .bundle_size = 0x1800, + .pagepool = nvc0_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvc0_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c index 1dbc8d7f2e86..fcf534fd9e65 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c @@ -177,44 +177,41 @@ nvd7_grctx_pack_ppc[] = { * PGRAPH context implementation ******************************************************************************/ -static void -nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) +void +nvd7_grctx_generate_attrib(struct nvc0_grctx *info) { - u32 magic[GPC_MAX][2]; - u32 offset; - int gpc; - - mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); - mmio_list(0x40800c, 0x00000000, 8, 1); - mmio_list(0x408010, 0x80000000, 0, 0); - mmio_list(0x419004, 0x00000000, 8, 1); - mmio_list(0x419008, 0x00000000, 0, 0); - mmio_list(0x408004, 0x00000000, 8, 0); - mmio_list(0x408008, 0x80000018, 0, 0); - mmio_list(0x418808, 0x00000000, 8, 0); - mmio_list(0x41880c, 0x80000018, 0, 0); - mmio_list(0x418810, 0x80000000, 12, 2); - mmio_list(0x419848, 0x10000000, 12, 2); + struct nvc0_graph_priv *priv = info->priv; + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv); + const u32 alpha = impl->alpha_nr; + const u32 beta = impl->attrib_nr; + const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max); + const u32 access = NV_MEM_ACCESS_RW; + const int s = 12; + const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access); + const int timeslice_mode = 1; + const int max_batches = 0xffff; + u32 bo = 0; + u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; + int gpc, ppc; - mmio_list(0x405830, 0x02180324, 0, 0); - mmio_list(0x4064c4, 0x00c9ffff, 0, 0); - - for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { - u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; - u16 magic1 = 0x0324 * priv->tpc_nr[gpc]; - magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; - magic[gpc][1] = 0x00000000 | (magic1 << 16); - offset += 0x0324 * priv->tpc_nr[gpc]; - } + mmio_refn(info, 0x418810, 
0x80000000, s, b); + mmio_refn(info, 0x419848, 0x10000000, s, b); + mmio_wr32(info, 0x405830, (beta << 16) | alpha); + mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); - mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); - offset += 0x07ff * priv->tpc_nr[gpc]; + for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) { + const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc]; + const u32 b = beta * priv->ppc_tpc_nr[gpc][ppc]; + const u32 t = timeslice_mode; + const u32 o = PPC_UNIT(gpc, ppc, 0); + mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); + mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); + bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; + mmio_wr32(info, o + 0xe4, (a << 16) | ao); + ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc]; + } } - mmio_list(0x17e91c, 0x03060609, 0, 0); /* different from kepler */ } void @@ -223,7 +220,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; int i; - nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); nvc0_graph_mmio(priv, oclass->hub); nvc0_graph_mmio(priv, oclass->gpc); @@ -233,7 +230,9 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) nv_wr32(priv, 0x404154, 0x00000000); - oclass->mods(priv, info); + oclass->bundle(info); + oclass->pagepool(info); + oclass->attrib(info); oclass->unkn(priv); nvc0_grctx_generate_tpcid(priv); @@ -248,7 +247,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) nvc0_graph_icmd(priv, oclass->icmd); nv_wr32(priv, 0x404154, 0x00000400); nvc0_graph_mthd(priv, oclass->mthd); - nv_mask(priv, 0x000260, 0x00000001, 0x00000001); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); } struct nouveau_oclass * @@ -263,7 +262,6 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nvd7_grctx_generate_main, - .mods = nvd7_grctx_generate_mods, .unkn = nve4_grctx_generate_unkn, .hub = nvd7_grctx_pack_hub, .gpc = nvd7_grctx_pack_gpc, @@ -272,4 +270,13 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) { .ppc = nvd7_grctx_pack_ppc, .icmd = nvd9_grctx_pack_icmd, .mthd = nvd9_grctx_pack_mthd, + .bundle = nvc0_grctx_generate_bundle, + .bundle_size = 0x1800, + .pagepool = nvc0_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvd7_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, + .alpha_nr_max = 0x7ff, + .alpha_nr = 0x324, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c index c665fb7e4660..b9a301b6fd9f 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c @@ -511,7 +511,6 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nvc0_grctx_generate_main, - .mods = nvc1_grctx_generate_mods, .unkn = nvc1_grctx_generate_unkn, .hub = nvd9_grctx_pack_hub, .gpc = nvd9_grctx_pack_gpc, @@ -519,4 +518,13 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) { .tpc = nvd9_grctx_pack_tpc, .icmd = nvd9_grctx_pack_icmd, .mthd = nvd9_grctx_pack_mthd, + .bundle = nvc0_grctx_generate_bundle, + .bundle_size = 0x1800, + .pagepool = nvc0_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvc1_grctx_generate_attrib, + .attrib_nr_max = 
0x324, + .attrib_nr = 0x218, + .alpha_nr_max = 0x324, + .alpha_nr = 0x218, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c index c5b249238587..ccac2ee1a1cb 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c @@ -839,47 +839,34 @@ nve4_grctx_pack_ppc[] = { ******************************************************************************/ void -nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) +nve4_grctx_generate_bundle(struct nvc0_grctx *info) { - u32 magic[GPC_MAX][2]; - u32 offset; - int gpc; - - mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); - mmio_list(0x40800c, 0x00000000, 8, 1); - mmio_list(0x408010, 0x80000000, 0, 0); - mmio_list(0x419004, 0x00000000, 8, 1); - mmio_list(0x419008, 0x00000000, 0, 0); - mmio_list(0x4064cc, 0x80000000, 0, 0); - mmio_list(0x408004, 0x00000000, 8, 0); - mmio_list(0x408008, 0x80000030, 0, 0); - mmio_list(0x418808, 0x00000000, 8, 0); - mmio_list(0x41880c, 0x80000030, 0, 0); - mmio_list(0x4064c8, 0x01800600, 0, 0); - mmio_list(0x418810, 0x80000000, 12, 2); - mmio_list(0x419848, 0x10000000, 12, 2); - - mmio_list(0x405830, 0x02180648, 0, 0); - mmio_list(0x4064c4, 0x0192ffff, 0, 0); - - for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { - u16 magic0 = 0x0218 * priv->tpc_nr[gpc]; - u16 magic1 = 0x0648 * priv->tpc_nr[gpc]; - magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; - magic[gpc][1] = 0x00000000 | (magic1 << 16); - offset += 0x0324 * priv->tpc_nr[gpc]; - } - - for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); - mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); - offset += 0x07ff * priv->tpc_nr[gpc]; - } + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); + const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth, + impl->bundle_size / 0x20); + const u32 token_limit = impl->bundle_token_limit; + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); + mmio_refn(info, 0x408004, 0x00000000, s, b); + mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_refn(info, 0x418808, 0x00000000, s, b); + mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); +} - mmio_list(0x17e91c, 0x06060609, 0, 0); - mmio_list(0x17e920, 0x00090a05, 0, 0); +void +nve4_grctx_generate_pagepool(struct nvc0_grctx *info) +{ + const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv); + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access); + mmio_refn(info, 0x40800c, 0x00000000, s, b); + mmio_wr32(info, 0x408010, 0x80000000); + mmio_refn(info, 0x419004, 0x00000000, s, b); + mmio_wr32(info, 0x419008, 0x00000000); + mmio_wr32(info, 0x4064cc, 0x80000000); } void @@ -957,7 +944,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass; int i; - nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); nvc0_graph_mmio(priv, oclass->hub); nvc0_graph_mmio(priv, 
oclass->gpc); @@ -967,7 +954,9 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) nv_wr32(priv, 0x404154, 0x00000000); - oclass->mods(priv, info); + oclass->bundle(info); + oclass->pagepool(info); + oclass->attrib(info); oclass->unkn(priv); nvc0_grctx_generate_tpcid(priv); @@ -991,7 +980,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) nvc0_graph_icmd(priv, oclass->icmd); nv_wr32(priv, 0x404154, 0x00000400); nvc0_graph_mthd(priv, oclass->mthd); - nv_mask(priv, 0x000260, 0x00000001, 0x00000001); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); nv_mask(priv, 0x418800, 0x00200000, 0x00200000); nv_mask(priv, 0x41be10, 0x00800000, 0x00800000); @@ -1009,7 +998,6 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nve4_grctx_generate_main, - .mods = nve4_grctx_generate_mods, .unkn = nve4_grctx_generate_unkn, .hub = nve4_grctx_pack_hub, .gpc = nve4_grctx_pack_gpc, @@ -1018,4 +1006,15 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) { .ppc = nve4_grctx_pack_ppc, .icmd = nve4_grctx_pack_icmd, .mthd = nve4_grctx_pack_mthd, + .bundle = nve4_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0x600, + .pagepool = nve4_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvd7_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, + .alpha_nr_max = 0x7ff, + .alpha_nr = 0x648, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c index dec03f04114d..e9b0dcf95a49 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c @@ -279,7 +279,7 @@ nvf0_grctx_init_icmd_0[] = { {} }; -static const struct nvc0_graph_pack +const struct nvc0_graph_pack nvf0_grctx_pack_icmd[] = { { nvf0_grctx_init_icmd_0 }, {} @@ -668,7 +668,7 @@ nvf0_grctx_init_be_0[] = { {} }; -static const struct nvc0_graph_pack +const struct nvc0_graph_pack nvf0_grctx_pack_hub[] = { { nvc0_grctx_init_main_0 }, { nvf0_grctx_init_fe_0 }, @@ -704,7 +704,7 @@ nvf0_grctx_init_gpc_unk_2[] = { {} }; -static const struct nvc0_graph_pack +const struct nvc0_graph_pack nvf0_grctx_pack_gpc[] = { { nvc0_grctx_init_gpc_unk_0 }, { nvd9_grctx_init_prop_0 }, @@ -718,7 +718,7 @@ nvf0_grctx_pack_gpc[] = { {} }; -static const struct nvc0_graph_init +const struct nvc0_graph_init nvf0_grctx_init_tex_0[] = { { 0x419a00, 1, 0x04, 0x000000f0 }, { 0x419a04, 1, 0x04, 0x00000001 }, @@ -797,7 +797,7 @@ nvf0_grctx_init_cbm_0[] = { {} }; -static const struct nvc0_graph_pack +const struct nvc0_graph_pack nvf0_grctx_pack_ppc[] = { { nve4_grctx_init_pes_0 }, { nvf0_grctx_init_cbm_0 }, @@ -809,58 +809,6 @@ nvf0_grctx_pack_ppc[] = { * PGRAPH context implementation ******************************************************************************/ -static void -nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) -{ - u32 magic[GPC_MAX][4]; - u32 offset; - int gpc; - - mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); - mmio_list(0x40800c, 0x00000000, 8, 1); - mmio_list(0x408010, 0x80000000, 0, 0); - mmio_list(0x419004, 0x00000000, 8, 1); - mmio_list(0x419008, 0x00000000, 0, 0); - mmio_list(0x4064cc, 0x80000000, 0, 0); - mmio_list(0x408004, 0x00000000, 8, 0); - mmio_list(0x408008, 0x80000030, 0, 
0); - mmio_list(0x418808, 0x00000000, 8, 0); - mmio_list(0x41880c, 0x80000030, 0, 0); - mmio_list(0x4064c8, 0x01800600, 0, 0); - mmio_list(0x418810, 0x80000000, 12, 2); - mmio_list(0x419848, 0x10000000, 12, 2); - - mmio_list(0x405830, 0x02180648, 0, 0); - mmio_list(0x4064c4, 0x0192ffff, 0, 0); - - for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) { - u16 magic0 = 0x0218 * (priv->tpc_nr[gpc] - 1); - u16 magic1 = 0x0648 * (priv->tpc_nr[gpc] - 1); - u16 magic2 = 0x0218; - u16 magic3 = 0x0648; - magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset; - magic[gpc][1] = 0x00000000 | (magic1 << 16); - offset += 0x0324 * (priv->tpc_nr[gpc] - 1); - magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset; - magic[gpc][3] = 0x00000000 | (magic3 << 16); - offset += 0x0324; - } - - for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0); - mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0); - offset += 0x07ff * (priv->tpc_nr[gpc] - 1); - mmio_list(GPC_UNIT(gpc, 0x32c0), magic[gpc][2], 0, 0); - mmio_list(GPC_UNIT(gpc, 0x32e4), magic[gpc][3] | offset, 0, 0); - offset += 0x07ff; - } - - mmio_list(0x17e91c, 0x06060609, 0, 0); - mmio_list(0x17e920, 0x00090a05, 0, 0); -} - struct nouveau_oclass * nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { .base.handle = NV_ENGCTX(GR, 0xf0), @@ -873,7 +821,6 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { .wr32 = _nouveau_graph_context_wr32, }, .main = nve4_grctx_generate_main, - .mods = nvf0_grctx_generate_mods, .unkn = nve4_grctx_generate_unkn, .hub = nvf0_grctx_pack_hub, .gpc = nvf0_grctx_pack_gpc, @@ -882,4 +829,15 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) { .ppc = nvf0_grctx_pack_ppc, .icmd = nvf0_grctx_pack_icmd, .mthd = nvf0_grctx_pack_mthd, + .bundle = nve4_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0x7c0, + .pagepool = nve4_grctx_generate_pagepool, + .pagepool_size = 0x8000, + .attrib = nvd7_grctx_generate_attrib, + .attrib_nr_max = 0x324, + .attrib_nr = 0x218, + .alpha_nr_max = 0x7ff, + .alpha_nr = 0x648, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c new file mode 100644 index 000000000000..d07b19dc168d --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c @@ -0,0 +1,117 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include "nvc0.h" +#include "ctxnvc0.h" + +/******************************************************************************* + * PGRAPH register lists + ******************************************************************************/ + +static const struct nvc0_graph_init +gk110b_graph_init_l1c_0[] = { + { 0x419c98, 1, 0x04, 0x00000000 }, + { 0x419ca8, 1, 0x04, 0x00000000 }, + { 0x419cb0, 1, 0x04, 0x09000000 }, + { 0x419cb4, 1, 0x04, 0x00000000 }, + { 0x419cb8, 1, 0x04, 0x00b08bea }, + { 0x419c84, 1, 0x04, 0x00010384 }, + { 0x419cbc, 1, 0x04, 0x281b3646 }, + { 0x419cc0, 2, 0x04, 0x00000000 }, + { 0x419c80, 1, 0x04, 0x00020230 }, + { 0x419ccc, 2, 0x04, 0x00000000 }, + {} +}; + +static const struct nvc0_graph_init +gk110b_graph_init_sm_0[] = { + { 0x419e00, 1, 0x04, 0x00000080 }, + { 0x419ea0, 1, 0x04, 0x00000000 }, + { 0x419ee4, 1, 0x04, 0x00000000 }, + { 0x419ea4, 1, 0x04, 0x00000100 }, + { 0x419ea8, 1, 0x04, 0x00000000 }, + { 0x419eb4, 1, 0x04, 0x00000000 }, + { 0x419ebc, 2, 0x04, 0x00000000 }, + { 0x419edc, 1, 0x04, 0x00000000 }, + { 0x419f00, 1, 0x04, 0x00000000 }, + { 0x419ed0, 1, 0x04, 0x00002616 }, + { 0x419f74, 1, 0x04, 0x00015555 }, + { 0x419f80, 4, 0x04, 0x00000000 }, + {} +}; + +static const struct nvc0_graph_pack +gk110b_graph_pack_mmio[] = { + { nve4_graph_init_main_0 }, + { nvf0_graph_init_fe_0 }, + { nvc0_graph_init_pri_0 }, + { nvc0_graph_init_rstr2d_0 }, + { nvd9_graph_init_pd_0 }, + { nvf0_graph_init_ds_0 }, + { nvc0_graph_init_scc_0 }, + { nvf0_graph_init_sked_0 }, + { nvf0_graph_init_cwd_0 }, + { nvd9_graph_init_prop_0 }, + { nvc1_graph_init_gpc_unk_0 }, + { nvc0_graph_init_setup_0 }, + { nvc0_graph_init_crstr_0 }, + { nvc1_graph_init_setup_1 }, + { nvc0_graph_init_zcull_0 }, + { nvd9_graph_init_gpm_0 }, + { nvf0_graph_init_gpc_unk_1 }, + { nvc0_graph_init_gcc_0 }, + { nve4_graph_init_tpccs_0 }, + { nvf0_graph_init_tex_0 }, + { nve4_graph_init_pe_0 }, + { gk110b_graph_init_l1c_0 }, + { nvc0_graph_init_mpc_0 }, + { gk110b_graph_init_sm_0 }, + { nvd7_graph_init_pes_0 }, + { nvd7_graph_init_wwdx_0 }, + { nvd7_graph_init_cbm_0 }, + { nve4_graph_init_be_0 }, + { nvc0_graph_init_fe_1 }, + {} +}; + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +struct nouveau_oclass * +gk110b_graph_oclass = &(struct nvc0_graph_oclass) { + .base.handle = NV_ENGINE(GR, 0xf1), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nvc0_graph_ctor, + .dtor = nvc0_graph_dtor, + .init = nve4_graph_init, + .fini = nvf0_graph_fini, + }, + .cclass = &gk110b_grctx_oclass, + .sclass = nvf0_graph_sclass, + .mmio = gk110b_graph_pack_mmio, + .fecs.ucode = &nvf0_graph_fecs_ucode, + .gpccs.ucode = &nvf0_graph_gpccs_ucode, + .ppc_nr = 2, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c index 83048a56430d..7d0abe9f3fe7 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c @@ -27,8 +27,8 @@ static struct nouveau_oclass gk20a_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0xa040, &nouveau_object_ofuncs }, - { 0xa297, &nouveau_object_ofuncs }, - { 0xa0c0, &nouveau_object_ofuncs }, + { KEPLER_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, {} }; @@ -39,9 +39,10 @@ gk20a_graph_oclass = 
&(struct nvc0_graph_oclass) { .ctor = nvc0_graph_ctor, .dtor = nvc0_graph_dtor, .init = nve4_graph_init, - .fini = nve4_graph_fini, + .fini = _nouveau_graph_fini, }, .cclass = &gk20a_grctx_oclass, .sclass = gk20a_graph_sclass, .mmio = nve4_graph_pack_mmio, + .ppc_nr = 1, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c index 21c5f31d607f..4bdbdab2fd9a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c @@ -36,8 +36,8 @@ static struct nouveau_oclass gm107_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0xa140, &nouveau_object_ofuncs }, - { 0xb097, &nouveau_object_ofuncs }, - { 0xb0c0, &nouveau_object_ofuncs }, + { MAXWELL_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { MAXWELL_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, {} }; @@ -425,6 +425,9 @@ gm107_graph_init(struct nouveau_object *object) nv_wr32(priv, 0x400134, 0xffffffff); nv_wr32(priv, 0x400054, 0x2c350f63); + + nvc0_graph_zbc_init(priv); + return nvc0_graph_init_ctxctl(priv); } @@ -462,4 +465,5 @@ gm107_graph_oclass = &(struct nvc0_graph_oclass) { .mmio = gm107_graph_pack_mmio, .fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL, .gpccs.ucode = &gm107_graph_gpccs_ucode, + .ppc_nr = 2, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c index ad13dcdd15f9..f70e2f67a4dd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c @@ -24,7 +24,6 @@ #include <core/client.h> #include <core/os.h> -#include <core/class.h> #include <core/handle.h> #include <core/namedb.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c index 4532f7e5618c..2b12b09683c8 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c @@ -24,7 +24,6 @@ #include <core/client.h> #include <core/os.h> -#include <core/class.h> #include <core/handle.h> #include <subdev/fb.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c index 00ea1a089822..2b0e8f48c029 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c @@ -33,7 +33,7 @@ static struct nouveau_oclass nv108_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0xa140, &nouveau_object_ofuncs }, - { 0xa197, &nouveau_object_ofuncs }, + { KEPLER_B, &nvc0_fermi_ofuncs }, { 0xa1c0, &nouveau_object_ofuncs }, {} }; @@ -220,4 +220,5 @@ nv108_graph_oclass = &(struct nvc0_graph_oclass) { .mmio = nv108_graph_pack_mmio, .fecs.ucode = &nv108_graph_fecs_ucode, .gpccs.ucode = &nv108_graph_gpccs_ucode, + .ppc_nr = 1, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c index d145e080899a..ceb9c746d94e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c @@ -1,6 +1,5 @@ #include <core/client.h> #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/handle.h> #include <core/enum.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c index 7a80d005a974..f8a6fdd7d5e8 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c +++ 
b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c @@ -1,5 +1,4 @@ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/enum.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c index 3e1f32ee43d4..5de9caa2ef67 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c @@ -1,5 +1,4 @@ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/enum.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c index e451db32e92a..2f9dbc709389 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c @@ -1,5 +1,4 @@ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/enum.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c index 9385ac7b44a4..34dd26c70b64 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c @@ -1,5 +1,4 @@ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/enum.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c index 9ce84b73f86a..2fb5756d9f66 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c @@ -1,5 +1,4 @@ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/enum.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c index 6477fbf6a550..4f401174868d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c @@ -24,7 +24,6 @@ #include <core/client.h> #include <core/os.h> -#include <core/class.h> #include <core/handle.h> #include <core/engctx.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index 20665c21d80e..38e0aa26f1cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/client.h> #include <core/handle.h> #include <core/engctx.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c index aa0838916354..30fd1dc64f93 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c @@ -26,15 +26,232 @@ #include "ctxnvc0.h" /******************************************************************************* + * Zero Bandwidth Clear + ******************************************************************************/ + +static void +nvc0_graph_zbc_clear_color(struct nvc0_graph_priv *priv, int zbc) +{ + if (priv->zbc_color[zbc].format) { + nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]); + nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]); + nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]); + nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]); + } + nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format); + nv_wr32(priv, 0x405820, zbc); + nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */ +} + +static int +nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format, + const u32 
ds[4], const u32 l2[4]) +{ + struct nouveau_ltc *ltc = nouveau_ltc(priv); + int zbc = -ENOSPC, i; + + for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { + if (priv->zbc_color[i].format) { + if (priv->zbc_color[i].format != format) + continue; + if (memcmp(priv->zbc_color[i].ds, ds, sizeof( + priv->zbc_color[i].ds))) + continue; + if (memcmp(priv->zbc_color[i].l2, l2, sizeof( + priv->zbc_color[i].l2))) { + WARN_ON(1); + return -EINVAL; + } + return i; + } else { + zbc = (zbc < 0) ? i : zbc; + } + } + + if (zbc < 0) + return zbc; + + memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds)); + memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2)); + priv->zbc_color[zbc].format = format; + ltc->zbc_color_get(ltc, zbc, l2); + nvc0_graph_zbc_clear_color(priv, zbc); + return zbc; +} + +static void +nvc0_graph_zbc_clear_depth(struct nvc0_graph_priv *priv, int zbc) +{ + if (priv->zbc_depth[zbc].format) + nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds); + nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format); + nv_wr32(priv, 0x405820, zbc); + nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */ +} + +static int +nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format, + const u32 ds, const u32 l2) +{ + struct nouveau_ltc *ltc = nouveau_ltc(priv); + int zbc = -ENOSPC, i; + + for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) { + if (priv->zbc_depth[i].format) { + if (priv->zbc_depth[i].format != format) + continue; + if (priv->zbc_depth[i].ds != ds) + continue; + if (priv->zbc_depth[i].l2 != l2) { + WARN_ON(1); + return -EINVAL; + } + return i; + } else { + zbc = (zbc < 0) ? i : zbc; + } + } + + if (zbc < 0) + return zbc; + + priv->zbc_depth[zbc].format = format; + priv->zbc_depth[zbc].ds = ds; + priv->zbc_depth[zbc].l2 = l2; + ltc->zbc_depth_get(ltc, zbc, l2); + nvc0_graph_zbc_clear_depth(priv, zbc); + return zbc; +} + +/******************************************************************************* * Graphics object classes ******************************************************************************/ +static int +nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size) +{ + struct nvc0_graph_priv *priv = (void *)object->engine; + union { + struct fermi_a_zbc_color_v0 v0; + } *args = data; + int ret; + + if (nvif_unpack(args->v0, 0, 0, false)) { + switch (args->v0.format) { + case FERMI_A_ZBC_COLOR_V0_FMT_ZERO: + case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE: + case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32: + case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16: + case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16: + case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16: + case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16: + case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16: + case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8: + case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8: + case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10: + case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10: + case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8: + case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8: + case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8: + case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8: + case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8: + case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10: + case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11: + ret = nvc0_graph_zbc_color_get(priv, args->v0.format, + args->v0.ds, + args->v0.l2); + if (ret >= 0) { + args->v0.index = ret; + return 0; + } + break; + default: + return -EINVAL; + } + } + + return ret; +} + +static int +nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void 
*data, u32 size) +{ + struct nvc0_graph_priv *priv = (void *)object->engine; + union { + struct fermi_a_zbc_depth_v0 v0; + } *args = data; + int ret; + + if (nvif_unpack(args->v0, 0, 0, false)) { + switch (args->v0.format) { + case FERMI_A_ZBC_DEPTH_V0_FMT_FP32: + ret = nvc0_graph_zbc_depth_get(priv, args->v0.format, + args->v0.ds, + args->v0.l2); + return (ret >= 0) ? 0 : -ENOSPC; + default: + return -EINVAL; + } + } + + return ret; +} + +static int +nvc0_fermi_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size) +{ + switch (mthd) { + case FERMI_A_ZBC_COLOR: + return nvc0_fermi_mthd_zbc_color(object, data, size); + case FERMI_A_ZBC_DEPTH: + return nvc0_fermi_mthd_zbc_depth(object, data, size); + default: + break; + } + return -EINVAL; +} + +struct nouveau_ofuncs +nvc0_fermi_ofuncs = { + .ctor = _nouveau_object_ctor, + .dtor = nouveau_object_destroy, + .init = nouveau_object_init, + .fini = nouveau_object_fini, + .mthd = nvc0_fermi_mthd, +}; + +static int +nvc0_graph_set_shader_exceptions(struct nouveau_object *object, u32 mthd, + void *pdata, u32 size) +{ + struct nvc0_graph_priv *priv = (void *)nv_engine(object); + if (size >= sizeof(u32)) { + u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000; + nv_wr32(priv, 0x419e44, data); + nv_wr32(priv, 0x419e4c, data); + return 0; + } + return -EINVAL; +} + +struct nouveau_omthds +nvc0_graph_9097_omthds[] = { + { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions }, + {} +}; + +struct nouveau_omthds +nvc0_graph_90c0_omthds[] = { + { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions }, + {} +}; + struct nouveau_oclass nvc0_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0x9039, &nouveau_object_ofuncs }, - { 0x9097, &nouveau_object_ofuncs }, - { 0x90c0, &nouveau_object_ofuncs }, + { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, {} }; @@ -98,7 +315,7 @@ nvc0_graph_context_ctor(struct nouveau_object *parent, u32 addr = mmio->addr; u32 data = mmio->data; - if (mmio->shift) { + if (mmio->buffer >= 0) { u64 info = chan->data[mmio->buffer].vma.offset; data |= info >> mmio->shift; } @@ -407,6 +624,35 @@ nvc0_graph_pack_mmio[] = { ******************************************************************************/ void +nvc0_graph_zbc_init(struct nvc0_graph_priv *priv) +{ + const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; + const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }; + const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; + const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000, + 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; + struct nouveau_ltc *ltc = nouveau_ltc(priv); + int index; + + if (!priv->zbc_color[0].format) { + nvc0_graph_zbc_color_get(priv, 1, & zero[0], &zero[4]); + nvc0_graph_zbc_color_get(priv, 2, & one[0], &one[4]); + nvc0_graph_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]); + nvc0_graph_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]); + nvc0_graph_zbc_depth_get(priv, 1, 0x00000000, 0x00000000); + nvc0_graph_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000); + } + + for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) + nvc0_graph_zbc_clear_color(priv, index); + for (index = ltc->zbc_min; index <= ltc->zbc_max; index++) + nvc0_graph_zbc_clear_depth(priv, index); +} + +void nvc0_graph_mmio(struct nvc0_graph_priv 
*priv, const struct nvc0_graph_pack *p) { const struct nvc0_graph_pack *pack; @@ -969,17 +1215,16 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) { struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass; struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass; - u32 r000260; int i; if (priv->firmware) { /* load fuc microcode */ - r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d); nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad); - nv_wr32(priv, 0x000260, r000260); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); /* start both of them running */ nv_wr32(priv, 0x409840, 0xffffffff); @@ -1066,7 +1311,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) } /* load HUB microcode */ - r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 0); nv_wr32(priv, 0x4091c0, 0x01000000); for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++) nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]); @@ -1089,7 +1334,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv) nv_wr32(priv, 0x41a188, i >> 6); nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]); } - nv_wr32(priv, 0x000260, r000260); + nouveau_mc(priv)->unk260(nouveau_mc(priv), 1); /* load register lists */ nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000); @@ -1224,6 +1469,9 @@ nvc0_graph_init(struct nouveau_object *object) nv_wr32(priv, 0x400134, 0xffffffff); nv_wr32(priv, 0x400054, 0x34ce3464); + + nvc0_graph_zbc_init(priv); + return nvc0_graph_init_ctxctl(priv); } @@ -1287,7 +1535,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_device *device = nv_device(parent); struct nvc0_graph_priv *priv; bool use_ext_fw, enable; - int ret, i; + int ret, i, j; use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW", oclass->fecs.ucode == NULL); @@ -1333,6 +1581,11 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine, for (i = 0; i < priv->gpc_nr; i++) { priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608)); priv->tpc_total += priv->tpc_nr[i]; + priv->ppc_nr[i] = oclass->ppc_nr; + for (j = 0; j < priv->ppc_nr[i]; j++) { + u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4))); + priv->ppc_tpc_nr[i][j] = hweight8(mask); + } } /*XXX: these need figuring out... 
though it might not even matter */ diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h index ffc289198dd8..7ed9e89c3435 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h @@ -30,10 +30,15 @@ #include <core/gpuobj.h> #include <core/option.h> +#include <nvif/unpack.h> +#include <nvif/class.h> + #include <subdev/fb.h> #include <subdev/vm.h> #include <subdev/bar.h> #include <subdev/timer.h> +#include <subdev/mc.h> +#include <subdev/ltc.h> #include <engine/fifo.h> #include <engine/graph.h> @@ -60,7 +65,7 @@ struct nvc0_graph_mmio { u32 addr; u32 data; u32 shift; - u32 buffer; + int buffer; }; struct nvc0_graph_fuc { @@ -68,6 +73,18 @@ struct nvc0_graph_fuc { u32 size; }; +struct nvc0_graph_zbc_color { + u32 format; + u32 ds[4]; + u32 l2[4]; +}; + +struct nvc0_graph_zbc_depth { + u32 format; + u32 ds; + u32 l2; +}; + struct nvc0_graph_priv { struct nouveau_graph base; @@ -77,10 +94,15 @@ struct nvc0_graph_priv { struct nvc0_graph_fuc fuc41ad; bool firmware; + struct nvc0_graph_zbc_color zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT]; + struct nvc0_graph_zbc_depth zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT]; + u8 rop_nr; u8 gpc_nr; u8 tpc_nr[GPC_MAX]; u8 tpc_total; + u8 ppc_nr[GPC_MAX]; + u8 ppc_tpc_nr[GPC_MAX][4]; struct nouveau_gpuobj *unk4188b4; struct nouveau_gpuobj *unk4188b8; @@ -118,12 +140,20 @@ int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *, struct nouveau_object **); void nvc0_graph_dtor(struct nouveau_object *); int nvc0_graph_init(struct nouveau_object *); +void nvc0_graph_zbc_init(struct nvc0_graph_priv *); + int nve4_graph_fini(struct nouveau_object *, bool); int nve4_graph_init(struct nouveau_object *); -extern struct nouveau_oclass nvc0_graph_sclass[]; +int nvf0_graph_fini(struct nouveau_object *, bool); + +extern struct nouveau_ofuncs nvc0_fermi_ofuncs; +extern struct nouveau_oclass nvc0_graph_sclass[]; +extern struct nouveau_omthds nvc0_graph_9097_omthds[]; +extern struct nouveau_omthds nvc0_graph_90c0_omthds[]; extern struct nouveau_oclass nvc8_graph_sclass[]; +extern struct nouveau_oclass nvf0_graph_sclass[]; struct nvc0_graph_init { u32 addr; @@ -149,6 +179,9 @@ struct nvc0_graph_ucode { extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode; extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode; +extern struct nvc0_graph_ucode nvf0_graph_fecs_ucode; +extern struct nvc0_graph_ucode nvf0_graph_gpccs_ucode; + struct nvc0_graph_oclass { struct nouveau_oclass base; struct nouveau_oclass **cclass; @@ -160,6 +193,7 @@ struct nvc0_graph_oclass { struct { struct nvc0_graph_ucode *ucode; } gpccs; + int ppc_nr; }; void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *); @@ -223,9 +257,11 @@ extern const struct nvc0_graph_init nve4_graph_init_be_0[]; extern const struct nvc0_graph_pack nve4_graph_pack_mmio[]; extern const struct nvc0_graph_init nvf0_graph_init_fe_0[]; +extern const struct nvc0_graph_init nvf0_graph_init_ds_0[]; extern const struct nvc0_graph_init nvf0_graph_init_sked_0[]; extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[]; extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[]; +extern const struct nvc0_graph_init nvf0_graph_init_tex_0[]; extern const struct nvc0_graph_init nvf0_graph_init_sm_0[]; extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[]; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c index 30cab0b2eba1..93d58e5b82c2 
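The ZBC (zero-bandwidth clear) support added above keeps a software mirror of the colour and depth clear tables in nvc0_graph_priv (the zbc_color[]/zbc_depth[] arrays declared in nvc0.h), seeds the common 0.0 and 1.0 clear values once, and then rewrites every slot the LTC subdev exposes between ltc->zbc_min and ltc->zbc_max. The helper that init path relies on is not part of this hunk; the sketch below shows one plausible lookup-or-allocate shape for it, using only the structures and hooks visible in this patch. The demo_ name and the exact slot-selection policy are illustrative, not the driver's actual implementation.

#include <linux/errno.h>
#include <linux/string.h>

#include "nvc0.h"	/* nvc0_graph_priv; pulls in <subdev/ltc.h> */

/* Find a ZBC colour slot already holding this value, or claim a free one,
 * then push the L2 copy through the LTC hook.  Illustrative only. */
static int
demo_zbc_color_get(struct nvc0_graph_priv *priv, int format,
		   const u32 ds[4], const u32 l2[4])
{
	struct nouveau_ltc *ltc = nouveau_ltc(priv);
	int free = -1, i;

	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
		if (priv->zbc_color[i].format == format &&
		    !memcmp(priv->zbc_color[i].ds, ds, sizeof(u32) * 4) &&
		    !memcmp(priv->zbc_color[i].l2, l2, sizeof(u32) * 4))
			return i;			/* already programmed */
		if (free < 0 && !priv->zbc_color[i].format)
			free = i;			/* first unused slot */
	}
	if (free < 0)
		return -ENOSPC;				/* table exhausted */

	priv->zbc_color[free].format = format;
	memcpy(priv->zbc_color[free].ds, ds, sizeof(u32) * 4);
	memcpy(priv->zbc_color[free].l2, l2, sizeof(u32) * 4);
	ltc->zbc_color_get(ltc, free, l2);		/* program the L2 table */
	return free;
}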
100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c @@ -33,9 +33,9 @@ static struct nouveau_oclass nvc1_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0x9039, &nouveau_object_ofuncs }, - { 0x9097, &nouveau_object_ofuncs }, - { 0x90c0, &nouveau_object_ofuncs }, - { 0x9197, &nouveau_object_ofuncs }, + { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, {} }; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c index a6bf783e1256..692e1eda0eb4 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c @@ -33,10 +33,10 @@ struct nouveau_oclass nvc8_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0x9039, &nouveau_object_ofuncs }, - { 0x9097, &nouveau_object_ofuncs }, - { 0x90c0, &nouveau_object_ofuncs }, - { 0x9197, &nouveau_object_ofuncs }, - { 0x9297, &nouveau_object_ofuncs }, + { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { FERMI_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, {} }; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c index 2a6a94e2a041..41e8445c7eea 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c @@ -133,4 +133,5 @@ nvd7_graph_oclass = &(struct nvc0_graph_oclass) { .mmio = nvd7_graph_pack_mmio, .fecs.ucode = &nvd7_graph_fecs_ucode, .gpccs.ucode = &nvd7_graph_gpccs_ucode, + .ppc_nr = 1, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c index 51e0c075ad34..0c71f5c67ae0 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c @@ -22,6 +22,8 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ +#include <subdev/pwr.h> + #include "nvc0.h" #include "ctxnvc0.h" @@ -33,8 +35,8 @@ static struct nouveau_oclass nve4_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0xa040, &nouveau_object_ofuncs }, - { 0xa097, &nouveau_object_ofuncs }, - { 0xa0c0, &nouveau_object_ofuncs }, + { KEPLER_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, {} }; @@ -190,39 +192,20 @@ nve4_graph_pack_mmio[] = { ******************************************************************************/ int -nve4_graph_fini(struct nouveau_object *object, bool suspend) -{ - struct nvc0_graph_priv *priv = (void *)object; - - /*XXX: this is a nasty hack to power on gr on certain boards - * where it's disabled by therm, somehow. ideally it'd - * be nice to know when we should be doing this, and why, - * but, it's yet to be determined. for now we test for - * the particular mmio error that occurs in the situation, - * and then bash therm in the way nvidia do. 
- */ - nv_mask(priv, 0x000200, 0x08001000, 0x08001000); - nv_rd32(priv, 0x000200); - if (nv_rd32(priv, 0x400700) == 0xbadf1000) { - nv_mask(priv, 0x000200, 0x08001000, 0x00000000); - nv_rd32(priv, 0x000200); - nv_mask(priv, 0x020004, 0xc0000000, 0x40000000); - } - - return nouveau_graph_fini(&priv->base, suspend); -} - -int nve4_graph_init(struct nouveau_object *object) { struct nvc0_graph_oclass *oclass = (void *)object->oclass; struct nvc0_graph_priv *priv = (void *)object; + struct nouveau_pwr *ppwr = nouveau_pwr(priv); const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total); u32 data[TPC_MAX / 8] = {}; u8 tpcnr[GPC_MAX]; int gpc, tpc, rop; int ret, i; + if (ppwr) + ppwr->pgob(ppwr, false); + ret = nouveau_graph_init(&priv->base); if (ret) return ret; @@ -320,6 +303,9 @@ nve4_graph_init(struct nouveau_object *object) nv_wr32(priv, 0x400134, 0xffffffff); nv_wr32(priv, 0x400054, 0x34ce3464); + + nvc0_graph_zbc_init(priv); + return nvc0_graph_init_ctxctl(priv); } @@ -350,11 +336,12 @@ nve4_graph_oclass = &(struct nvc0_graph_oclass) { .ctor = nvc0_graph_ctor, .dtor = nvc0_graph_dtor, .init = nve4_graph_init, - .fini = nve4_graph_fini, + .fini = _nouveau_graph_fini, }, .cclass = &nve4_grctx_oclass, .sclass = nve4_graph_sclass, .mmio = nve4_graph_pack_mmio, .fecs.ucode = &nve4_graph_fecs_ucode, .gpccs.ucode = &nve4_graph_gpccs_ucode, + .ppc_nr = 1, }.base; diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c index c96762122b9b..c306c0f2fc84 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c @@ -29,12 +29,12 @@ * Graphics object classes ******************************************************************************/ -static struct nouveau_oclass +struct nouveau_oclass nvf0_graph_sclass[] = { { 0x902d, &nouveau_object_ofuncs }, { 0xa140, &nouveau_object_ofuncs }, - { 0xa197, &nouveau_object_ofuncs }, - { 0xa1c0, &nouveau_object_ofuncs }, + { KEPLER_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds }, + { KEPLER_COMPUTE_B, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds }, {} }; @@ -50,7 +50,7 @@ nvf0_graph_init_fe_0[] = { {} }; -static const struct nvc0_graph_init +const struct nvc0_graph_init nvf0_graph_init_ds_0[] = { { 0x405844, 1, 0x04, 0x00ffffff }, { 0x405850, 1, 0x04, 0x00000000 }, @@ -88,7 +88,7 @@ nvf0_graph_init_gpc_unk_1[] = { {} }; -static const struct nvc0_graph_init +const struct nvc0_graph_init nvf0_graph_init_tex_0[] = { { 0x419ab0, 1, 0x04, 0x00000000 }, { 0x419ac8, 1, 0x04, 0x00000000 }, @@ -170,7 +170,7 @@ nvf0_graph_pack_mmio[] = { * PGRAPH engine/subdev functions ******************************************************************************/ -static int +int nvf0_graph_fini(struct nouveau_object *object, bool suspend) { struct nvc0_graph_priv *priv = (void *)object; @@ -209,7 +209,7 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend) #include "fuc/hubnvf0.fuc.h" -static struct nvc0_graph_ucode +struct nvc0_graph_ucode nvf0_graph_fecs_ucode = { .code.data = nvf0_grhub_code, .code.size = sizeof(nvf0_grhub_code), @@ -219,7 +219,7 @@ nvf0_graph_fecs_ucode = { #include "fuc/gpcnvf0.fuc.h" -static struct nvc0_graph_ucode +struct nvc0_graph_ucode nvf0_graph_gpccs_ucode = { .code.data = nvf0_grgpc_code, .code.size = sizeof(nvf0_grgpc_code), @@ -241,4 +241,5 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) { .mmio = nvf0_graph_pack_mmio, .fecs.ucode = &nvf0_graph_fecs_ucode, .gpccs.ucode = &nvf0_graph_gpccs_ucode, + .ppc_nr = 2, }.base; diff 
--git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c index 7eb6d94c84e2..d88c700b2f69 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c @@ -24,7 +24,6 @@ #include <core/client.h> #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/handle.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c index d4e7ec0ba68c..bdb2f20ff7b1 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <subdev/fb.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c index 3d8c2133e0e8..72c7f33fd29b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/client.h> #include <core/engctx.h> #include <core/handle.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c index 37a2bd9e8078..cae33f86b11a 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <subdev/vm.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c index 96f5aa92677b..e9cc8b116a24 100644 --- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c +++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <subdev/vm.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c index e9c5e51943ef..63013812f7c9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c +++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c @@ -22,8 +22,11 @@ * Authors: Ben Skeggs */ +#include <core/client.h> #include <core/option.h> -#include <core/class.h> +#include <nvif/unpack.h> +#include <nvif/class.h> +#include <nvif/ioctl.h> #include <subdev/clock.h> @@ -101,24 +104,28 @@ nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name, * Perfmon object classes ******************************************************************************/ static int -nouveau_perfctr_query(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nouveau_perfctr_query(struct nouveau_object *object, void *data, u32 size) { + union { + struct nvif_perfctr_query_v0 v0; + } *args = data; struct nouveau_device *device = nv_device(object); struct nouveau_perfmon *ppm = (void *)object->engine; struct nouveau_perfdom *dom = NULL, *chk; - struct nv_perfctr_query *args = data; const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false); const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all); const char *name; int tmp = 0, di, si; - char path[64]; - - if (size < sizeof(*args)) - return -EINVAL; + int ret; - di = (args->iter & 0xff000000) >> 24; - si = (args->iter & 0x00ffffff) - 1; + nv_ioctl(object, "perfctr query size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "perfctr query vers %d iter %08x\n", + args->v0.version, args->v0.iter); 
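The perfmon control methods above switch from fixed-size argument structs (the old nv_perfctr_* layouts) to the versioned nvif argument scheme: the caller supplies a union of versioned layouts and the handler accepts whichever revision it understands via nvif_unpack()/nvif_unvers(). The fragment below is a minimal stand-in for that size-and-version check, not the real macro; the demo_query_v0 layout and function name are invented for illustration.

#include <linux/errno.h>
#include <linux/types.h>

struct demo_query_v0 {
	u8  version;			/* must be 0 for this layout */
	u8  pad01[3];
	u32 iter;
	char name[64];
};

/* Accept the v0 layout only if the caller passed enough data and asked
 * for a revision this handler understands. */
static int
demo_unpack_query_v0(void *data, u32 size, struct demo_query_v0 **pv0)
{
	struct demo_query_v0 *v0 = data;

	if (size < sizeof(*v0))
		return -ENOSYS;			/* too small for v0 */
	if (v0->version != 0)
		return -ENOSYS;			/* unknown revision */

	*pv0 = v0;
	return 0;
}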
+ di = (args->v0.iter & 0xff000000) >> 24; + si = (args->v0.iter & 0x00ffffff) - 1; + } else + return ret; list_for_each_entry(chk, &ppm->domains, head) { if (tmp++ == di) { @@ -132,19 +139,17 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd, if (si >= 0) { if (raw || !(name = dom->signal[si].name)) { - snprintf(path, sizeof(path), "/%s/%02x", dom->name, si); - name = path; + snprintf(args->v0.name, sizeof(args->v0.name), + "/%s/%02x", dom->name, si); + } else { + strncpy(args->v0.name, name, sizeof(args->v0.name)); } - - if (args->name) - strncpy(args->name, name, args->size); - args->size = strlen(name) + 1; } do { while (++si < dom->signal_nr) { if (all || dom->signal[si].name) { - args->iter = (di << 24) | ++si; + args->v0.iter = (di << 24) | ++si; return 0; } } @@ -153,21 +158,26 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd, dom = list_entry(dom->head.next, typeof(*dom), head); } while (&dom->head != &ppm->domains); - args->iter = 0xffffffff; + args->v0.iter = 0xffffffff; return 0; } static int -nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size) { + union { + struct nvif_perfctr_sample none; + } *args = data; struct nouveau_perfmon *ppm = (void *)object->engine; struct nouveau_perfctr *ctr, *tmp; struct nouveau_perfdom *dom; - struct nv_perfctr_sample *args = data; + int ret; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(object, "perfctr sample size %d\n", size); + if (nvif_unvers(args->none)) { + nv_ioctl(object, "perfctr sample\n"); + } else + return ret; ppm->sequence++; list_for_each_entry(dom, &ppm->domains, head) { @@ -206,22 +216,45 @@ nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd, } static int -nouveau_perfctr_read(struct nouveau_object *object, u32 mthd, - void *data, u32 size) +nouveau_perfctr_read(struct nouveau_object *object, void *data, u32 size) { + union { + struct nvif_perfctr_read_v0 v0; + } *args = data; struct nouveau_perfctr *ctr = (void *)object; - struct nv_perfctr_read *args = data; + int ret; + + nv_ioctl(object, "perfctr read size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + nv_ioctl(object, "perfctr read vers %d\n", args->v0.version); + } else + return ret; - if (size < sizeof(*args)) - return -EINVAL; if (!ctr->clk) return -EAGAIN; - args->clk = ctr->clk; - args->ctr = ctr->ctr; + args->v0.clk = ctr->clk; + args->v0.ctr = ctr->ctr; return 0; } +static int +nouveau_perfctr_mthd(struct nouveau_object *object, u32 mthd, + void *data, u32 size) +{ + switch (mthd) { + case NVIF_PERFCTR_V0_QUERY: + return nouveau_perfctr_query(object, data, size); + case NVIF_PERFCTR_V0_SAMPLE: + return nouveau_perfctr_sample(object, data, size); + case NVIF_PERFCTR_V0_READ: + return nouveau_perfctr_read(object, data, size); + default: + break; + } + return -EINVAL; +} + static void nouveau_perfctr_dtor(struct nouveau_object *object) { @@ -237,19 +270,27 @@ nouveau_perfctr_ctor(struct nouveau_object *parent, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { + union { + struct nvif_perfctr_v0 v0; + } *args = data; struct nouveau_perfmon *ppm = (void *)engine; struct nouveau_perfdom *dom = NULL; struct nouveau_perfsig *sig[4] = {}; struct nouveau_perfctr *ctr; - struct nv_perfctr_class *args = data; int ret, i; - if (size < sizeof(*args)) - return -EINVAL; + nv_ioctl(parent, "create perfctr size %d\n", size); + if (nvif_unpack(args->v0, 0, 0, false)) { + 
nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n", + args->v0.version, args->v0.logic_op); + } else + return ret; - for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) { - sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name, - args->signal[i].size, &dom); + for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) { + sig[i] = nouveau_perfsig_find(ppm, args->v0.name[i], + strnlen(args->v0.name[i], + sizeof(args->v0.name[i])), + &dom); if (!sig[i]) return -EINVAL; } @@ -260,7 +301,7 @@ nouveau_perfctr_ctor(struct nouveau_object *parent, return ret; ctr->slot = -1; - ctr->logic_op = args->logic_op; + ctr->logic_op = args->v0.logic_op; ctr->signal[0] = sig[0]; ctr->signal[1] = sig[1]; ctr->signal[2] = sig[2]; @@ -276,21 +317,13 @@ nouveau_perfctr_ofuncs = { .dtor = nouveau_perfctr_dtor, .init = nouveau_object_init, .fini = nouveau_object_fini, -}; - -static struct nouveau_omthds -nouveau_perfctr_omthds[] = { - { NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query }, - { NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample }, - { NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read }, - {} + .mthd = nouveau_perfctr_mthd, }; struct nouveau_oclass nouveau_perfmon_sclass[] = { - { .handle = NV_PERFCTR_CLASS, + { .handle = NVIF_IOCTL_NEW_V0_PERFCTR, .ofuncs = &nouveau_perfctr_ofuncs, - .omthds = nouveau_perfctr_omthds, }, {}, }; @@ -303,6 +336,7 @@ nouveau_perfctx_dtor(struct nouveau_object *object) { struct nouveau_perfmon *ppm = (void *)object->engine; mutex_lock(&nv_subdev(ppm)->mutex); + nouveau_engctx_destroy(&ppm->context->base); ppm->context = NULL; mutex_unlock(&nv_subdev(ppm)->mutex); } diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c index c571758e4a27..64df15c7f051 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <engine/software.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c index a62f11a78430..f54a2253deca 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <engine/software.h> diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c index f3b4d9dbf23c..4d2994d8cc32 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c @@ -23,12 +23,12 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/namedb.h> #include <core/handle.h> #include <core/gpuobj.h> #include <core/event.h> +#include <nvif/event.h> #include <subdev/bar.h> @@ -86,10 +86,10 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd, { struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); u32 head = *(u32 *)args; - if (head >= chan->vblank.nr_event) + if (head >= nouveau_disp(chan)->vblank.index_nr) return -EINVAL; - nouveau_event_get(chan->vblank.event[head]); + nvkm_notify_get(&chan->vblank.notify[head]); return 0; } @@ -124,9 +124,10 @@ nv50_software_sclass[] = { ******************************************************************************/ static int 
-nv50_software_vblsem_release(void *data, u32 type, int head) +nv50_software_vblsem_release(struct nvkm_notify *notify) { - struct nv50_software_chan *chan = data; + struct nv50_software_chan *chan = + container_of(notify, typeof(*chan), vblank.notify[notify->index]); struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; struct nouveau_bar *bar = nouveau_bar(priv); @@ -142,7 +143,7 @@ nv50_software_vblsem_release(void *data, u32 type, int head) nv_wr32(priv, 0x060014, chan->vblank.value); } - return NVKM_EVENT_DROP; + return NVKM_NOTIFY_DROP; } void @@ -151,11 +152,8 @@ nv50_software_context_dtor(struct nouveau_object *object) struct nv50_software_chan *chan = (void *)object; int i; - if (chan->vblank.event) { - for (i = 0; i < chan->vblank.nr_event; i++) - nouveau_event_ref(NULL, &chan->vblank.event[i]); - kfree(chan->vblank.event); - } + for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++) + nvkm_notify_fini(&chan->vblank.notify[i]); nouveau_software_context_destroy(&chan->base); } @@ -176,15 +174,14 @@ nv50_software_context_ctor(struct nouveau_object *parent, if (ret) return ret; - chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0; - chan->vblank.event = kzalloc(chan->vblank.nr_event * - sizeof(*chan->vblank.event), GFP_KERNEL); - if (!chan->vblank.event) - return -ENOMEM; - - for (i = 0; i < chan->vblank.nr_event; i++) { - ret = nouveau_event_new(pdisp->vblank, 1, i, pclass->vblank, - chan, &chan->vblank.event[i]); + for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) { + ret = nvkm_notify_init(&pdisp->vblank, pclass->vblank, false, + &(struct nvif_notify_head_req_v0) { + .head = i, + }, + sizeof(struct nvif_notify_head_req_v0), + sizeof(struct nvif_notify_head_rep_v0), + &chan->vblank.notify[i]); if (ret) return ret; } @@ -198,7 +195,7 @@ nv50_software_cclass = { .base.handle = NV_ENGCTX(SW, 0x50), .base.ofuncs = &(struct nouveau_ofuncs) { .ctor = nv50_software_context_ctor, - .dtor = _nouveau_software_context_dtor, + .dtor = nv50_software_context_dtor, .init = _nouveau_software_context_init, .fini = _nouveau_software_context_fini, }, diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h index bb49a7a20857..41542e725b4b 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h +++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h @@ -19,14 +19,13 @@ int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *, struct nv50_software_cclass { struct nouveau_oclass base; - int (*vblank)(void *, u32, int); + int (*vblank)(struct nvkm_notify *); }; struct nv50_software_chan { struct nouveau_software_chan base; struct { - struct nouveau_eventh **event; - int nr_event; + struct nvkm_notify notify[4]; u32 channel; u32 ctxdma; u64 offset; diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c index 135c20f38356..6af370d3a06d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c @@ -23,7 +23,6 @@ */ #include <core/os.h> -#include <core/class.h> #include <core/engctx.h> #include <core/event.h> @@ -104,9 +103,10 @@ nvc0_software_sclass[] = { ******************************************************************************/ static int -nvc0_software_vblsem_release(void *data, u32 type, int head) +nvc0_software_vblsem_release(struct nvkm_notify *notify) { - struct nv50_software_chan *chan = data; + struct nv50_software_chan *chan = + container_of(notify, 
typeof(*chan), vblank.notify[notify->index]); struct nv50_software_priv *priv = (void *)nv_object(chan)->engine; struct nouveau_bar *bar = nouveau_bar(priv); @@ -116,7 +116,7 @@ nvc0_software_vblsem_release(void *data, u32 type, int head) nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); nv_wr32(priv, 0x060014, chan->vblank.value); - return NVKM_EVENT_DROP; + return NVKM_NOTIFY_DROP; } static struct nv50_software_cclass @@ -124,7 +124,7 @@ nvc0_software_cclass = { .base.handle = NV_ENGCTX(SW, 0xc0), .base.ofuncs = &(struct nouveau_ofuncs) { .ctor = nv50_software_context_ctor, - .dtor = _nouveau_software_context_dtor, + .dtor = nv50_software_context_dtor, .init = _nouveau_software_context_init, .fini = _nouveau_software_context_fini, }, diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h deleted file mode 100644 index e0c812bc884f..000000000000 --- a/drivers/gpu/drm/nouveau/core/include/core/class.h +++ /dev/null @@ -1,470 +0,0 @@ -#ifndef __NOUVEAU_CLASS_H__ -#define __NOUVEAU_CLASS_H__ - -/* Device class - * - * 0080: NV_DEVICE - */ -#define NV_DEVICE_CLASS 0x00000080 - -#define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL -#define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL -#define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL -#define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL -#define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL -#define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL -#define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL -#define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL -#define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL -#define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL -#define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL -#define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL -#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL -#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL -#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL -#define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL -#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL - -struct nv_device_class { - u64 device; /* device identifier, ~0 for client default */ - u64 disable; /* disable particular subsystems */ - u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */ -}; - -/* DMA object classes - * - * 0002: NV_DMA_FROM_MEMORY - * 0003: NV_DMA_TO_MEMORY - * 003d: NV_DMA_IN_MEMORY - */ -#define NV_DMA_FROM_MEMORY_CLASS 0x00000002 -#define NV_DMA_TO_MEMORY_CLASS 0x00000003 -#define NV_DMA_IN_MEMORY_CLASS 0x0000003d - -#define NV_DMA_TARGET_MASK 0x000000ff -#define NV_DMA_TARGET_VM 0x00000000 -#define NV_DMA_TARGET_VRAM 0x00000001 -#define NV_DMA_TARGET_PCI 0x00000002 -#define NV_DMA_TARGET_PCI_US 0x00000003 -#define NV_DMA_TARGET_AGP 0x00000004 -#define NV_DMA_ACCESS_MASK 0x00000f00 -#define NV_DMA_ACCESS_VM 0x00000000 -#define NV_DMA_ACCESS_RD 0x00000100 -#define NV_DMA_ACCESS_WR 0x00000200 -#define NV_DMA_ACCESS_RDWR 0x00000300 - -/* NV50:NVC0 */ -#define NV50_DMA_CONF0_ENABLE 0x80000000 -#define NV50_DMA_CONF0_PRIV 0x00300000 -#define NV50_DMA_CONF0_PRIV_VM 0x00000000 -#define NV50_DMA_CONF0_PRIV_US 0x00100000 -#define NV50_DMA_CONF0_PRIV__S 0x00200000 -#define NV50_DMA_CONF0_PART 0x00030000 -#define NV50_DMA_CONF0_PART_VM 0x00000000 -#define NV50_DMA_CONF0_PART_256 0x00010000 -#define NV50_DMA_CONF0_PART_1KB 0x00020000 -#define NV50_DMA_CONF0_COMP 0x00000180 -#define NV50_DMA_CONF0_COMP_NONE 0x00000000 -#define NV50_DMA_CONF0_COMP_VM 0x00000180 -#define NV50_DMA_CONF0_TYPE 0x0000007f -#define 
NV50_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NV50_DMA_CONF0_TYPE_VM 0x0000007f - -/* NVC0:NVD9 */ -#define NVC0_DMA_CONF0_ENABLE 0x80000000 -#define NVC0_DMA_CONF0_PRIV 0x00300000 -#define NVC0_DMA_CONF0_PRIV_VM 0x00000000 -#define NVC0_DMA_CONF0_PRIV_US 0x00100000 -#define NVC0_DMA_CONF0_PRIV__S 0x00200000 -#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000 -#define NVC0_DMA_CONF0_TYPE 0x000000ff -#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff - -/* NVD9- */ -#define NVD0_DMA_CONF0_ENABLE 0x80000000 -#define NVD0_DMA_CONF0_PAGE 0x00000400 -#define NVD0_DMA_CONF0_PAGE_LP 0x00000000 -#define NVD0_DMA_CONF0_PAGE_SP 0x00000400 -#define NVD0_DMA_CONF0_TYPE 0x000000ff -#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000 -#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff - -struct nv_dma_class { - u32 flags; - u32 pad0; - u64 start; - u64 limit; - u32 conf0; -}; - -/* Perfmon counter class - * - * XXXX: NV_PERFCTR - */ -#define NV_PERFCTR_CLASS 0x0000ffff -#define NV_PERFCTR_QUERY 0x00000000 -#define NV_PERFCTR_SAMPLE 0x00000001 -#define NV_PERFCTR_READ 0x00000002 - -struct nv_perfctr_class { - u16 logic_op; - struct { - char __user *name; /*XXX: use cfu when exposed to userspace */ - u32 size; - } signal[4]; -}; - -struct nv_perfctr_query { - u32 iter; - u32 size; - char __user *name; /*XXX: use ctu when exposed to userspace */ -}; - -struct nv_perfctr_sample { -}; - -struct nv_perfctr_read { - u32 ctr; - u32 clk; -}; - -/* Device control class - * - * XXXX: NV_CONTROL - */ -#define NV_CONTROL_CLASS 0x0000fffe - -#define NV_CONTROL_PSTATE_INFO 0x00000000 -#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1) -#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2) -#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1) -#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2) -#define NV_CONTROL_PSTATE_ATTR 0x00000001 -#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1) -#define NV_CONTROL_PSTATE_USER 0x00000002 -#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1) -#define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2) - -struct nv_control_pstate_info { - u32 count; /* out: number of power states */ - s32 ustate; /* out: current target pstate index */ - u32 pstate; /* out: current pstate index */ -}; - -struct nv_control_pstate_attr { - s32 state; /* in: index of pstate to query - * out: pstate identifier - */ - u32 index; /* in: index of attribute to query - * out: index of next attribute, or 0 if no more - */ - char name[32]; - char unit[16]; - u32 min; - u32 max; -}; - -struct nv_control_pstate_user { - s32 state; /* in: pstate identifier */ -}; - -/* DMA FIFO channel classes - * - * 006b: NV03_CHANNEL_DMA - * 006e: NV10_CHANNEL_DMA - * 176e: NV17_CHANNEL_DMA - * 406e: NV40_CHANNEL_DMA - * 506e: NV50_CHANNEL_DMA - * 826e: NV84_CHANNEL_DMA - */ -#define NV03_CHANNEL_DMA_CLASS 0x0000006b -#define NV10_CHANNEL_DMA_CLASS 0x0000006e -#define NV17_CHANNEL_DMA_CLASS 0x0000176e -#define NV40_CHANNEL_DMA_CLASS 0x0000406e -#define NV50_CHANNEL_DMA_CLASS 0x0000506e -#define NV84_CHANNEL_DMA_CLASS 0x0000826e - -struct nv03_channel_dma_class { - u32 pushbuf; - u32 pad0; - u64 offset; -}; - -/* Indirect FIFO channel classes - * - * 506f: NV50_CHANNEL_IND - * 826f: NV84_CHANNEL_IND - * 906f: NVC0_CHANNEL_IND - * a06f: NVE0_CHANNEL_IND - */ - -#define NV50_CHANNEL_IND_CLASS 0x0000506f -#define NV84_CHANNEL_IND_CLASS 0x0000826f -#define NVC0_CHANNEL_IND_CLASS 0x0000906f -#define NVE0_CHANNEL_IND_CLASS 0x0000a06f - -struct nv50_channel_ind_class { - u32 pushbuf; - u32 ilength; - u64 ioffset; -}; - -#define 
NVE0_CHANNEL_IND_ENGINE_GR 0x00000001 -#define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002 -#define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004 -#define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008 -#define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010 -#define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020 -#define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040 - -struct nve0_channel_ind_class { - u32 pushbuf; - u32 ilength; - u64 ioffset; - u32 engine; -}; - -/* 0046: NV04_DISP - */ - -#define NV04_DISP_CLASS 0x00000046 - -#define NV04_DISP_MTHD 0x00000000 -#define NV04_DISP_MTHD_HEAD 0x00000001 - -#define NV04_DISP_SCANOUTPOS 0x00000000 - -struct nv04_display_class { -}; - -struct nv04_display_scanoutpos { - s64 time[2]; - u32 vblanks; - u32 vblanke; - u32 vtotal; - u32 vline; - u32 hblanks; - u32 hblanke; - u32 htotal; - u32 hline; -}; - -/* 5070: NV50_DISP - * 8270: NV84_DISP - * 8370: NVA0_DISP - * 8870: NV94_DISP - * 8570: NVA3_DISP - * 9070: NVD0_DISP - * 9170: NVE0_DISP - * 9270: NVF0_DISP - * 9470: GM107_DISP - */ - -#define NV50_DISP_CLASS 0x00005070 -#define NV84_DISP_CLASS 0x00008270 -#define NVA0_DISP_CLASS 0x00008370 -#define NV94_DISP_CLASS 0x00008870 -#define NVA3_DISP_CLASS 0x00008570 -#define NVD0_DISP_CLASS 0x00009070 -#define NVE0_DISP_CLASS 0x00009170 -#define NVF0_DISP_CLASS 0x00009270 -#define GM107_DISP_CLASS 0x00009470 - -#define NV50_DISP_MTHD 0x00000000 -#define NV50_DISP_MTHD_HEAD 0x00000003 - -#define NV50_DISP_SCANOUTPOS 0x00000000 - -#define NV50_DISP_SOR_MTHD 0x00010000 -#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000 -#define NV50_DISP_SOR_MTHD_HEAD 0x00000018 -#define NV50_DISP_SOR_MTHD_LINK 0x00000004 -#define NV50_DISP_SOR_MTHD_OR 0x00000003 - -#define NV50_DISP_SOR_PWR 0x00010000 -#define NV50_DISP_SOR_PWR_STATE 0x00000001 -#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001 -#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000 -#define NVA3_DISP_SOR_HDA_ELD 0x00010100 -#define NV84_DISP_SOR_HDMI_PWR 0x00012000 -#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000 -#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000 -#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000 -#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000 -#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f -#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000 -#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff -#define NV94_DISP_SOR_DP_PWR 0x00016000 -#define NV94_DISP_SOR_DP_PWR_STATE 0x00000001 -#define NV94_DISP_SOR_DP_PWR_STATE_OFF 0x00000000 -#define NV94_DISP_SOR_DP_PWR_STATE_ON 0x00000001 - -#define NV50_DISP_DAC_MTHD 0x00020000 -#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000 -#define NV50_DISP_DAC_MTHD_OR 0x00000003 - -#define NV50_DISP_DAC_PWR 0x00020000 -#define NV50_DISP_DAC_PWR_HSYNC 0x00000001 -#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000 -#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001 -#define NV50_DISP_DAC_PWR_VSYNC 0x00000004 -#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000 -#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004 -#define NV50_DISP_DAC_PWR_DATA 0x00000010 -#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000 -#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010 -#define NV50_DISP_DAC_PWR_STATE 0x00000040 -#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000 -#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040 -#define NV50_DISP_DAC_LOAD 0x00020100 -#define NV50_DISP_DAC_LOAD_VALUE 0x00000007 - -#define NV50_DISP_PIOR_MTHD 0x00030000 -#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000 -#define NV50_DISP_PIOR_MTHD_OR 0x00000003 - -#define NV50_DISP_PIOR_PWR 0x00030000 -#define NV50_DISP_PIOR_PWR_STATE 0x00000001 -#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001 -#define 
NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000 -#define NV50_DISP_PIOR_TMDS_PWR 0x00032000 -#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001 -#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001 -#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000 -#define NV50_DISP_PIOR_DP_PWR 0x00036000 -#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001 -#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001 -#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000 - -struct nv50_display_class { -}; - -/* 507a: NV50_DISP_CURS - * 827a: NV84_DISP_CURS - * 837a: NVA0_DISP_CURS - * 887a: NV94_DISP_CURS - * 857a: NVA3_DISP_CURS - * 907a: NVD0_DISP_CURS - * 917a: NVE0_DISP_CURS - * 927a: NVF0_DISP_CURS - * 947a: GM107_DISP_CURS - */ - -#define NV50_DISP_CURS_CLASS 0x0000507a -#define NV84_DISP_CURS_CLASS 0x0000827a -#define NVA0_DISP_CURS_CLASS 0x0000837a -#define NV94_DISP_CURS_CLASS 0x0000887a -#define NVA3_DISP_CURS_CLASS 0x0000857a -#define NVD0_DISP_CURS_CLASS 0x0000907a -#define NVE0_DISP_CURS_CLASS 0x0000917a -#define NVF0_DISP_CURS_CLASS 0x0000927a -#define GM107_DISP_CURS_CLASS 0x0000947a - -struct nv50_display_curs_class { - u32 head; -}; - -/* 507b: NV50_DISP_OIMM - * 827b: NV84_DISP_OIMM - * 837b: NVA0_DISP_OIMM - * 887b: NV94_DISP_OIMM - * 857b: NVA3_DISP_OIMM - * 907b: NVD0_DISP_OIMM - * 917b: NVE0_DISP_OIMM - * 927b: NVE0_DISP_OIMM - * 947b: GM107_DISP_OIMM - */ - -#define NV50_DISP_OIMM_CLASS 0x0000507b -#define NV84_DISP_OIMM_CLASS 0x0000827b -#define NVA0_DISP_OIMM_CLASS 0x0000837b -#define NV94_DISP_OIMM_CLASS 0x0000887b -#define NVA3_DISP_OIMM_CLASS 0x0000857b -#define NVD0_DISP_OIMM_CLASS 0x0000907b -#define NVE0_DISP_OIMM_CLASS 0x0000917b -#define NVF0_DISP_OIMM_CLASS 0x0000927b -#define GM107_DISP_OIMM_CLASS 0x0000947b - -struct nv50_display_oimm_class { - u32 head; -}; - -/* 507c: NV50_DISP_SYNC - * 827c: NV84_DISP_SYNC - * 837c: NVA0_DISP_SYNC - * 887c: NV94_DISP_SYNC - * 857c: NVA3_DISP_SYNC - * 907c: NVD0_DISP_SYNC - * 917c: NVE0_DISP_SYNC - * 927c: NVF0_DISP_SYNC - * 947c: GM107_DISP_SYNC - */ - -#define NV50_DISP_SYNC_CLASS 0x0000507c -#define NV84_DISP_SYNC_CLASS 0x0000827c -#define NVA0_DISP_SYNC_CLASS 0x0000837c -#define NV94_DISP_SYNC_CLASS 0x0000887c -#define NVA3_DISP_SYNC_CLASS 0x0000857c -#define NVD0_DISP_SYNC_CLASS 0x0000907c -#define NVE0_DISP_SYNC_CLASS 0x0000917c -#define NVF0_DISP_SYNC_CLASS 0x0000927c -#define GM107_DISP_SYNC_CLASS 0x0000947c - -struct nv50_display_sync_class { - u32 pushbuf; - u32 head; -}; - -/* 507d: NV50_DISP_MAST - * 827d: NV84_DISP_MAST - * 837d: NVA0_DISP_MAST - * 887d: NV94_DISP_MAST - * 857d: NVA3_DISP_MAST - * 907d: NVD0_DISP_MAST - * 917d: NVE0_DISP_MAST - * 927d: NVF0_DISP_MAST - * 947d: GM107_DISP_MAST - */ - -#define NV50_DISP_MAST_CLASS 0x0000507d -#define NV84_DISP_MAST_CLASS 0x0000827d -#define NVA0_DISP_MAST_CLASS 0x0000837d -#define NV94_DISP_MAST_CLASS 0x0000887d -#define NVA3_DISP_MAST_CLASS 0x0000857d -#define NVD0_DISP_MAST_CLASS 0x0000907d -#define NVE0_DISP_MAST_CLASS 0x0000917d -#define NVF0_DISP_MAST_CLASS 0x0000927d -#define GM107_DISP_MAST_CLASS 0x0000947d - -struct nv50_display_mast_class { - u32 pushbuf; -}; - -/* 507e: NV50_DISP_OVLY - * 827e: NV84_DISP_OVLY - * 837e: NVA0_DISP_OVLY - * 887e: NV94_DISP_OVLY - * 857e: NVA3_DISP_OVLY - * 907e: NVD0_DISP_OVLY - * 917e: NVE0_DISP_OVLY - * 927e: NVF0_DISP_OVLY - * 947e: GM107_DISP_OVLY - */ - -#define NV50_DISP_OVLY_CLASS 0x0000507e -#define NV84_DISP_OVLY_CLASS 0x0000827e -#define NVA0_DISP_OVLY_CLASS 0x0000837e -#define NV94_DISP_OVLY_CLASS 0x0000887e -#define NVA3_DISP_OVLY_CLASS 0x0000857e 
-#define NVD0_DISP_OVLY_CLASS 0x0000907e -#define NVE0_DISP_OVLY_CLASS 0x0000917e -#define NVF0_DISP_OVLY_CLASS 0x0000927e -#define GM107_DISP_OVLY_CLASS 0x0000947e - -struct nv50_display_ovly_class { - u32 pushbuf; - u32 head; -}; - -#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h index c66eac513803..1794a05205d8 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/client.h +++ b/drivers/gpu/drm/nouveau/core/include/core/client.h @@ -10,6 +10,11 @@ struct nouveau_client { char name[32]; u32 debug; struct nouveau_vm *vm; + bool super; + void *data; + + int (*ntfy)(const void *, u32, const void *, u32); + struct nvkm_client_notify *notify[16]; }; static inline struct nouveau_client * @@ -43,4 +48,10 @@ int nouveau_client_init(struct nouveau_client *); int nouveau_client_fini(struct nouveau_client *, bool suspend); const char *nouveau_client_name(void *obj); +int nvkm_client_notify_new(struct nouveau_client *, struct nvkm_event *, + void *data, u32 size); +int nvkm_client_notify_del(struct nouveau_client *, int index); +int nvkm_client_notify_get(struct nouveau_client *, int index); +int nvkm_client_notify_put(struct nouveau_client *, int index); + #endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h index a8a9a9cf16cb..8743766454a5 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/device.h +++ b/drivers/gpu/drm/nouveau/core/include/core/device.h @@ -4,6 +4,7 @@ #include <core/object.h> #include <core/subdev.h> #include <core/engine.h> +#include <core/event.h> enum nv_subdev_type { NVDEV_ENGINE_DEVICE, @@ -28,7 +29,7 @@ enum nv_subdev_type { NVDEV_SUBDEV_BUS, NVDEV_SUBDEV_TIMER, NVDEV_SUBDEV_FB, - NVDEV_SUBDEV_LTCG, + NVDEV_SUBDEV_LTC, NVDEV_SUBDEV_IBUS, NVDEV_SUBDEV_INSTMEM, NVDEV_SUBDEV_VM, @@ -69,6 +70,8 @@ struct nouveau_device { struct platform_device *platformdev; u64 handle; + struct nvkm_event event; + const char *cfgopt; const char *dbgopt; const char *name; @@ -84,7 +87,6 @@ struct nouveau_device { NV_40 = 0x40, NV_50 = 0x50, NV_C0 = 0xc0, - NV_D0 = 0xd0, NV_E0 = 0xe0, GM100 = 0x110, } card_type; @@ -93,8 +95,14 @@ struct nouveau_device { struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR]; struct nouveau_object *subdev[NVDEV_SUBDEV_NR]; + + struct { + struct notifier_block nb; + } acpi; }; +int nouveau_device_list(u64 *name, int size); + static inline struct nouveau_device * nv_device(void *obj) { @@ -162,12 +170,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar); resource_size_t nv_device_resource_len(struct nouveau_device *device, unsigned int bar); -dma_addr_t -nv_device_map_page(struct nouveau_device *device, struct page *page); - -void -nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr); - int nv_device_get_irq(struct nouveau_device *device, bool stall); diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h index ba3f1a76a815..51e55d03330a 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/event.h +++ b/drivers/gpu/drm/nouveau/core/include/core/event.h @@ -1,47 +1,34 @@ #ifndef __NVKM_EVENT_H__ #define __NVKM_EVENT_H__ -/* return codes from event handlers */ -#define NVKM_EVENT_DROP 0 -#define NVKM_EVENT_KEEP 1 +#include <core/notify.h> -/* nouveau_eventh.flags bit #s */ -#define NVKM_EVENT_ENABLE 0 - -struct nouveau_eventh { - struct nouveau_event *event; - struct list_head head; - unsigned long flags; - u32 types; - 
int index; - int (*func)(void *, u32, int); - void *priv; +struct nvkm_event_func { + int (*ctor)(void *data, u32 size, struct nvkm_notify *); + void (*send)(void *data, u32 size, struct nvkm_notify *); + void (*init)(struct nvkm_event *, int type, int index); + void (*fini)(struct nvkm_event *, int type, int index); }; -struct nouveau_event { - void *priv; - int (*check)(struct nouveau_event *, u32 type, int index); - void (*enable)(struct nouveau_event *, int type, int index); - void (*disable)(struct nouveau_event *, int type, int index); +struct nvkm_event { + const struct nvkm_event_func *func; int types_nr; int index_nr; - spinlock_t list_lock; - struct list_head *list; spinlock_t refs_lock; - int refs[]; + spinlock_t list_lock; + struct list_head list; + int *refs; }; -int nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **); -void nouveau_event_destroy(struct nouveau_event **); -void nouveau_event_trigger(struct nouveau_event *, u32 types, int index); - -int nouveau_event_new(struct nouveau_event *, u32 types, int index, - int (*func)(void *, u32, int), void *, - struct nouveau_eventh **); -void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **); -void nouveau_event_get(struct nouveau_eventh *); -void nouveau_event_put(struct nouveau_eventh *); +int nvkm_event_init(const struct nvkm_event_func *func, + int types_nr, int index_nr, + struct nvkm_event *); +void nvkm_event_fini(struct nvkm_event *); +void nvkm_event_get(struct nvkm_event *, u32 types, int index); +void nvkm_event_put(struct nvkm_event *, u32 types, int index); +void nvkm_event_send(struct nvkm_event *, u32 types, int index, + void *data, u32 size); #endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h index 363674cdf8ab..ceb67d770875 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/handle.h +++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h @@ -10,6 +10,9 @@ struct nouveau_handle { u32 name; u32 priv; + u8 route; + u64 token; + struct nouveau_handle *parent; struct nouveau_object *object; }; @@ -20,6 +23,11 @@ void nouveau_handle_destroy(struct nouveau_handle *); int nouveau_handle_init(struct nouveau_handle *); int nouveau_handle_fini(struct nouveau_handle *, bool suspend); +int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle, + u16 oclass, void *data, u32 size, + struct nouveau_object **); +int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle); + struct nouveau_object * nouveau_handle_ref(struct nouveau_object *, u32 name); diff --git a/drivers/gpu/drm/nouveau/core/include/core/ioctl.h b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h new file mode 100644 index 000000000000..ac7935c2474e --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h @@ -0,0 +1,6 @@ +#ifndef __NVKM_IOCTL_H__ +#define __NVKM_IOCTL_H__ + +int nvkm_ioctl(struct nouveau_client *, bool, void *, u32, void **); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/notify.h b/drivers/gpu/drm/nouveau/core/include/core/notify.h new file mode 100644 index 000000000000..1262d8f020f3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/core/notify.h @@ -0,0 +1,36 @@ +#ifndef __NVKM_NOTIFY_H__ +#define __NVKM_NOTIFY_H__ + +struct nvkm_notify { + struct nvkm_event *event; + struct list_head head; +#define NVKM_NOTIFY_USER 0 +#define NVKM_NOTIFY_WORK 1 + unsigned long flags; + int block; +#define NVKM_NOTIFY_DROP 0 +#define NVKM_NOTIFY_KEEP 1 + int (*func)(struct 
nvkm_notify *); + + /* set by nvkm_event ctor */ + u32 types; + int index; + u32 size; + + struct work_struct work; + /* this is const for a *very* good reason - the data might be on the + * stack from an irq handler. if you're not core/notify.c then you + * should probably think twice before casting it away... + */ + const void *data; +}; + +int nvkm_notify_init(struct nvkm_event *, int (*func)(struct nvkm_notify *), + bool work, void *data, u32 size, u32 reply, + struct nvkm_notify *); +void nvkm_notify_fini(struct nvkm_notify *); +void nvkm_notify_get(struct nvkm_notify *); +void nvkm_notify_put(struct nvkm_notify *); +void nvkm_notify_send(struct nvkm_notify *, void *data, u32 size); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 62e68baef087..d7039482d6fd 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h @@ -48,6 +48,10 @@ void nouveau_object_destroy(struct nouveau_object *); int nouveau_object_init(struct nouveau_object *); int nouveau_object_fini(struct nouveau_object *, bool suspend); +int _nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); + extern struct nouveau_ofuncs nouveau_object_ofuncs; /* Don't allocate dynamically, because lockdep needs lock_class_keys to be in @@ -78,6 +82,7 @@ struct nouveau_omthds { int (*call)(struct nouveau_object *, u32, void *, u32); }; +struct nvkm_event; struct nouveau_ofuncs { int (*ctor)(struct nouveau_object *, struct nouveau_object *, struct nouveau_oclass *, void *data, u32 size, @@ -85,6 +90,9 @@ struct nouveau_ofuncs { void (*dtor)(struct nouveau_object *); int (*init)(struct nouveau_object *); int (*fini)(struct nouveau_object *, bool suspend); + int (*mthd)(struct nouveau_object *, u32, void *, u32); + int (*ntfy)(struct nouveau_object *, u32, struct nvkm_event **); + int (* map)(struct nouveau_object *, u64 *, u32 *); u8 (*rd08)(struct nouveau_object *, u64 offset); u16 (*rd16)(struct nouveau_object *, u64 offset); u32 (*rd32)(struct nouveau_object *, u64 offset); @@ -106,10 +114,6 @@ void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **); int nouveau_object_inc(struct nouveau_object *); int nouveau_object_dec(struct nouveau_object *, bool suspend); -int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle, - u16 oclass, void *data, u32 size, - struct nouveau_object **); -int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle); void nouveau_object_debug(void); static inline int @@ -199,4 +203,21 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len) return 0; } +#include <core/handle.h> + +static inline int +nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle, + u16 oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + return nouveau_handle_new(client, parent, handle, oclass, + data, size, pobject); +} + +static inline int +nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle) +{ + return nouveau_handle_del(client, parent, handle); +} + #endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h index 9f5ea900ff00..12da418ec70a 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/parent.h +++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h @@ -57,5 +57,6 @@ void _nouveau_parent_dtor(struct nouveau_object *); int 
nouveau_parent_sclass(struct nouveau_object *, u16 handle, struct nouveau_object **pengine, struct nouveau_oclass **poclass); +int nouveau_parent_lclass(struct nouveau_object *, u32 *, int); #endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h index 0f9a37bd32b0..451b6ed20b7e 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/printk.h +++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h @@ -21,6 +21,7 @@ nv_printk_(struct nouveau_object *, int, const char *, ...); #define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a) #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a) #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a) +#define nv_ioctl(o,f,a...) nv_trace(nouveau_client(o), "ioctl: "f, ##a) #define nv_assert(f,a...) do { \ if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \ diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h index fde842896806..7a64f347b385 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h @@ -6,20 +6,13 @@ #include <core/device.h> #include <core/event.h> -enum nvkm_hpd_event { - NVKM_HPD_PLUG = 1, - NVKM_HPD_UNPLUG = 2, - NVKM_HPD_IRQ = 4, - NVKM_HPD = (NVKM_HPD_PLUG | NVKM_HPD_UNPLUG | NVKM_HPD_IRQ) -}; - struct nouveau_disp { struct nouveau_engine base; struct list_head outp; - struct nouveau_event *hpd; - struct nouveau_event *vblank; + struct nvkm_event hpd; + struct nvkm_event vblank; }; static inline struct nouveau_disp * diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h index b28914ed1752..1b283a7b78e6 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h @@ -12,37 +12,20 @@ struct nouveau_dmaobj { u32 access; u64 start; u64 limit; - u32 conf0; }; struct nouveau_dmaeng { struct nouveau_engine base; /* creates a "physical" dma object from a struct nouveau_dmaobj */ - int (*bind)(struct nouveau_dmaeng *dmaeng, + int (*bind)(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent, - struct nouveau_dmaobj *dmaobj, struct nouveau_gpuobj **); }; -#define nouveau_dmaeng_create(p,e,c,d) \ - nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d)) -#define nouveau_dmaeng_destroy(p) \ - nouveau_engine_destroy(&(p)->base) -#define nouveau_dmaeng_init(p) \ - nouveau_engine_init(&(p)->base) -#define nouveau_dmaeng_fini(p,s) \ - nouveau_engine_fini(&(p)->base, (s)) - -#define _nouveau_dmaeng_dtor _nouveau_engine_dtor -#define _nouveau_dmaeng_init _nouveau_engine_init -#define _nouveau_dmaeng_fini _nouveau_engine_fini - -extern struct nouveau_oclass nv04_dmaeng_oclass; -extern struct nouveau_oclass nv50_dmaeng_oclass; -extern struct nouveau_oclass nvc0_dmaeng_oclass; -extern struct nouveau_oclass nvd0_dmaeng_oclass; - -extern struct nouveau_oclass nouveau_dmaobj_sclass[]; +extern struct nouveau_oclass *nv04_dmaeng_oclass; +extern struct nouveau_oclass *nv50_dmaeng_oclass; +extern struct nouveau_oclass *nvc0_dmaeng_oclass; +extern struct nouveau_oclass *nvd0_dmaeng_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h index b639eb2c74ff..e5e4d930b2c2 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h @@ -4,12 +4,14 @@ #include <core/namedb.h> #include <core/gpuobj.h> 
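The event rework above replaces the nouveau_event/nouveau_eventh pair with nvkm_event (the broadcaster embedded in a subdev or engine) and nvkm_notify (a listener embedded in the interested object). A handler now receives the nvkm_notify pointer itself, recovers its container with container_of(), and returns NVKM_NOTIFY_KEEP or NVKM_NOTIFY_DROP, as the nv50 software-engine conversion earlier in this patch does for vblank. The sketch below restates that consumer pattern with invented demo_ names; the nvkm_notify_init() signature is the one declared in core/notify.h above.

#include <linux/kernel.h>	/* container_of() */
#include <core/notify.h>	/* struct nvkm_notify, NVKM_NOTIFY_* */

struct demo_chan {
	struct nvkm_notify vblank;
	/* ... other per-channel state ... */
};

/* Invoked by nvkm_event_send() for every armed notifier on the event. */
static int
demo_vblank_handler(struct nvkm_notify *notify)
{
	struct demo_chan *chan = container_of(notify, typeof(*chan), vblank);

	/* ... react to the vblank on behalf of this channel ... */
	(void)chan;
	return NVKM_NOTIFY_KEEP;	/* stay armed for the next occurrence */
}

/* Registration against a display's vblank event might then look like:
 *
 *	ret = nvkm_notify_init(&pdisp->vblank, demo_vblank_handler, false,
 *			       &req, sizeof(req), sizeof(rep),
 *			       &chan->vblank);
 *	if (ret == 0)
 *		nvkm_notify_get(&chan->vblank);
 *
 * where req/rep stand for the nvif_notify_head_req_v0/_rep_v0 structures
 * used by the nv50 software engine above.
 */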
#include <core/engine.h> +#include <core/event.h> struct nouveau_fifo_chan { struct nouveau_namedb base; struct nouveau_dmaobj *pushdma; struct nouveau_gpuobj *pushgpu; void __iomem *user; + u64 addr; u32 size; u16 chid; atomic_t refcnt; /* NV04_NVSW_SET_REF */ @@ -40,8 +42,10 @@ void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *); #define _nouveau_fifo_channel_fini _nouveau_namedb_fini void _nouveau_fifo_channel_dtor(struct nouveau_object *); +int _nouveau_fifo_channel_map(struct nouveau_object *, u64 *, u32 *); u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64); void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32); +int _nouveau_fifo_channel_ntfy(struct nouveau_object *, u32, struct nvkm_event **); struct nouveau_fifo_base { struct nouveau_gpuobj base; @@ -65,8 +69,8 @@ struct nouveau_fifo_base { struct nouveau_fifo { struct nouveau_engine base; - struct nouveau_event *cevent; /* channel creation event */ - struct nouveau_event *uevent; /* async user trigger */ + struct nvkm_event cevent; /* channel creation event */ + struct nvkm_event uevent; /* async user trigger */ struct nouveau_object **channel; spinlock_t lock; @@ -112,6 +116,9 @@ extern struct nouveau_oclass *nve0_fifo_oclass; extern struct nouveau_oclass *gk20a_fifo_oclass; extern struct nouveau_oclass *nv108_fifo_oclass; +int nouveau_fifo_uevent_ctor(void *, u32, struct nvkm_notify *); +void nouveau_fifo_uevent(struct nouveau_fifo *); + void nv04_fifo_intr(struct nouveau_subdev *); int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *); diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h index 8c1d4772da0c..d5055570d01b 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h @@ -70,6 +70,7 @@ extern struct nouveau_oclass *nvd9_graph_oclass; extern struct nouveau_oclass *nve4_graph_oclass; extern struct nouveau_oclass *gk20a_graph_oclass; extern struct nouveau_oclass *nvf0_graph_oclass; +extern struct nouveau_oclass *gk110b_graph_oclass; extern struct nouveau_oclass *nv108_graph_oclass; extern struct nouveau_oclass *gm107_graph_oclass; diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h index 49b0024910fe..88cc812baaa3 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h @@ -4,7 +4,6 @@ #include <core/device.h> #include <core/engine.h> #include <core/engctx.h> -#include <core/class.h> struct nouveau_perfdom; struct nouveau_perfctr; diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/class.h b/drivers/gpu/drm/nouveau/core/include/nvif/class.h new file mode 120000 index 000000000000..f1ac4859edd4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/nvif/class.h @@ -0,0 +1 @@ +../../../nvif/class.h
\ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/event.h b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
new file mode 120000
index 000000000000..1b798538a725
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
@@ -0,0 +1 @@
+../../../nvif/event.h
\ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
new file mode 120000
index 000000000000..8569c86907c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
@@ -0,0 +1 @@
+../../../nvif/ioctl.h
\ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
new file mode 120000
index 000000000000..69d99292bca4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
@@ -0,0 +1 @@
+../../../nvif/unpack.h
\ No newline at end of file diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h index 9faa98e67ad8..be037fac534c 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h @@ -20,6 +20,9 @@ struct nouveau_bar { u32 flags, struct nouveau_vma *); void (*unmap)(struct nouveau_bar *, struct nouveau_vma *); void (*flush)(struct nouveau_bar *); + + /* whether the BAR supports to be ioremapped WC or should be uncached */ + bool iomap_uncached; }; static inline struct nouveau_bar * @@ -30,5 +33,6 @@ nouveau_bar(void *obj) extern struct nouveau_oclass nv50_bar_oclass; extern struct nouveau_oclass nvc0_bar_oclass; +extern struct nouveau_oclass gk20a_bar_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index c01e29c9f89a..a5ca00dd2f61 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h @@ -71,8 +71,15 @@ struct nouveau_clock { struct list_head states; int state_nr; + struct work_struct work; + wait_queue_head_t wait; + atomic_t waiting; + + struct nvkm_notify pwrsrc_ntfy; + int pwrsrc; int pstate; /* current */ - int ustate; /* user-requested (-1 disabled, -2 perfmon) */ + int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */ + int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */ int astate; /* perfmon adjustment (base) */ int tstate; /* thermal adjustment (max-) */ int dstate; /* display adjustment (min+) */ @@ -108,8 +115,9 @@ struct nouveau_clocks { int mdiv; }; -#define nouveau_clock_create(p,e,o,i,r,d) \ - nouveau_clock_create_((p), (e), (o), (i), (r), sizeof(**d), (void **)d) +#define nouveau_clock_create(p,e,o,i,r,s,n,d) \ + nouveau_clock_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \ + (void **)d) #define nouveau_clock_destroy(p) ({ \ struct nouveau_clock *clk = (p); \ _nouveau_clock_dtor(nv_object(clk)); \ @@ -118,15 +126,18 @@ struct nouveau_clocks { struct nouveau_clock *clk = (p); \ _nouveau_clock_init(nv_object(clk)); \ }) -#define nouveau_clock_fini(p,s) \ - nouveau_subdev_fini(&(p)->base, (s)) +#define nouveau_clock_fini(p,s) ({ \ + struct nouveau_clock *clk = (p); \ + _nouveau_clock_fini(nv_object(clk), (s)); \ +}) int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *, struct nouveau_oclass *, - struct nouveau_clocks *, bool, int, void **); + struct nouveau_clocks *, struct nouveau_pstate *, + int, bool, int, void **); void _nouveau_clock_dtor(struct nouveau_object *); -int _nouveau_clock_init(struct nouveau_object *); -#define _nouveau_clock_fini _nouveau_subdev_fini +int _nouveau_clock_init(struct nouveau_object *); +int _nouveau_clock_fini(struct nouveau_object *, bool); extern struct nouveau_oclass nv04_clock_oclass; extern struct nouveau_oclass nv40_clock_oclass; @@ -136,6 +147,7 @@ extern struct nouveau_oclass *nvaa_clock_oclass; extern struct nouveau_oclass nva3_clock_oclass; extern struct nouveau_oclass nvc0_clock_oclass; extern struct nouveau_oclass nve0_clock_oclass; +extern struct nouveau_oclass gk20a_clock_oclass; int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq); int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, @@ -145,7 +157,7 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, int clk, struct nouveau_pll_vals *); -int 
nouveau_clock_ustate(struct nouveau_clock *, int req); +int nouveau_clock_ustate(struct nouveau_clock *, int req, int pwr); int nouveau_clock_astate(struct nouveau_clock *, int req, int rel); int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel); int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel); diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h index 612d82ab683d..b73733d21cc7 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h @@ -8,16 +8,22 @@ #include <subdev/bios.h> #include <subdev/bios/gpio.h> -enum nvkm_gpio_event { - NVKM_GPIO_HI = 1, - NVKM_GPIO_LO = 2, - NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO), +struct nvkm_gpio_ntfy_req { +#define NVKM_GPIO_HI 0x01 +#define NVKM_GPIO_LO 0x02 +#define NVKM_GPIO_TOGGLED 0x03 + u8 mask; + u8 line; +}; + +struct nvkm_gpio_ntfy_rep { + u8 mask; }; struct nouveau_gpio { struct nouveau_subdev base; - struct nouveau_event *events; + struct nvkm_event event; void (*reset)(struct nouveau_gpio *, u8 func); int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line, diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h index 825f7bb46b67..1b937c2c25ae 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h @@ -14,15 +14,18 @@ #define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8) #define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8) -enum nvkm_i2c_event { - NVKM_I2C_PLUG = 1, - NVKM_I2C_UNPLUG = 2, - NVKM_I2C_IRQ = 4, - NVKM_I2C_DONE = 8, - NVKM_I2C_ANY = (NVKM_I2C_PLUG | - NVKM_I2C_UNPLUG | - NVKM_I2C_IRQ | - NVKM_I2C_DONE), +struct nvkm_i2c_ntfy_req { +#define NVKM_I2C_PLUG 0x01 +#define NVKM_I2C_UNPLUG 0x02 +#define NVKM_I2C_IRQ 0x04 +#define NVKM_I2C_DONE 0x08 +#define NVKM_I2C_ANY 0x0f + u8 mask; + u8 port; +}; + +struct nvkm_i2c_ntfy_rep { + u8 mask; }; struct nouveau_i2c_port { @@ -56,7 +59,7 @@ struct nouveau_i2c_board_info { struct nouveau_i2c { struct nouveau_subdev base; - struct nouveau_event *ntfy; + struct nvkm_event event; struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type); diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h new file mode 100644 index 000000000000..b909a7363f6b --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h @@ -0,0 +1,35 @@ +#ifndef __NOUVEAU_LTC_H__ +#define __NOUVEAU_LTC_H__ + +#include <core/subdev.h> +#include <core/device.h> + +#define NOUVEAU_LTC_MAX_ZBC_CNT 16 + +struct nouveau_mm_node; + +struct nouveau_ltc { + struct nouveau_subdev base; + + int (*tags_alloc)(struct nouveau_ltc *, u32 count, + struct nouveau_mm_node **); + void (*tags_free)(struct nouveau_ltc *, struct nouveau_mm_node **); + void (*tags_clear)(struct nouveau_ltc *, u32 first, u32 count); + + int zbc_min; + int zbc_max; + int (*zbc_color_get)(struct nouveau_ltc *, int index, const u32[4]); + int (*zbc_depth_get)(struct nouveau_ltc *, int index, const u32); +}; + +static inline struct nouveau_ltc * +nouveau_ltc(void *obj) +{ + return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTC]; +} + +extern struct nouveau_oclass *gf100_ltc_oclass; +extern struct nouveau_oclass *gk104_ltc_oclass; +extern struct nouveau_oclass *gm107_ltc_oclass; + +#endif diff --git 
a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h deleted file mode 100644 index c9c1950b7743..000000000000 --- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef __NOUVEAU_LTCG_H__ -#define __NOUVEAU_LTCG_H__ - -#include <core/subdev.h> -#include <core/device.h> - -struct nouveau_mm_node; - -struct nouveau_ltcg { - struct nouveau_subdev base; - - int (*tags_alloc)(struct nouveau_ltcg *, u32 count, - struct nouveau_mm_node **); - void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **); - void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count); -}; - -static inline struct nouveau_ltcg * -nouveau_ltcg(void *obj) -{ - return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG]; -} - -#define nouveau_ltcg_create(p,e,o,d) \ - nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \ - sizeof(**d), (void **)d) -#define nouveau_ltcg_destroy(p) \ - nouveau_subdev_destroy(&(p)->base) -#define nouveau_ltcg_init(p) \ - nouveau_subdev_init(&(p)->base) -#define nouveau_ltcg_fini(p,s) \ - nouveau_subdev_fini(&(p)->base, (s)) - -#define _nouveau_ltcg_dtor _nouveau_subdev_dtor -#define _nouveau_ltcg_init _nouveau_subdev_init -#define _nouveau_ltcg_fini _nouveau_subdev_fini - -extern struct nouveau_oclass *gf100_ltcg_oclass; -extern struct nouveau_oclass *gm107_ltcg_oclass; - -#endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index 72b176831be6..568e4dfc5e9e 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h @@ -4,15 +4,11 @@ #include <core/subdev.h> #include <core/device.h> -struct nouveau_mc_intr { - u32 stat; - u32 unit; -}; - struct nouveau_mc { struct nouveau_subdev base; bool use_msi; unsigned int irq; + void (*unk260)(struct nouveau_mc *, u32); }; static inline struct nouveau_mc * @@ -21,30 +17,6 @@ nouveau_mc(void *obj) return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; } -#define nouveau_mc_create(p,e,o,d) \ - nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) -#define nouveau_mc_destroy(p) ({ \ - struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ -}) -#define nouveau_mc_init(p) ({ \ - struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \ -}) -#define nouveau_mc_fini(p,s) ({ \ - struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \ -}) - -int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, int, void **); -void _nouveau_mc_dtor(struct nouveau_object *); -int _nouveau_mc_init(struct nouveau_object *); -int _nouveau_mc_fini(struct nouveau_object *, bool); - -struct nouveau_mc_oclass { - struct nouveau_oclass base; - const struct nouveau_mc_intr *intr; - void (*msi_rearm)(struct nouveau_mc *); -}; - extern struct nouveau_oclass *nv04_mc_oclass; extern struct nouveau_oclass *nv40_mc_oclass; extern struct nouveau_oclass *nv44_mc_oclass; @@ -54,5 +26,6 @@ extern struct nouveau_oclass *nv94_mc_oclass; extern struct nouveau_oclass *nv98_mc_oclass; extern struct nouveau_oclass *nvc0_mc_oclass; extern struct nouveau_oclass *nvc3_mc_oclass; +extern struct nouveau_oclass *gk20a_mc_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h index c5c92cbed33f..f73feec151db 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h 
@@ -8,18 +8,6 @@ struct nouveau_pwr { struct nouveau_subdev base; struct { - u32 limit; - u32 *data; - u32 size; - } code; - - struct { - u32 limit; - u32 *data; - u32 size; - } data; - - struct { u32 base; u32 size; } send; @@ -35,7 +23,8 @@ struct nouveau_pwr { u32 data[2]; } recv; - int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32); + int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32); + void (*pgob)(struct nouveau_pwr *, bool); }; static inline struct nouveau_pwr * @@ -44,29 +33,11 @@ nouveau_pwr(void *obj) return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR]; } -#define nouveau_pwr_create(p, e, o, d) \ - nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d) -#define nouveau_pwr_destroy(p) \ - nouveau_subdev_destroy(&(p)->base) -#define nouveau_pwr_init(p) ({ \ - struct nouveau_pwr *ppwr = (p); \ - _nouveau_pwr_init(nv_object(ppwr)); \ -}) -#define nouveau_pwr_fini(p,s) ({ \ - struct nouveau_pwr *ppwr = (p); \ - _nouveau_pwr_fini(nv_object(ppwr), (s)); \ -}) - -int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *, - struct nouveau_oclass *, int, void **); -#define _nouveau_pwr_dtor _nouveau_subdev_dtor -int _nouveau_pwr_init(struct nouveau_object *); -int _nouveau_pwr_fini(struct nouveau_object *, bool); - -extern struct nouveau_oclass nva3_pwr_oclass; -extern struct nouveau_oclass nvc0_pwr_oclass; -extern struct nouveau_oclass nvd0_pwr_oclass; -extern struct nouveau_oclass nv108_pwr_oclass; +extern struct nouveau_oclass *nva3_pwr_oclass; +extern struct nouveau_oclass *nvc0_pwr_oclass; +extern struct nouveau_oclass *nvd0_pwr_oclass; +extern struct nouveau_oclass *gk104_pwr_oclass; +extern struct nouveau_oclass *nv108_pwr_oclass; /* interface to MEMX process running on PPWR */ struct nouveau_memx; diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h index d0ced94ca54c..ccfa21d72ddc 100644 --- a/drivers/gpu/drm/nouveau/core/os.h +++ b/drivers/gpu/drm/nouveau/core/os.h @@ -21,6 +21,8 @@ #include <linux/interrupt.h> #include <linux/log2.h> #include <linux/pm_runtime.h> +#include <linux/power_supply.h> +#include <linux/clk.h> #include <asm/unaligned.h> diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c index 73b1ed20c8d5..8bcbdf39cfb2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c @@ -99,8 +99,13 @@ nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent, struct nouveau_mem *mem, struct nouveau_object **pobject) { struct nouveau_object *engine = nv_object(bar); - return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass, - mem, 0, pobject); + int ret = -ENOMEM; + if (bar->iomem) { + ret = nouveau_object_ctor(parent, engine, + &nouveau_barobj_oclass, + mem, 0, pobject); + } + return ret; } int @@ -118,9 +123,12 @@ nouveau_bar_create_(struct nouveau_object *parent, if (ret) return ret; - if (nv_device_resource_len(device, 3) != 0) + if (nv_device_resource_len(device, 3) != 0) { bar->iomem = ioremap(nv_device_resource_start(device, 3), nv_device_resource_len(device, 3)); + if (!bar->iomem) + nv_warn(bar, "PRAMIN ioremap failed\n"); + } return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c new file mode 100644 index 000000000000..bf877af9d3bd --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <subdev/bar.h> + +#include "priv.h" + +int +gk20a_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_bar *bar; + int ret; + + ret = nvc0_bar_ctor(parent, engine, oclass, data, size, pobject); + if (ret) + return ret; + + bar = (struct nouveau_bar *)*pobject; + bar->iomap_uncached = true; + + return 0; +} + +struct nouveau_oclass +gk20a_bar_oclass = { + .handle = NV_SUBDEV(BAR, 0xea), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = gk20a_bar_ctor, + .dtor = nvc0_bar_dtor, + .init = nvc0_bar_init, + .fini = _nouveau_bar_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c index ca8139b9ab27..05a278bab247 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c @@ -133,7 +133,7 @@ nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm, return 0; } -static int +int nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) @@ -169,7 +169,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return 0; } -static void +void nvc0_bar_dtor(struct nouveau_object *object) { struct nvc0_bar_priv *priv = (void *)object; @@ -188,7 +188,7 @@ nvc0_bar_dtor(struct nouveau_object *object) nouveau_bar_destroy(&priv->base); } -static int +int nvc0_bar_init(struct nouveau_object *object) { struct nvc0_bar_priv *priv = (void *)object; @@ -200,7 +200,6 @@ nvc0_bar_init(struct nouveau_object *object) nv_mask(priv, 0x000200, 0x00000100, 0x00000000); nv_mask(priv, 0x000200, 0x00000100, 0x00000100); - nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12); if (priv->bar[0].mem) diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h index ffad8f337ead..3ee8b1476d00 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h @@ -23,4 +23,10 @@ int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *, void nv84_bar_flush(struct nouveau_bar *); +int nvc0_bar_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + 
struct nouveau_object **); +void nvc0_bar_dtor(struct nouveau_object *); +int nvc0_bar_init(struct nouveau_object *); + #endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c index 22351f594d2a..a276a711294a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c @@ -90,16 +90,20 @@ nouveau_cstate_prog(struct nouveau_clock *clk, cstate = &pstate->base; } - ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1); - if (ret && ret != -ENODEV) { - nv_error(clk, "failed to raise fan speed: %d\n", ret); - return ret; + if (ptherm) { + ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1); + if (ret && ret != -ENODEV) { + nv_error(clk, "failed to raise fan speed: %d\n", ret); + return ret; + } } - ret = volt->set_id(volt, cstate->voltage, +1); - if (ret && ret != -ENODEV) { - nv_error(clk, "failed to raise voltage: %d\n", ret); - return ret; + if (volt) { + ret = volt->set_id(volt, cstate->voltage, +1); + if (ret && ret != -ENODEV) { + nv_error(clk, "failed to raise voltage: %d\n", ret); + return ret; + } } ret = clk->calc(clk, cstate); @@ -108,13 +112,17 @@ nouveau_cstate_prog(struct nouveau_clock *clk, clk->tidy(clk); } - ret = volt->set_id(volt, cstate->voltage, -1); - if (ret && ret != -ENODEV) - nv_error(clk, "failed to lower voltage: %d\n", ret); + if (volt) { + ret = volt->set_id(volt, cstate->voltage, -1); + if (ret && ret != -ENODEV) + nv_error(clk, "failed to lower voltage: %d\n", ret); + } - ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1); - if (ret && ret != -ENODEV) - nv_error(clk, "failed to lower fan speed: %d\n", ret); + if (ptherm) { + ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1); + if (ret && ret != -ENODEV) + nv_error(clk, "failed to lower fan speed: %d\n", ret); + } return 0; } @@ -194,16 +202,23 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei) return nouveau_cstate_prog(clk, pstate, 0); } -static int -nouveau_pstate_calc(struct nouveau_clock *clk) +static void +nouveau_pstate_work(struct work_struct *work) { - int pstate, ret = 0; + struct nouveau_clock *clk = container_of(work, typeof(*clk), work); + int pstate; - nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate, - clk->ustate, clk->astate, clk->tstate, clk->dstate); + if (!atomic_xchg(&clk->waiting, 0)) + return; + clk->pwrsrc = power_supply_is_system_supplied(); - if (clk->state_nr && clk->ustate != -1) { - pstate = (clk->ustate < 0) ? clk->astate : clk->ustate; + nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n", + clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc, + clk->astate, clk->tstate, clk->dstate); + + pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc; + if (clk->state_nr && pstate != -1) { + pstate = (pstate < 0) ? 
clk->astate : pstate; pstate = min(pstate, clk->state_nr - 1 - clk->tstate); pstate = max(pstate, clk->dstate); } else { @@ -211,9 +226,26 @@ nouveau_pstate_calc(struct nouveau_clock *clk) } nv_trace(clk, "-> %d\n", pstate); - if (pstate != clk->pstate) - ret = nouveau_pstate_prog(clk, pstate); - return ret; + if (pstate != clk->pstate) { + int ret = nouveau_pstate_prog(clk, pstate); + if (ret) { + nv_error(clk, "error setting pstate %d: %d\n", + pstate, ret); + } + } + + wake_up_all(&clk->wait); + nvkm_notify_get(&clk->pwrsrc_ntfy); +} + +static int +nouveau_pstate_calc(struct nouveau_clock *clk, bool wait) +{ + atomic_set(&clk->waiting, 1); + schedule_work(&clk->work); + if (wait) + wait_event(clk->wait, !atomic_read(&clk->waiting)); + return 0; } static void @@ -361,17 +393,40 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req) req = i; } - clk->ustate = req; - return 0; + return req + 2; +} + +static int +nouveau_clock_nstate(struct nouveau_clock *clk, const char *mode, int arglen) +{ + int ret = 1; + + if (strncasecmpz(mode, "disabled", arglen)) { + char save = mode[arglen]; + long v; + + ((char *)mode)[arglen] = '\0'; + if (!kstrtol(mode, 0, &v)) { + ret = nouveau_clock_ustate_update(clk, v); + if (ret < 0) + ret = 1; + } + ((char *)mode)[arglen] = save; + } + + return ret - 2; } int -nouveau_clock_ustate(struct nouveau_clock *clk, int req) +nouveau_clock_ustate(struct nouveau_clock *clk, int req, int pwr) { int ret = nouveau_clock_ustate_update(clk, req); - if (ret) - return ret; - return nouveau_pstate_calc(clk); + if (ret >= 0) { + if (ret -= 2, pwr) clk->ustate_ac = ret; + else clk->ustate_dc = ret; + return nouveau_pstate_calc(clk, true); + } + return ret; } int @@ -381,7 +436,7 @@ nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel) if ( rel) clk->astate += rel; clk->astate = min(clk->astate, clk->state_nr - 1); clk->astate = max(clk->astate, 0); - return nouveau_pstate_calc(clk); + return nouveau_pstate_calc(clk, true); } int @@ -391,7 +446,7 @@ nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel) if ( rel) clk->tstate += rel; clk->tstate = min(clk->tstate, 0); clk->tstate = max(clk->tstate, -(clk->state_nr - 1)); - return nouveau_pstate_calc(clk); + return nouveau_pstate_calc(clk, true); } int @@ -401,12 +456,30 @@ nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel) if ( rel) clk->dstate += rel; clk->dstate = min(clk->dstate, clk->state_nr - 1); clk->dstate = max(clk->dstate, 0); - return nouveau_pstate_calc(clk); + return nouveau_pstate_calc(clk, true); +} + +static int +nouveau_clock_pwrsrc(struct nvkm_notify *notify) +{ + struct nouveau_clock *clk = + container_of(notify, typeof(*clk), pwrsrc_ntfy); + nouveau_pstate_calc(clk, false); + return NVKM_NOTIFY_DROP; } /****************************************************************************** * subdev base class implementation *****************************************************************************/ + +int +_nouveau_clock_fini(struct nouveau_object *object, bool suspend) +{ + struct nouveau_clock *clk = (void *)object; + nvkm_notify_put(&clk->pwrsrc_ntfy); + return nouveau_subdev_fini(&clk->base, suspend); +} + int _nouveau_clock_init(struct nouveau_object *object) { @@ -414,6 +487,10 @@ _nouveau_clock_init(struct nouveau_object *object) struct nouveau_clocks *clock = clk->domains; int ret; + ret = nouveau_subdev_init(&clk->base); + if (ret) + return ret; + memset(&clk->bstate, 0x00, sizeof(clk->bstate)); INIT_LIST_HEAD(&clk->bstate.list); clk->bstate.pstate = 0xff; 
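The clock/base.c hunks above move pstate selection onto a workqueue: a requester sets an atomic flag, schedules the work item, and may sleep on a wait queue until the worker has consumed the request and reprogrammed the clocks. A minimal sketch of that pattern only, under simplified made-up names (struct reclock and its helpers are illustrative, not the driver's actual types):

/*
 * Sketch of the deferred-recalculation pattern used above.
 * Simplified names; not the driver's real structures.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct reclock {
	struct work_struct work;
	wait_queue_head_t wait;
	atomic_t waiting;
};

static void reclock_work(struct work_struct *work)
{
	struct reclock *rc = container_of(work, struct reclock, work);

	/* consume the pending request; bail if nothing was queued */
	if (!atomic_xchg(&rc->waiting, 0))
		return;

	/* ... pick and program the new performance state here ... */

	wake_up_all(&rc->wait);	/* release any synchronous requester */
}

/* request a recalculation; optionally block until the worker has run */
static int reclock_request(struct reclock *rc, bool wait)
{
	atomic_set(&rc->waiting, 1);
	schedule_work(&rc->work);
	if (wait)
		wait_event(rc->wait, !atomic_read(&rc->waiting));
	return 0;
}

static void reclock_init(struct reclock *rc)
{
	INIT_WORK(&rc->work, reclock_work);
	init_waitqueue_head(&rc->wait);
	atomic_set(&rc->waiting, 0);
}

In the patch itself, the power-source notifier requests an asynchronous recalculation (wait=false) while the ustate/astate/tstate/dstate setters pass wait=true and block until the worker has finished.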
@@ -434,7 +511,7 @@ _nouveau_clock_init(struct nouveau_object *object) clk->tstate = 0; clk->dstate = 0; clk->pstate = -1; - nouveau_pstate_calc(clk); + nouveau_pstate_calc(clk, true); return 0; } @@ -444,6 +521,8 @@ _nouveau_clock_dtor(struct nouveau_object *object) struct nouveau_clock *clk = (void *)object; struct nouveau_pstate *pstate, *temp; + nvkm_notify_fini(&clk->pwrsrc_ntfy); + list_for_each_entry_safe(pstate, temp, &clk->states, head) { nouveau_pstate_del(pstate); } @@ -456,6 +535,7 @@ nouveau_clock_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, struct nouveau_clocks *clocks, + struct nouveau_pstate *pstates, int nb_pstates, bool allow_reclock, int length, void **object) { @@ -472,29 +552,46 @@ nouveau_clock_create_(struct nouveau_object *parent, INIT_LIST_HEAD(&clk->states); clk->domains = clocks; - clk->ustate = -1; + clk->ustate_ac = -1; + clk->ustate_dc = -1; + + INIT_WORK(&clk->work, nouveau_pstate_work); + init_waitqueue_head(&clk->wait); + atomic_set(&clk->waiting, 0); - idx = 0; - do { - ret = nouveau_pstate_new(clk, idx++); - } while (ret == 0); + /* If no pstates are provided, try and fetch them from the BIOS */ + if (!pstates) { + idx = 0; + do { + ret = nouveau_pstate_new(clk, idx++); + } while (ret == 0); + } else { + for (idx = 0; idx < nb_pstates; idx++) + list_add_tail(&pstates[idx].head, &clk->states); + clk->state_nr = nb_pstates; + } clk->allow_reclock = allow_reclock; + ret = nvkm_notify_init(&device->event, nouveau_clock_pwrsrc, true, + NULL, 0, 0, &clk->pwrsrc_ntfy); + if (ret) + return ret; + mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen); if (mode) { - if (!strncasecmpz(mode, "disabled", arglen)) { - clk->ustate = -1; - } else { - char save = mode[arglen]; - long v; - - ((char *)mode)[arglen] = '\0'; - if (!kstrtol(mode, 0, &v)) - nouveau_clock_ustate_update(clk, v); - ((char *)mode)[arglen] = save; - } + clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen); + clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen); } + mode = nouveau_stropt(device->cfgopt, "NvClkModeAC", &arglen); + if (mode) + clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen); + + mode = nouveau_stropt(device->cfgopt, "NvClkModeDC", &arglen); + if (mode) + clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen); + + return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c new file mode 100644 index 000000000000..425a8d5e9129 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c @@ -0,0 +1,665 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c + * + */ + +#define MHZ (1000 * 1000) + +#define MASK(w) ((1 << w) - 1) + +#define SYS_GPCPLL_CFG_BASE 0x00137000 +#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800 + +#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0) +#define GPCPLL_CFG_ENABLE BIT(0) +#define GPCPLL_CFG_IDDQ BIT(1) +#define GPCPLL_CFG_LOCK_DET_OFF BIT(4) +#define GPCPLL_CFG_LOCK BIT(17) + +#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4) +#define GPCPLL_COEFF_M_SHIFT 0 +#define GPCPLL_COEFF_M_WIDTH 8 +#define GPCPLL_COEFF_N_SHIFT 8 +#define GPCPLL_COEFF_N_WIDTH 8 +#define GPCPLL_COEFF_P_SHIFT 16 +#define GPCPLL_COEFF_P_WIDTH 6 + +#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc) +#define GPCPLL_CFG2_SETUP2_SHIFT 16 +#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 + +#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18) +#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16 + +#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c) +#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0 +#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8 +#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16 +#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22 +#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31 + +#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100) +#define SEL_VCO_GPC2CLK_OUT_SHIFT 0 + +#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250) +#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1 +#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31 +#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1 +#define GPC2CLK_OUT_VCODIV_WIDTH 6 +#define GPC2CLK_OUT_VCODIV_SHIFT 8 +#define GPC2CLK_OUT_VCODIV1 0 +#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \ + GPC2CLK_OUT_VCODIV_SHIFT) +#define GPC2CLK_OUT_BYPDIV_WIDTH 6 +#define GPC2CLK_OUT_BYPDIV_SHIFT 0 +#define GPC2CLK_OUT_BYPDIV31 0x3c +#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\ + | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\ + | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT)) +#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \ + | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \ + | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT)) + +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0) +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24 +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \ + (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT) + +#include <subdev/clock.h> +#include <subdev/timer.h> + +#ifdef __KERNEL__ +#include <nouveau_platform.h> +#endif + +static const u8 pl_to_div[] = { +/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */ +/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32, +}; + +/* All frequencies in Mhz */ +struct gk20a_clk_pllg_params { + u32 min_vco, max_vco; + u32 min_u, max_u; + u32 min_m, max_m; + u32 min_n, max_n; + u32 min_pl, max_pl; +}; + +static const struct gk20a_clk_pllg_params gk20a_pllg_params = { + .min_vco = 1000, .max_vco = 1700, + .min_u = 12, .max_u = 38, + .min_m = 1, .max_m = 255, + .min_n = 8, .max_n = 255, + .min_pl = 1, .max_pl = 32, +}; + +struct gk20a_clock_priv { + struct nouveau_clock base; + const struct 
gk20a_clk_pllg_params *params; + u32 m, n, pl; + u32 parent_rate; +}; +#define to_gk20a_clock(base) container_of(base, struct gk20a_clock_priv, base) + +static void +gk20a_pllg_read_mnp(struct gk20a_clock_priv *priv) +{ + u32 val; + + val = nv_rd32(priv, GPCPLL_COEFF); + priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); + priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH); + priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); +} + +static u32 +gk20a_pllg_calc_rate(struct gk20a_clock_priv *priv) +{ + u32 rate; + u32 divider; + + rate = priv->parent_rate * priv->n; + divider = priv->m * pl_to_div[priv->pl]; + do_div(rate, divider); + + return rate / 2; +} + +static int +gk20a_pllg_calc_mnp(struct gk20a_clock_priv *priv, unsigned long rate) +{ + u32 target_clk_f, ref_clk_f, target_freq; + u32 min_vco_f, max_vco_f; + u32 low_pl, high_pl, best_pl; + u32 target_vco_f, vco_f; + u32 best_m, best_n; + u32 u_f; + u32 m, n, n2; + u32 delta, lwv, best_delta = ~0; + u32 pl; + + target_clk_f = rate * 2 / MHZ; + ref_clk_f = priv->parent_rate / MHZ; + + max_vco_f = priv->params->max_vco; + min_vco_f = priv->params->min_vco; + best_m = priv->params->max_m; + best_n = priv->params->min_n; + best_pl = priv->params->min_pl; + + target_vco_f = target_clk_f + target_clk_f / 50; + if (max_vco_f < target_vco_f) + max_vco_f = target_vco_f; + + /* min_pl <= high_pl <= max_pl */ + high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f; + high_pl = min(high_pl, priv->params->max_pl); + high_pl = max(high_pl, priv->params->min_pl); + + /* min_pl <= low_pl <= max_pl */ + low_pl = min_vco_f / target_vco_f; + low_pl = min(low_pl, priv->params->max_pl); + low_pl = max(low_pl, priv->params->min_pl); + + /* Find Indices of high_pl and low_pl */ + for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) { + if (pl_to_div[pl] >= low_pl) { + low_pl = pl; + break; + } + } + for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) { + if (pl_to_div[pl] >= high_pl) { + high_pl = pl; + break; + } + } + + nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl, + pl_to_div[low_pl], high_pl, pl_to_div[high_pl]); + + /* Select lowest possible VCO */ + for (pl = low_pl; pl <= high_pl; pl++) { + target_vco_f = target_clk_f * pl_to_div[pl]; + for (m = priv->params->min_m; m <= priv->params->max_m; m++) { + u_f = ref_clk_f / m; + + if (u_f < priv->params->min_u) + break; + if (u_f > priv->params->max_u) + continue; + + n = (target_vco_f * m) / ref_clk_f; + n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f; + + if (n > priv->params->max_n) + break; + + for (; n <= n2; n++) { + if (n < priv->params->min_n) + continue; + if (n > priv->params->max_n) + break; + + vco_f = ref_clk_f * n / m; + + if (vco_f >= min_vco_f && vco_f <= max_vco_f) { + lwv = (vco_f + (pl_to_div[pl] / 2)) + / pl_to_div[pl]; + delta = abs(lwv - target_clk_f); + + if (delta < best_delta) { + best_delta = delta; + best_m = m; + best_n = n; + best_pl = pl; + + if (best_delta == 0) + goto found_match; + } + } + } + } + } + +found_match: + WARN_ON(best_delta == ~0); + + if (best_delta != 0) + nv_debug(priv, "no best match for target @ %dMHz on gpc_pll", + target_clk_f); + + priv->m = best_m; + priv->n = best_n; + priv->pl = best_pl; + + target_freq = gk20a_pllg_calc_rate(priv) / MHZ; + + nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n", + target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]); + + return 0; +} + +static int +gk20a_pllg_slide(struct gk20a_clock_priv *priv, u32 n) +{ + u32 
val; + int ramp_timeout; + + /* get old coefficients */ + val = nv_rd32(priv, GPCPLL_COEFF); + /* do nothing if NDIV is the same */ + if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH))) + return 0; + + /* setup */ + nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, + 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT); + nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, + 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT); + + /* pll slowdown mode */ + nv_mask(priv, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); + + /* new ndiv ready for ramp */ + val = nv_rd32(priv, GPCPLL_COEFF); + val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT); + val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; + udelay(1); + nv_wr32(priv, GPCPLL_COEFF, val); + + /* dynamic ramp to new ndiv */ + val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN); + val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT; + udelay(1); + nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val); + + for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) { + udelay(1); + val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG); + if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) + break; + } + + /* exit slowdown mode */ + nv_mask(priv, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) | + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); + nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN); + + if (ramp_timeout <= 0) { + nv_error(priv, "gpcpll dynamic ramp timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void +_gk20a_pllg_enable(struct gk20a_clock_priv *priv) +{ + nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); + nv_rd32(priv, GPCPLL_CFG); +} + +static void +_gk20a_pllg_disable(struct gk20a_clock_priv *priv) +{ + nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); + nv_rd32(priv, GPCPLL_CFG); +} + +static int +_gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide) +{ + u32 val, cfg; + u32 m_old, pl_old, n_lo; + + /* get old coefficients */ + val = nv_rd32(priv, GPCPLL_COEFF); + m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); + pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); + + /* do NDIV slide if there is no change in M and PL */ + cfg = nv_rd32(priv, GPCPLL_CFG); + if (allow_slide && priv->m == m_old && priv->pl == pl_old && + (cfg & GPCPLL_CFG_ENABLE)) { + return gk20a_pllg_slide(priv, priv->n); + } + + /* slide down to NDIV_LO */ + n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco, + priv->parent_rate / MHZ); + if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) { + int ret = gk20a_pllg_slide(priv, n_lo); + + if (ret) + return ret; + } + + /* split FO-to-bypass jump in halfs by setting out divider 1:2 */ + nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + 0x2 << GPC2CLK_OUT_VCODIV_SHIFT); + + /* put PLL in bypass before programming it */ + val = nv_rd32(priv, SEL_VCO); + val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + udelay(2); + nv_wr32(priv, SEL_VCO, val); + + /* get out from IDDQ */ + val = nv_rd32(priv, GPCPLL_CFG); + if (val & GPCPLL_CFG_IDDQ) { + val &= ~GPCPLL_CFG_IDDQ; + nv_wr32(priv, GPCPLL_CFG, val); + nv_rd32(priv, GPCPLL_CFG); + udelay(2); + } + + _gk20a_pllg_disable(priv); + + nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n, + priv->pl); + + n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco, + priv->parent_rate / MHZ); + val = priv->m << GPCPLL_COEFF_M_SHIFT; + val |= (allow_slide ? 
n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT; + val |= priv->pl << GPCPLL_COEFF_P_SHIFT; + nv_wr32(priv, GPCPLL_COEFF, val); + + _gk20a_pllg_enable(priv); + + val = nv_rd32(priv, GPCPLL_CFG); + if (val & GPCPLL_CFG_LOCK_DET_OFF) { + val &= ~GPCPLL_CFG_LOCK_DET_OFF; + nv_wr32(priv, GPCPLL_CFG, val); + } + + if (!nouveau_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK, + GPCPLL_CFG_LOCK)) { + nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__); + return -ETIMEDOUT; + } + + /* switch to VCO mode */ + nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + + /* restore out divider 1:1 */ + val = nv_rd32(priv, GPC2CLK_OUT); + val &= ~GPC2CLK_OUT_VCODIV_MASK; + udelay(2); + nv_wr32(priv, GPC2CLK_OUT, val); + + /* slide up to new NDIV */ + return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0; +} + +static int +gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv) +{ + int err; + + err = _gk20a_pllg_program_mnp(priv, true); + if (err) + err = _gk20a_pllg_program_mnp(priv, false); + + return err; +} + +static void +gk20a_pllg_disable(struct gk20a_clock_priv *priv) +{ + u32 val; + + /* slide to VCO min */ + val = nv_rd32(priv, GPCPLL_CFG); + if (val & GPCPLL_CFG_ENABLE) { + u32 coeff, m, n_lo; + + coeff = nv_rd32(priv, GPCPLL_COEFF); + m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH); + n_lo = DIV_ROUND_UP(m * priv->params->min_vco, + priv->parent_rate / MHZ); + gk20a_pllg_slide(priv, n_lo); + } + + /* put PLL in bypass before disabling it */ + nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); + + _gk20a_pllg_disable(priv); +} + +#define GK20A_CLK_GPC_MDIV 1000 + +static struct nouveau_clocks +gk20a_domains[] = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max } +}; + +static struct nouveau_pstate +gk20a_pstates[] = { + { + .base = { + .domain[nv_clk_src_gpc] = 72000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 108000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 180000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 252000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 324000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 396000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 468000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 540000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 612000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 648000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 684000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 708000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 756000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 804000, + }, + }, + { + .base = { + .domain[nv_clk_src_gpc] = 852000, + }, + }, +}; + +static int +gk20a_clock_read(struct nouveau_clock *clk, enum nv_clk_src src) +{ + struct gk20a_clock_priv *priv = (void *)clk; + + switch (src) { + case nv_clk_src_crystal: + return nv_device(clk)->crystal; + case nv_clk_src_gpc: + gk20a_pllg_read_mnp(priv); + return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV; + default: + nv_error(clk, "invalid clock source %d\n", src); + return -EINVAL; + } +} + +static int +gk20a_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate) +{ + struct gk20a_clock_priv *priv = (void *)clk; + + return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] * + GK20A_CLK_GPC_MDIV); +} + +static int +gk20a_clock_prog(struct nouveau_clock *clk) +{ + struct gk20a_clock_priv *priv = (void *)clk; + + return 
gk20a_pllg_program_mnp(priv); +} + +static void +gk20a_clock_tidy(struct nouveau_clock *clk) +{ +} + +static int +gk20a_clock_fini(struct nouveau_object *object, bool suspend) +{ + struct gk20a_clock_priv *priv = (void *)object; + int ret; + + ret = nouveau_clock_fini(&priv->base, false); + + gk20a_pllg_disable(priv); + + return ret; +} + +static int +gk20a_clock_init(struct nouveau_object *object) +{ + struct gk20a_clock_priv *priv = (void *)object; + int ret; + + nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL); + + ret = nouveau_clock_init(&priv->base); + if (ret) + return ret; + + ret = gk20a_clock_prog(&priv->base); + if (ret) { + nv_error(priv, "cannot initialize clock\n"); + return ret; + } + + return 0; +} + +static int +gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct gk20a_clock_priv *priv; + struct nouveau_platform_device *plat; + int ret; + int i; + + /* Finish initializing the pstates */ + for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) { + INIT_LIST_HEAD(&gk20a_pstates[i].list); + gk20a_pstates[i].pstate = i + 1; + } + + ret = nouveau_clock_create(parent, engine, oclass, gk20a_domains, + gk20a_pstates, ARRAY_SIZE(gk20a_pstates), true, &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->params = &gk20a_pllg_params; + + plat = nv_device_to_platform(nv_device(parent)); + priv->parent_rate = clk_get_rate(plat->gpu->clk); + nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ); + + priv->base.read = gk20a_clock_read; + priv->base.calc = gk20a_clock_calc; + priv->base.prog = gk20a_clock_prog; + priv->base.tidy = gk20a_clock_tidy; + + return 0; +} + +struct nouveau_oclass +gk20a_clock_oclass = { + .handle = NV_SUBDEV(CLOCK, 0xea), + .ofuncs = &(struct nouveau_ofuncs) { + .ctor = gk20a_clock_ctor, + .dtor = _nouveau_subdev_dtor, + .init = gk20a_clock_init, + .fini = gk20a_clock_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c index eb2d4425a49e..4c48232686be 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c @@ -82,8 +82,8 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv04_clock_priv *priv; int ret; - ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, false, - &priv); + ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, NULL, 0, + false, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c index 8a9e16839791..08368fe97029 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c @@ -213,8 +213,8 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv40_clock_priv *priv; int ret; - ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, true, - &priv); + ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, NULL, 0, + true, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c index 8c132772ba9e..5070ebc260f8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c @@ -507,7 +507,7 @@ 
nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_clock_create(parent, engine, oclass, pclass->domains, - false, &priv); + NULL, 0, false, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c index 9fb58354a80b..087012b18956 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c @@ -302,8 +302,8 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nva3_clock_priv *priv; int ret; - ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, false, - &priv); + ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0, + false, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c index 6a65fc9e9663..74e19731b1b7 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c @@ -421,8 +421,8 @@ nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nvaa_clock_priv *priv; int ret; - ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, true, - &priv); + ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, NULL, + 0, true, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c index dbf8517f54da..1234abaab2db 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c @@ -437,8 +437,8 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nvc0_clock_priv *priv; int ret; - ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, false, - &priv); + ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, NULL, 0, + false, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c index 0e62a3240144..7eccad57512e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c @@ -475,8 +475,8 @@ nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nve0_clock_priv *priv; int ret; - ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, true, - &priv); + ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, NULL, 0, + true, &priv); *pobject = nv_object(priv); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c index 1fc55c1e91a1..4150b0d10af8 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c @@ -250,9 +250,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (priv->r100c08_page) { - priv->r100c08 = nv_device_map_page(device, priv->r100c08_page); - if (!priv->r100c08) - nv_warn(priv, "failed 0x100c08 page map\n"); + priv->r100c08 = dma_map_page(nv_device_base(device), + priv->r100c08_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(nv_device_base(device), priv->r100c08)) + return -EFAULT; } else { nv_warn(priv, "failed 0x100c08 page 
alloc\n"); } @@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object) struct nv50_fb_priv *priv = (void *)object; if (priv->r100c08_page) { - nv_device_unmap_page(device, priv->r100c08); + dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE, + DMA_BIDIRECTIONAL); __free_page(priv->r100c08_page); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c index 0670ae33ee45..32f28dc73ef2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c @@ -60,6 +60,7 @@ nvc0_fb_init(struct nouveau_object *object) if (priv->r100c10_page) nv_wr32(priv, 0x100c10, priv->r100c10 >> 8); + nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ return 0; } @@ -70,7 +71,8 @@ nvc0_fb_dtor(struct nouveau_object *object) struct nvc0_fb_priv *priv = (void *)object; if (priv->r100c10_page) { - nv_device_unmap_page(device, priv->r100c10); + dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE, + DMA_BIDIRECTIONAL); __free_page(priv->r100c10_page); } @@ -93,8 +95,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (priv->r100c10_page) { - priv->r100c10 = nv_device_map_page(device, priv->r100c10_page); - if (!priv->r100c10) + priv->r100c10 = dma_map_page(nv_device_base(device), + priv->r100c10_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(nv_device_base(device), priv->r100c10)) return -EFAULT; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c index 5a6a5027f749..2b284b192763 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c @@ -26,7 +26,7 @@ #include <subdev/bios/pll.h> #include <subdev/bios/rammap.h> #include <subdev/bios/timing.h> -#include <subdev/ltcg.h> +#include <subdev/ltc.h> #include <subdev/clock.h> #include <subdev/clock/pll.h> @@ -425,7 +425,7 @@ extern const u8 nvc0_pte_storage_type_map[256]; void nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) { - struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); + struct nouveau_ltc *ltc = nouveau_ltc(pfb); struct nouveau_mem *mem = *pmem; *pmem = NULL; @@ -434,7 +434,7 @@ nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem) mutex_lock(&pfb->base.mutex); if (mem->tag) - ltcg->tags_free(ltcg, &mem->tag); + ltc->tags_free(ltc, &mem->tag); __nv50_ram_put(pfb, mem); mutex_unlock(&pfb->base.mutex); @@ -468,12 +468,12 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, mutex_lock(&pfb->base.mutex); if (comp) { - struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb); + struct nouveau_ltc *ltc = nouveau_ltc(pfb); /* compression only works with lpages */ if (align == (1 << (17 - 12))) { int n = size >> 5; - ltcg->tags_alloc(ltcg, n, &mem->tag); + ltc->tags_alloc(ltc, n, &mem->tag); } if (unlikely(!mem->tag)) @@ -554,13 +554,13 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine, } else { /* otherwise, address lowest common amount from 0GiB */ ret = nouveau_mm_init(&pfb->vram, rsvd_head, - (bsize << 8) * parts, 1); + (bsize << 8) * parts - rsvd_head, 1); if (ret) return ret; /* and the rest starting from (8GiB + common_size) */ offset = (0x0200000000ULL >> 12) + (bsize << 8); - length = (ram->size >> 12) - (bsize << 8) - rsvd_tail; + length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail; ret = nouveau_mm_init(&pfb->vram, 
offset, length, 0); if (ret) diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c index 45e0202f3151..b1e3ed7c8beb 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c @@ -106,39 +106,59 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line) } static void -nouveau_gpio_intr_disable(struct nouveau_event *event, int type, int index) +nouveau_gpio_intr_fini(struct nvkm_event *event, int type, int index) { - struct nouveau_gpio *gpio = nouveau_gpio(event->priv); + struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event); const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; impl->intr_mask(gpio, type, 1 << index, 0); } static void -nouveau_gpio_intr_enable(struct nouveau_event *event, int type, int index) +nouveau_gpio_intr_init(struct nvkm_event *event, int type, int index) { - struct nouveau_gpio *gpio = nouveau_gpio(event->priv); + struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event); const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; impl->intr_mask(gpio, type, 1 << index, 1 << index); } +static int +nouveau_gpio_intr_ctor(void *data, u32 size, struct nvkm_notify *notify) +{ + struct nvkm_gpio_ntfy_req *req = data; + if (!WARN_ON(size != sizeof(*req))) { + notify->size = sizeof(struct nvkm_gpio_ntfy_rep); + notify->types = req->mask; + notify->index = req->line; + return 0; + } + return -EINVAL; +} + static void nouveau_gpio_intr(struct nouveau_subdev *subdev) { struct nouveau_gpio *gpio = nouveau_gpio(subdev); const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass; - u32 hi, lo, e, i; + u32 hi, lo, i; impl->intr_stat(gpio, &hi, &lo); - for (i = 0; e = 0, (hi | lo) && i < impl->lines; i++) { - if (hi & (1 << i)) - e |= NVKM_GPIO_HI; - if (lo & (1 << i)) - e |= NVKM_GPIO_LO; - nouveau_event_trigger(gpio->events, e, i); + for (i = 0; (hi | lo) && i < impl->lines; i++) { + struct nvkm_gpio_ntfy_rep rep = { + .mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) | + (NVKM_GPIO_LO * !!(lo & (1 << i))), + }; + nvkm_event_send(&gpio->event, rep.mask, i, &rep, sizeof(rep)); } } +static const struct nvkm_event_func +nouveau_gpio_intr_func = { + .ctor = nouveau_gpio_intr_ctor, + .init = nouveau_gpio_intr_init, + .fini = nouveau_gpio_intr_fini, +}; + int _nouveau_gpio_fini(struct nouveau_object *object, bool suspend) { @@ -183,7 +203,7 @@ void _nouveau_gpio_dtor(struct nouveau_object *object) { struct nouveau_gpio *gpio = (void *)object; - nouveau_event_destroy(&gpio->events); + nvkm_event_fini(&gpio->event); nouveau_subdev_destroy(&gpio->base); } @@ -208,13 +228,11 @@ nouveau_gpio_create_(struct nouveau_object *parent, gpio->get = nouveau_gpio_get; gpio->reset = impl->reset; - ret = nouveau_event_create(2, impl->lines, &gpio->events); + ret = nvkm_event_init(&nouveau_gpio_intr_func, 2, impl->lines, + &gpio->event); if (ret) return ret; - gpio->events->priv = gpio; - gpio->events->enable = nouveau_gpio_intr_enable; - gpio->events->disable = nouveau_gpio_intr_disable; nv_subdev(gpio)->intr = nouveau_gpio_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c index 09ba2cc851cf..a652cafde3d6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c @@ -326,9 +326,9 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, } static void 
-nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index) +nouveau_i2c_intr_fini(struct nvkm_event *event, int type, int index) { - struct nouveau_i2c *i2c = nouveau_i2c(event->priv); + struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event); struct nouveau_i2c_port *port = i2c->find(i2c, index); const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; if (port && port->aux >= 0) @@ -336,15 +336,28 @@ nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index) } static void -nouveau_i2c_intr_enable(struct nouveau_event *event, int type, int index) +nouveau_i2c_intr_init(struct nvkm_event *event, int type, int index) { - struct nouveau_i2c *i2c = nouveau_i2c(event->priv); + struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event); struct nouveau_i2c_port *port = i2c->find(i2c, index); const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass; if (port && port->aux >= 0) impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux); } +static int +nouveau_i2c_intr_ctor(void *data, u32 size, struct nvkm_notify *notify) +{ + struct nvkm_i2c_ntfy_req *req = data; + if (!WARN_ON(size != sizeof(*req))) { + notify->size = sizeof(struct nvkm_i2c_ntfy_rep); + notify->types = req->mask; + notify->index = req->port; + return 0; + } + return -EINVAL; +} + static void nouveau_i2c_intr(struct nouveau_subdev *subdev) { @@ -364,13 +377,26 @@ nouveau_i2c_intr(struct nouveau_subdev *subdev) if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG; if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ; if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE; - - nouveau_event_trigger(i2c->ntfy, e, port->index); + if (e) { + struct nvkm_i2c_ntfy_rep rep = { + .mask = e, + }; + nvkm_event_send(&i2c->event, rep.mask, + port->index, &rep, + sizeof(rep)); + } } } } } +static const struct nvkm_event_func +nouveau_i2c_intr_func = { + .ctor = nouveau_i2c_intr_ctor, + .init = nouveau_i2c_intr_init, + .fini = nouveau_i2c_intr_fini, +}; + int _nouveau_i2c_fini(struct nouveau_object *object, bool suspend) { @@ -431,7 +457,7 @@ _nouveau_i2c_dtor(struct nouveau_object *object) struct nouveau_i2c *i2c = (void *)object; struct nouveau_i2c_port *port, *temp; - nouveau_event_destroy(&i2c->ntfy); + nvkm_event_fini(&i2c->event); list_for_each_entry_safe(port, temp, &i2c->ports, head) { nouveau_object_ref(NULL, (struct nouveau_object **)&port); @@ -547,13 +573,10 @@ nouveau_i2c_create_(struct nouveau_object *parent, } } - ret = nouveau_event_create(4, index, &i2c->ntfy); + ret = nvkm_event_init(&nouveau_i2c_intr_func, 4, index, &i2c->event); if (ret) return ret; - i2c->ntfy->priv = i2c; - i2c->ntfy->enable = nouveau_i2c_intr_enable; - i2c->ntfy->disable = nouveau_i2c_intr_disable; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c new file mode 100644 index 000000000000..32ed442c5913 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c @@ -0,0 +1,126 @@ +/* + * Copyright 2014 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include "priv.h" + +static int +nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n, + struct nouveau_mm_node **pnode) +{ + struct nvkm_ltc_priv *priv = (void *)ltc; + int ret; + + ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); + if (ret) + *pnode = NULL; + + return ret; +} + +static void +nvkm_ltc_tags_free(struct nouveau_ltc *ltc, struct nouveau_mm_node **pnode) +{ + struct nvkm_ltc_priv *priv = (void *)ltc; + nouveau_mm_free(&priv->tags, pnode); +} + +static void +nvkm_ltc_tags_clear(struct nouveau_ltc *ltc, u32 first, u32 count) +{ + const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); + struct nvkm_ltc_priv *priv = (void *)ltc; + const u32 limit = first + count - 1; + + BUG_ON((first > limit) || (limit >= priv->num_tags)); + + impl->cbc_clear(priv, first, limit); + impl->cbc_wait(priv); +} + +static int +nvkm_ltc_zbc_color_get(struct nouveau_ltc *ltc, int index, const u32 color[4]) +{ + const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); + struct nvkm_ltc_priv *priv = (void *)ltc; + memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index])); + impl->zbc_clear_color(priv, index, color); + return index; +} + +static int +nvkm_ltc_zbc_depth_get(struct nouveau_ltc *ltc, int index, const u32 depth) +{ + const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc); + struct nvkm_ltc_priv *priv = (void *)ltc; + priv->zbc_depth[index] = depth; + impl->zbc_clear_depth(priv, index, depth); + return index; +} + +int +_nvkm_ltc_init(struct nouveau_object *object) +{ + const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object); + struct nvkm_ltc_priv *priv = (void *)object; + int ret, i; + + ret = nouveau_subdev_init(&priv->base.base); + if (ret) + return ret; + + for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) { + impl->zbc_clear_color(priv, i, priv->zbc_color[i]); + impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]); + } + + return 0; +} + +int +nvkm_ltc_create_(struct nouveau_object *parent, struct nouveau_object *engine, + struct nouveau_oclass *oclass, int length, void **pobject) +{ + const struct nvkm_ltc_impl *impl = (void *)oclass; + struct nvkm_ltc_priv *priv; + int ret; + + ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PLTCG", + "l2c", length, pobject); + priv = *pobject; + if (ret) + return ret; + + memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color)); + memset(priv->zbc_depth, 0x00, 
sizeof(priv->zbc_depth)); + + priv->base.base.intr = impl->intr; + priv->base.tags_alloc = nvkm_ltc_tags_alloc; + priv->base.tags_free = nvkm_ltc_tags_free; + priv->base.tags_clear = nvkm_ltc_tags_clear; + priv->base.zbc_min = 1; /* reserve 0 for disabled */ + priv->base.zbc_max = min(impl->zbc, NOUVEAU_LTC_MAX_ZBC_CNT) - 1; + priv->base.zbc_color_get = nvkm_ltc_zbc_color_get; + priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c index f2f3338a967a..d5d65285efe5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c @@ -25,10 +25,45 @@ #include <subdev/fb.h> #include <subdev/timer.h> -#include "gf100.h" +#include "priv.h" + +void +gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit) +{ + nv_wr32(priv, 0x17e8cc, start); + nv_wr32(priv, 0x17e8d0, limit); + nv_wr32(priv, 0x17e8c8, 0x00000004); +} + +void +gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv) +{ + int c, s; + for (c = 0; c < priv->ltc_nr; c++) { + for (s = 0; s < priv->lts_nr; s++) + nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0); + } +} + +void +gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4]) +{ + nv_mask(priv, 0x17ea44, 0x0000000f, i); + nv_wr32(priv, 0x17ea48, color[0]); + nv_wr32(priv, 0x17ea4c, color[1]); + nv_wr32(priv, 0x17ea50, color[2]); + nv_wr32(priv, 0x17ea54, color[3]); +} + +void +gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth) +{ + nv_mask(priv, 0x17ea44, 0x0000000f, i); + nv_wr32(priv, 0x17ea58, depth); +} static void -gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) +gf100_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts) { u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400); u32 stat = nv_rd32(priv, base + 0x020); @@ -39,17 +74,17 @@ gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) } } -static void -gf100_ltcg_intr(struct nouveau_subdev *subdev) +void +gf100_ltc_intr(struct nouveau_subdev *subdev) { - struct gf100_ltcg_priv *priv = (void *)subdev; + struct nvkm_ltc_priv *priv = (void *)subdev; u32 mask; mask = nv_rd32(priv, 0x00017c); while (mask) { u32 lts, ltc = __ffs(mask); for (lts = 0; lts < priv->lts_nr; lts++) - gf100_ltcg_lts_isr(priv, ltc, lts); + gf100_ltc_lts_isr(priv, ltc, lts); mask &= ~(1 << ltc); } @@ -59,52 +94,40 @@ gf100_ltcg_intr(struct nouveau_subdev *subdev) nv_mask(priv, 0x000640, 0x02000000, 0x00000000); } -int -gf100_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n, - struct nouveau_mm_node **pnode) +static int +gf100_ltc_init(struct nouveau_object *object) { - struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; + struct nvkm_ltc_priv *priv = (void *)object; + u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001); int ret; - ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode); + ret = nvkm_ltc_init(priv); if (ret) - *pnode = NULL; + return ret; - return ret; + nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ + nv_wr32(priv, 0x17e8d8, priv->ltc_nr); + nv_wr32(priv, 0x17e8d4, priv->tag_base); + nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 
0x00000002 : 0x00000000); + return 0; } void -gf100_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode) -{ - struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; - - nouveau_mm_free(&priv->tags, pnode); -} - -static void -gf100_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) +gf100_ltc_dtor(struct nouveau_object *object) { - struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; - u32 last = first + count - 1; - int p, i; - - BUG_ON((first > last) || (last >= priv->num_tags)); + struct nouveau_fb *pfb = nouveau_fb(object); + struct nvkm_ltc_priv *priv = (void *)object; - nv_wr32(priv, 0x17e8cc, first); - nv_wr32(priv, 0x17e8d0, last); - nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */ + nouveau_mm_fini(&priv->tags); + nouveau_mm_free(&pfb->vram, &priv->tag_ram); - /* wait until it's finished with clearing */ - for (p = 0; p < priv->ltc_nr; ++p) { - for (i = 0; i < priv->lts_nr; ++i) - nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); - } + nvkm_ltc_destroy(priv); } /* TODO: Figure out tag memory details and drop the over-cautious allocation. */ int -gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv) +gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv) { u32 tag_size, tag_margin, tag_align; int ret; @@ -135,29 +158,29 @@ gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv) if (ret) { priv->num_tags = 0; } else { - u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin; + u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin; tag_base += tag_align - 1; ret = do_div(tag_base, tag_align); priv->tag_base = tag_base; } - ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); + ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); return ret; } -static int -gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, +int +gf100_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct gf100_ltcg_priv *priv; struct nouveau_fb *pfb = nouveau_fb(parent); + struct nvkm_ltc_priv *priv; u32 parts, mask; int ret, i; - ret = nouveau_ltcg_create(parent, engine, oclass, &priv); + ret = nvkm_ltc_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -170,57 +193,27 @@ gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, } priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28; - ret = gf100_ltcg_init_tag_ram(pfb, priv); + ret = gf100_ltc_init_tag_ram(pfb, priv); if (ret) return ret; - priv->base.tags_alloc = gf100_ltcg_tags_alloc; - priv->base.tags_free = gf100_ltcg_tags_free; - priv->base.tags_clear = gf100_ltcg_tags_clear; - - nv_subdev(priv)->intr = gf100_ltcg_intr; - return 0; -} - -void -gf100_ltcg_dtor(struct nouveau_object *object) -{ - struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; - struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; - struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent); - - nouveau_mm_fini(&priv->tags); - nouveau_mm_free(&pfb->vram, &priv->tag_ram); - - nouveau_ltcg_destroy(ltcg); -} - -static int -gf100_ltcg_init(struct nouveau_object *object) -{ - struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; - struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; - int ret; - - ret = nouveau_ltcg_init(ltcg); - if (ret) - return ret; - - nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ - 
nv_wr32(priv, 0x17e8d8, priv->ltc_nr); - if (nv_device(ltcg)->card_type >= NV_E0) - nv_wr32(priv, 0x17e000, priv->ltc_nr); - nv_wr32(priv, 0x17e8d4, priv->tag_base); + nv_subdev(priv)->intr = gf100_ltc_intr; return 0; } struct nouveau_oclass * -gf100_ltcg_oclass = &(struct nouveau_oclass) { - .handle = NV_SUBDEV(LTCG, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = gf100_ltcg_ctor, - .dtor = gf100_ltcg_dtor, - .init = gf100_ltcg_init, - .fini = _nouveau_ltcg_fini, +gf100_ltc_oclass = &(struct nvkm_ltc_impl) { + .base.handle = NV_SUBDEV(LTC, 0xc0), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = gf100_ltc_ctor, + .dtor = gf100_ltc_dtor, + .init = gf100_ltc_init, + .fini = _nvkm_ltc_fini, }, -}; + .intr = gf100_ltc_intr, + .cbc_clear = gf100_ltc_cbc_clear, + .cbc_wait = gf100_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gf100_ltc_zbc_clear_color, + .zbc_clear_depth = gf100_ltc_zbc_clear_depth, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c new file mode 100644 index 000000000000..b39b5d0eb8f9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c @@ -0,0 +1,60 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "priv.h" + +static int +gk104_ltc_init(struct nouveau_object *object) +{ + struct nvkm_ltc_priv *priv = (void *)object; + u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001); + int ret; + + ret = nvkm_ltc_init(priv); + if (ret) + return ret; + + nv_wr32(priv, 0x17e8d8, priv->ltc_nr); + nv_wr32(priv, 0x17e000, priv->ltc_nr); + nv_wr32(priv, 0x17e8d4, priv->tag_base); + nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 
0x00000002 : 0x00000000); + return 0; +} + +struct nouveau_oclass * +gk104_ltc_oclass = &(struct nvkm_ltc_impl) { + .base.handle = NV_SUBDEV(LTC, 0xe4), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = gf100_ltc_ctor, + .dtor = gf100_ltc_dtor, + .init = gk104_ltc_init, + .fini = _nvkm_ltc_fini, + }, + .intr = gf100_ltc_intr, + .cbc_clear = gf100_ltc_cbc_clear, + .cbc_wait = gf100_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gf100_ltc_zbc_clear_color, + .zbc_clear_depth = gf100_ltc_zbc_clear_depth, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c index e79d0e81de40..a4de64289762 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c @@ -25,10 +25,45 @@ #include <subdev/fb.h> #include <subdev/timer.h> -#include "gf100.h" +#include "priv.h" static void -gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) +gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit) +{ + nv_wr32(priv, 0x17e270, start); + nv_wr32(priv, 0x17e274, limit); + nv_wr32(priv, 0x17e26c, 0x00000004); +} + +static void +gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv) +{ + int c, s; + for (c = 0; c < priv->ltc_nr; c++) { + for (s = 0; s < priv->lts_nr; s++) + nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0); + } +} + +static void +gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4]) +{ + nv_mask(priv, 0x17e338, 0x0000000f, i); + nv_wr32(priv, 0x17e33c, color[0]); + nv_wr32(priv, 0x17e340, color[1]); + nv_wr32(priv, 0x17e344, color[2]); + nv_wr32(priv, 0x17e348, color[3]); +} + +static void +gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth) +{ + nv_mask(priv, 0x17e338, 0x0000000f, i); + nv_wr32(priv, 0x17e34c, depth); +} + +static void +gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts) { u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400); u32 stat = nv_rd32(priv, base + 0x00c); @@ -40,16 +75,16 @@ gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts) } static void -gm107_ltcg_intr(struct nouveau_subdev *subdev) +gm107_ltc_intr(struct nouveau_subdev *subdev) { - struct gf100_ltcg_priv *priv = (void *)subdev; + struct nvkm_ltc_priv *priv = (void *)subdev; u32 mask; mask = nv_rd32(priv, 0x00017c); while (mask) { u32 lts, ltc = __ffs(mask); for (lts = 0; lts < priv->lts_nr; lts++) - gm107_ltcg_lts_isr(priv, ltc, lts); + gm107_ltc_lts_isr(priv, ltc, lts); mask &= ~(1 << ltc); } @@ -59,37 +94,34 @@ gm107_ltcg_intr(struct nouveau_subdev *subdev) nv_mask(priv, 0x000640, 0x02000000, 0x00000000); } -static void -gm107_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) +static int +gm107_ltc_init(struct nouveau_object *object) { - struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; - u32 last = first + count - 1; - int p, i; - - BUG_ON((first > last) || (last >= priv->num_tags)); + struct nvkm_ltc_priv *priv = (void *)object; + u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001); + int ret; - nv_wr32(priv, 0x17e270, first); - nv_wr32(priv, 0x17e274, last); - nv_wr32(priv, 0x17e26c, 0x4); /* trigger clear */ + ret = nvkm_ltc_init(priv); + if (ret) + return ret; - /* wait until it's finished with clearing */ - for (p = 0; p < priv->ltc_nr; ++p) { - for (i = 0; i < priv->lts_nr; ++i) - nv_wait(priv, 0x14046c + p * 0x2000 + i * 0x200, ~0, 0); - } + nv_wr32(priv, 0x17e27c, priv->ltc_nr); + nv_wr32(priv, 0x17e278, priv->tag_base); + nv_mask(priv, 0x17e264, 
0x00000002, lpg128 ? 0x00000002 : 0x00000000); + return 0; } static int -gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, +gm107_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { - struct gf100_ltcg_priv *priv; struct nouveau_fb *pfb = nouveau_fb(parent); + struct nvkm_ltc_priv *priv; u32 parts, mask; int ret, i; - ret = nouveau_ltcg_create(parent, engine, oclass, &priv); + ret = nvkm_ltc_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; @@ -102,41 +134,26 @@ gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, } priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28; - ret = gf100_ltcg_init_tag_ram(pfb, priv); - if (ret) - return ret; - - priv->base.tags_alloc = gf100_ltcg_tags_alloc; - priv->base.tags_free = gf100_ltcg_tags_free; - priv->base.tags_clear = gm107_ltcg_tags_clear; - - nv_subdev(priv)->intr = gm107_ltcg_intr; - return 0; -} - -static int -gm107_ltcg_init(struct nouveau_object *object) -{ - struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; - struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg; - int ret; - - ret = nouveau_ltcg_init(ltcg); + ret = gf100_ltc_init_tag_ram(pfb, priv); if (ret) return ret; - nv_wr32(priv, 0x17e27c, priv->ltc_nr); - nv_wr32(priv, 0x17e278, priv->tag_base); return 0; } struct nouveau_oclass * -gm107_ltcg_oclass = &(struct nouveau_oclass) { - .handle = NV_SUBDEV(LTCG, 0xff), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = gm107_ltcg_ctor, - .dtor = gf100_ltcg_dtor, - .init = gm107_ltcg_init, - .fini = _nouveau_ltcg_fini, +gm107_ltc_oclass = &(struct nvkm_ltc_impl) { + .base.handle = NV_SUBDEV(LTC, 0xff), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = gm107_ltc_ctor, + .dtor = gf100_ltc_dtor, + .init = gm107_ltc_init, + .fini = _nvkm_ltc_fini, }, -}; + .intr = gm107_ltc_intr, + .cbc_clear = gm107_ltc_cbc_clear, + .cbc_wait = gm107_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gm107_ltc_zbc_clear_color, + .zbc_clear_depth = gm107_ltc_zbc_clear_depth, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h new file mode 100644 index 000000000000..594924f39126 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h @@ -0,0 +1,69 @@ +#ifndef __NVKM_LTC_PRIV_H__ +#define __NVKM_LTC_PRIV_H__ + +#include <subdev/ltc.h> +#include <subdev/fb.h> + +struct nvkm_ltc_priv { + struct nouveau_ltc base; + u32 ltc_nr; + u32 lts_nr; + + u32 num_tags; + u32 tag_base; + struct nouveau_mm tags; + struct nouveau_mm_node *tag_ram; + + u32 zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT][4]; + u32 zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT]; +}; + +#define nvkm_ltc_create(p,e,o,d) \ + nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nvkm_ltc_destroy(p) ({ \ + struct nvkm_ltc_priv *_priv = (p); \ + _nvkm_ltc_dtor(nv_object(_priv)); \ +}) +#define nvkm_ltc_init(p) ({ \ + struct nvkm_ltc_priv *_priv = (p); \ + _nvkm_ltc_init(nv_object(_priv)); \ +}) +#define nvkm_ltc_fini(p,s) ({ \ + struct nvkm_ltc_priv *_priv = (p); \ + _nvkm_ltc_fini(nv_object(_priv), (s)); \ +}) + +int nvkm_ltc_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); + +#define _nvkm_ltc_dtor _nouveau_subdev_dtor +int _nvkm_ltc_init(struct nouveau_object *); +#define _nvkm_ltc_fini _nouveau_subdev_fini + +int gf100_ltc_ctor(struct nouveau_object *, struct nouveau_object *, + struct 
nouveau_oclass *, void *, u32, + struct nouveau_object **); +void gf100_ltc_dtor(struct nouveau_object *); +int gf100_ltc_init_tag_ram(struct nouveau_fb *, struct nvkm_ltc_priv *); +int gf100_ltc_tags_alloc(struct nouveau_ltc *, u32, struct nouveau_mm_node **); +void gf100_ltc_tags_free(struct nouveau_ltc *, struct nouveau_mm_node **); + +struct nvkm_ltc_impl { + struct nouveau_oclass base; + void (*intr)(struct nouveau_subdev *); + + void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit); + void (*cbc_wait)(struct nvkm_ltc_priv *); + + int zbc; + void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]); + void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32); +}; + +void gf100_ltc_intr(struct nouveau_subdev *); +void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32); +void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *); +void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]); +void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h deleted file mode 100644 index 87b10b8412ea..000000000000 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef __NVKM_LTCG_PRIV_GF100_H__ -#define __NVKM_LTCG_PRIV_GF100_H__ - -#include <subdev/ltcg.h> - -struct gf100_ltcg_priv { - struct nouveau_ltcg base; - u32 ltc_nr; - u32 lts_nr; - u32 num_tags; - u32 tag_base; - struct nouveau_mm tags; - struct nouveau_mm_node *tag_ram; -}; - -void gf100_ltcg_dtor(struct nouveau_object *); -int gf100_ltcg_init_tag_ram(struct nouveau_fb *, struct gf100_ltcg_priv *); -int gf100_ltcg_tags_alloc(struct nouveau_ltcg *, u32, struct nouveau_mm_node **); -void gf100_ltcg_tags_free(struct nouveau_ltcg *, struct nouveau_mm_node **); - -#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 8a5555192fa5..ca7cee3a314a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -22,9 +22,17 @@ * Authors: Ben Skeggs */ -#include <subdev/mc.h> +#include "priv.h" #include <core/option.h> +static inline void +nouveau_mc_unk260(struct nouveau_mc *pmc, u32 data) +{ + const struct nouveau_mc_oclass *impl = (void *)nv_oclass(pmc); + if (impl->unk260) + impl->unk260(pmc, data); +} + static inline u32 nouveau_mc_intr_mask(struct nouveau_mc *pmc) { @@ -114,6 +122,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + pmc->unk260 = nouveau_mc_unk260; + if (nv_device_is_pci(device)) switch (device->pdev->device & 0x0ff0) { case 0x00f0: diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c new file mode 100644 index 000000000000..b8d6cb435d0a --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c @@ -0,0 +1,38 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nv04.h" + +struct nouveau_oclass * +gk20a_mc_oclass = &(struct nouveau_mc_oclass) { + .base.handle = NV_SUBDEV(MC, 0xea), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv50_mc_init, + .fini = _nouveau_mc_fini, + }, + .intr = nvc0_mc_intr, + .msi_rearm = nv40_mc_msi_rearm, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h index 81a408e7d034..4d9ea46c47c2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h @@ -1,7 +1,7 @@ #ifndef __NVKM_MC_NV04_H__ #define __NVKM_MC_NV04_H__ -#include <subdev/mc.h> +#include "priv.h" struct nv04_mc_priv { struct nouveau_mc base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index f9c6a678b47d..15d41dc176ff 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c @@ -41,7 +41,7 @@ nvc0_mc_intr[] = { { 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */ { 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */ { 0x01000000, NVDEV_SUBDEV_PWR }, - { 0x02000000, NVDEV_SUBDEV_LTCG }, + { 0x02000000, NVDEV_SUBDEV_LTC }, { 0x08000000, NVDEV_SUBDEV_FB }, { 0x10000000, NVDEV_SUBDEV_BUS }, { 0x40000000, NVDEV_SUBDEV_IBUS }, @@ -56,6 +56,12 @@ nvc0_mc_msi_rearm(struct nouveau_mc *pmc) nv_wr32(priv, 0x088704, 0x00000000); } +void +nvc0_mc_unk260(struct nouveau_mc *pmc, u32 data) +{ + nv_wr32(pmc, 0x000260, data); +} + struct nouveau_oclass * nvc0_mc_oclass = &(struct nouveau_mc_oclass) { .base.handle = NV_SUBDEV(MC, 0xc0), @@ -67,4 +73,5 @@ nvc0_mc_oclass = &(struct nouveau_mc_oclass) { }, .intr = nvc0_mc_intr, .msi_rearm = nvc0_mc_msi_rearm, + .unk260 = nvc0_mc_unk260, }.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c index 837e545aeb9f..68b5f61aadb5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c @@ -35,4 +35,5 @@ nvc3_mc_oclass = &(struct nouveau_mc_oclass) { }, .intr = nvc0_mc_intr, .msi_rearm = nv40_mc_msi_rearm, + .unk260 = nvc0_mc_unk260, }.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h new file mode 100644 index 000000000000..911e66392587 --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h @@ -0,0 +1,38 @@ +#ifndef __NVKM_MC_PRIV_H__ +#define __NVKM_MC_PRIV_H__ + +#include <subdev/mc.h> + +#define nouveau_mc_create(p,e,o,d) \ + nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_mc_destroy(p) ({ \ + struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ +}) +#define nouveau_mc_init(p) ({ \ + struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \ +}) +#define nouveau_mc_fini(p,s) ({ \ + struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \ +}) + +int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +void _nouveau_mc_dtor(struct nouveau_object *); +int _nouveau_mc_init(struct nouveau_object *); +int _nouveau_mc_fini(struct nouveau_object *, bool); + +struct nouveau_mc_intr { + u32 stat; + u32 unit; +}; + +struct nouveau_mc_oclass { + struct nouveau_oclass base; + const struct nouveau_mc_intr *intr; + void (*msi_rearm)(struct nouveau_mc *); + void (*unk260)(struct nouveau_mc *, u32); +}; + +void nvc0_mc_unk260(struct nouveau_mc *, u32); + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c index d4fd3bc9c66f..69f1f34f6931 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c @@ -22,9 +22,18 @@ * Authors: Ben Skeggs */ -#include <subdev/pwr.h> #include <subdev/timer.h> +#include "priv.h" + +static void +nouveau_pwr_pgob(struct nouveau_pwr *ppwr, bool enable) +{ + const struct nvkm_pwr_impl *impl = (void *)nv_oclass(ppwr); + if (impl->pgob) + impl->pgob(ppwr, enable); +} + static int nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2], u32 process, u32 message, u32 data0, u32 data1) @@ -177,6 +186,7 @@ _nouveau_pwr_fini(struct nouveau_object *object, bool suspend) int _nouveau_pwr_init(struct nouveau_object *object) { + const struct nvkm_pwr_impl *impl = (void *)object->oclass; struct nouveau_pwr *ppwr = (void *)object; int ret, i; @@ -186,6 +196,7 @@ _nouveau_pwr_init(struct nouveau_object *object) nv_subdev(ppwr)->intr = nouveau_pwr_intr; ppwr->message = nouveau_pwr_send; + ppwr->pgob = nouveau_pwr_pgob; /* prevent previous ucode from running, wait for idle, reset */ nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */ @@ -195,15 +206,15 @@ _nouveau_pwr_init(struct nouveau_object *object) /* upload data segment */ nv_wr32(ppwr, 0x10a1c0, 0x01000000); - for (i = 0; i < ppwr->data.size / 4; i++) - nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]); + for (i = 0; i < impl->data.size / 4; i++) + nv_wr32(ppwr, 0x10a1c4, impl->data.data[i]); /* upload code segment */ nv_wr32(ppwr, 0x10a180, 0x01000000); - for (i = 0; i < ppwr->code.size / 4; i++) { + for (i = 0; i < impl->code.size / 4; i++) { if ((i & 0x3f) == 0) nv_wr32(ppwr, 0x10a188, i >> 6); - nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]); + nv_wr32(ppwr, 0x10a184, impl->code.data[i]); } /* start it running */ @@ -245,3 +256,15 @@ nouveau_pwr_create_(struct nouveau_object *parent, init_waitqueue_head(&ppwr->recv.wait); return 0; } + +int +_nouveau_pwr_ctor(struct nouveau_object *parent, + struct nouveau_object *engine, + struct nouveau_oclass *oclass, void *data, u32 size, + struct nouveau_object **pobject) +{ + struct nouveau_pwr *ppwr; + int ret = nouveau_pwr_create(parent, engine, oclass, &ppwr); + *pobject = nv_object(ppwr); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc 
b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc index e2a63ac5422b..5668e045bac1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc @@ -242,7 +242,7 @@ */ push reg /* */ pop $r13 /* */ pop $r14 /* -*/ call(wr32) /* +*/ call(wr32) #else #define nv_wr32(addr,reg) /* */ sethi $r0 0x14000000 /* diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h index 39a5dc150a05..986495d533dd 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h @@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = { 0x00000000, 0x00000000, 0x584d454d, - 0x0000046f, - 0x00000461, + 0x00000464, + 0x00000456, 0x00000000, 0x00000000, 0x00000000, @@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = { 0x00000000, 0x00000000, 0x46524550, - 0x00000473, - 0x00000471, + 0x00000468, + 0x00000466, 0x00000000, 0x00000000, 0x00000000, @@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = { 0x00000000, 0x00000000, 0x5f433249, - 0x00000877, - 0x0000071e, + 0x0000086c, + 0x00000713, 0x00000000, 0x00000000, 0x00000000, @@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = { 0x00000000, 0x00000000, 0x54534554, - 0x00000898, - 0x00000879, + 0x0000088d, + 0x0000086e, 0x00000000, 0x00000000, 0x00000000, @@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = { 0x00000000, 0x00000000, 0x454c4449, - 0x000008a3, - 0x000008a1, + 0x00000898, + 0x00000896, 0x00000000, 0x00000000, 0x00000000, @@ -239,10 +239,10 @@ uint32_t nv108_pwr_data[] = { 0x000003df, 0x00040003, 0x00000000, - 0x00000407, + 0x000003fc, 0x00010004, 0x00000000, - 0x00000421, + 0x00000416, /* 0x03ac: memx_func_tail */ /* 0x03ac: memx_data_head */ 0x00000000, @@ -1080,375 +1080,375 @@ uint32_t nv108_pwr_code[] = { 0x50f960f9, 0xe0fcd0fc, 0x00002e7e, - 0x140003f1, - 0xa00506fd, - 0xb604bd05, - 0x1bf40242, -/* 0x0407: memx_func_wait */ - 0x0800f8dd, - 0x0088cf2c, - 0x98001e98, - 0x1c98011d, - 0x031b9802, - 0x7e1010b6, - 0xf8000071, -/* 0x0421: memx_func_delay */ + 0xf40242b6, + 0x00f8e81b, +/* 0x03fc: memx_func_wait */ + 0x88cf2c08, 0x001e9800, - 0x7e0410b6, - 0xf800005d, -/* 0x042d: memx_exec */ - 0xf9e0f900, - 0xb2c1b2d0, -/* 0x0435: memx_exec_next */ - 0x001398b2, - 0x950410b6, - 0x30f01034, - 0xde35980c, - 0x12a655f9, - 0xfced1ef4, - 0x7ee0fcd0, - 0xf800023f, -/* 0x0455: memx_info */ - 0x03ac4c00, - 0x7e08004b, - 0xf800023f, -/* 0x0461: memx_recv */ - 0x01d6b000, - 0xb0c90bf4, - 0x0bf400d6, -/* 0x046f: memx_init */ - 0xf800f8eb, -/* 0x0471: perf_recv */ -/* 0x0473: perf_init */ - 0xf800f800, -/* 0x0475: i2c_drive_scl */ - 0x0036b000, - 0x400d0bf4, - 0x01f607e0, - 0xf804bd00, -/* 0x0485: i2c_drive_scl_lo */ - 0x07e44000, - 0xbd0001f6, -/* 0x048f: i2c_drive_sda */ - 0xb000f804, - 0x0bf40036, - 0x07e0400d, - 0xbd0002f6, -/* 0x049f: i2c_drive_sda_lo */ - 0x4000f804, - 0x02f607e4, - 0xf804bd00, -/* 0x04a9: i2c_sense_scl */ - 0x0132f400, - 0xcf07c443, - 0x31fd0033, - 0x060bf404, -/* 0x04bb: i2c_sense_scl_done */ - 0xf80131f4, -/* 0x04bd: i2c_sense_sda */ - 0x0132f400, - 0xcf07c443, - 0x32fd0033, - 0x060bf404, -/* 0x04cf: i2c_sense_sda_done */ - 0xf80131f4, -/* 0x04d1: i2c_raise_scl */ - 0x4440f900, - 0x01030898, - 0x0004757e, -/* 0x04dc: i2c_raise_scl_wait */ - 0x7e03e84e, - 0x7e00005d, - 0xf40004a9, - 0x42b60901, - 0xef1bf401, -/* 0x04f0: i2c_raise_scl_done */ - 0x00f840fc, -/* 0x04f4: i2c_start */ - 0x0004a97e, - 0x7e0d11f4, - 0xf40004bd, - 0x0ef40611, -/* 0x0505: i2c_start_rep */ - 
0x7e00032e, - 0x03000475, - 0x048f7e01, - 0x0076bb00, - 0xf90465b6, - 0x04659450, - 0xbd0256bb, - 0x0475fd50, - 0xd17e50fc, - 0x64b60004, - 0x1d11f404, -/* 0x0530: i2c_start_send */ - 0x8f7e0003, - 0x884e0004, - 0x005d7e13, - 0x7e000300, - 0x4e000475, - 0x5d7e1388, -/* 0x054a: i2c_start_out */ - 0x00f80000, -/* 0x054c: i2c_stop */ - 0x757e0003, - 0x00030004, - 0x00048f7e, - 0x7e03e84e, - 0x0300005d, - 0x04757e01, - 0x13884e00, + 0x98011d98, + 0x1b98021c, + 0x1010b603, + 0x0000717e, +/* 0x0416: memx_func_delay */ + 0x1e9800f8, + 0x0410b600, 0x00005d7e, - 0x8f7e0103, - 0x884e0004, - 0x005d7e13, -/* 0x057b: i2c_bitw */ - 0x7e00f800, - 0x4e00048f, - 0x5d7e03e8, - 0x76bb0000, +/* 0x0422: memx_exec */ + 0xe0f900f8, + 0xc1b2d0f9, +/* 0x042a: memx_exec_next */ + 0x1398b2b2, + 0x0410b600, + 0xf0103495, + 0x35980c30, + 0xa655f9de, + 0xed1ef412, + 0xe0fcd0fc, + 0x00023f7e, +/* 0x044a: memx_info */ + 0xac4c00f8, + 0x08004b03, + 0x00023f7e, +/* 0x0456: memx_recv */ + 0xd6b000f8, + 0xc90bf401, + 0xf400d6b0, + 0x00f8eb0b, +/* 0x0464: memx_init */ +/* 0x0466: perf_recv */ + 0x00f800f8, +/* 0x0468: perf_init */ +/* 0x046a: i2c_drive_scl */ + 0x36b000f8, + 0x0d0bf400, + 0xf607e040, + 0x04bd0001, +/* 0x047a: i2c_drive_scl_lo */ + 0xe44000f8, + 0x0001f607, + 0x00f804bd, +/* 0x0484: i2c_drive_sda */ + 0xf40036b0, + 0xe0400d0b, + 0x0002f607, + 0x00f804bd, +/* 0x0494: i2c_drive_sda_lo */ + 0xf607e440, + 0x04bd0002, +/* 0x049e: i2c_sense_scl */ + 0x32f400f8, + 0x07c44301, + 0xfd0033cf, + 0x0bf40431, + 0x0131f406, +/* 0x04b0: i2c_sense_scl_done */ +/* 0x04b2: i2c_sense_sda */ + 0x32f400f8, + 0x07c44301, + 0xfd0033cf, + 0x0bf40432, + 0x0131f406, +/* 0x04c4: i2c_sense_sda_done */ +/* 0x04c6: i2c_raise_scl */ + 0x40f900f8, + 0x03089844, + 0x046a7e01, +/* 0x04d1: i2c_raise_scl_wait */ + 0x03e84e00, + 0x00005d7e, + 0x00049e7e, + 0xb60901f4, + 0x1bf40142, +/* 0x04e5: i2c_raise_scl_done */ + 0xf840fcef, +/* 0x04e9: i2c_start */ + 0x049e7e00, + 0x0d11f400, + 0x0004b27e, + 0xf40611f4, +/* 0x04fa: i2c_start_rep */ + 0x00032e0e, + 0x00046a7e, + 0x847e0103, + 0x76bb0004, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0x7e50fc04, - 0xb60004d1, + 0xb60004c6, 0x11f40464, - 0x13884e17, +/* 0x0525: i2c_start_send */ + 0x7e00031d, + 0x4e000484, + 0x5d7e1388, + 0x00030000, + 0x00046a7e, + 0x7e13884e, +/* 0x053f: i2c_start_out */ + 0xf800005d, +/* 0x0541: i2c_stop */ + 0x7e000300, + 0x0300046a, + 0x04847e00, + 0x03e84e00, 0x00005d7e, - 0x757e0003, + 0x6a7e0103, 0x884e0004, 0x005d7e13, -/* 0x05b9: i2c_bitw_out */ -/* 0x05bb: i2c_bitr */ - 0x0300f800, - 0x048f7e01, - 0x03e84e00, - 0x00005d7e, - 0xb60076bb, - 0x50f90465, - 0xbb046594, - 0x50bd0256, - 0xfc0475fd, - 0x04d17e50, - 0x0464b600, - 0x7e1a11f4, - 0x030004bd, - 0x04757e00, - 0x13884e00, - 0x00005d7e, - 0xf4013cf0, -/* 0x05fe: i2c_bitr_done */ - 0x00f80131, -/* 0x0600: i2c_get_byte */ - 0x08040005, -/* 0x0604: i2c_get_byte_next */ - 0xbb0154b6, + 0x7e010300, + 0x4e000484, + 0x5d7e1388, + 0x00f80000, +/* 0x0570: i2c_bitw */ + 0x0004847e, + 0x7e03e84e, + 0xbb00005d, 0x65b60076, 0x9450f904, 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x0005bb7e, + 0x0004c67e, 0xf40464b6, - 0x53fd2a11, - 0x0142b605, - 0x03d81bf4, - 0x0076bb01, + 0x884e1711, + 0x005d7e13, + 0x7e000300, + 0x4e00046a, + 0x5d7e1388, +/* 0x05ae: i2c_bitw_out */ + 0x00f80000, +/* 0x05b0: i2c_bitr */ + 0x847e0103, + 0xe84e0004, + 0x005d7e03, + 0x0076bb00, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, - 0x7b7e50fc, - 0x64b60005, -/* 0x064d: i2c_get_byte_done */ -/* 0x064f: i2c_put_byte */ - 0x0400f804, -/* 0x0651: 
i2c_put_byte_next */ - 0x0142b608, - 0xbb3854ff, - 0x65b60076, - 0x9450f904, - 0x56bb0465, - 0xfd50bd02, - 0x50fc0475, - 0x00057b7e, - 0xf40464b6, - 0x46b03411, - 0xd81bf400, + 0xc67e50fc, + 0x64b60004, + 0x1a11f404, + 0x0004b27e, + 0x6a7e0003, + 0x884e0004, + 0x005d7e13, + 0x013cf000, +/* 0x05f3: i2c_bitr_done */ + 0xf80131f4, +/* 0x05f5: i2c_get_byte */ + 0x04000500, +/* 0x05f9: i2c_get_byte_next */ + 0x0154b608, 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x05bb7e50, + 0x05b07e50, 0x0464b600, - 0xbb0f11f4, - 0x36b00076, - 0x061bf401, -/* 0x06a7: i2c_put_byte_done */ - 0xf80132f4, -/* 0x06a9: i2c_addr */ - 0x0076bb00, - 0xf90465b6, - 0x04659450, - 0xbd0256bb, - 0x0475fd50, - 0xf47e50fc, - 0x64b60004, - 0x2911f404, - 0x012ec3e7, - 0xfd0134b6, - 0x76bb0553, + 0xfd2a11f4, + 0x42b60553, + 0xd81bf401, + 0x76bb0103, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0x7e50fc04, - 0xb600064f, -/* 0x06ee: i2c_addr_done */ + 0xb6000570, +/* 0x0642: i2c_get_byte_done */ 0x00f80464, -/* 0x06f0: i2c_acquire_addr */ - 0xb6f8cec7, - 0xe0b705e4, - 0x00f8d014, -/* 0x06fc: i2c_acquire */ - 0x0006f07e, - 0x0000047e, - 0x7e03d9f0, - 0xf800002e, -/* 0x070d: i2c_release */ - 0x06f07e00, - 0x00047e00, - 0x03daf000, - 0x00002e7e, -/* 0x071e: i2c_recv */ - 0x32f400f8, - 0xf8c1c701, - 0xb00214b6, - 0x1ff52816, - 0x13b80137, - 0x98000bd4, - 0x13b80032, - 0x98000bac, - 0x31f40031, - 0xf9d0f902, - 0xf1d0f9e0, - 0xf1000067, - 0x92100063, - 0x76bb0167, - 0x0465b600, - 0x659450f9, - 0x0256bb04, - 0x75fd50bd, - 0x7e50fc04, - 0xb60006fc, - 0xd0fc0464, - 0xf500d6b0, - 0x0500b01b, - 0x0076bb00, - 0xf90465b6, - 0x04659450, - 0xbd0256bb, - 0x0475fd50, - 0xa97e50fc, - 0x64b60006, - 0xcc11f504, - 0xe0c5c700, +/* 0x0644: i2c_put_byte */ +/* 0x0646: i2c_put_byte_next */ + 0x42b60804, + 0x3854ff01, 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x064f7e50, + 0x05707e50, 0x0464b600, - 0x00a911f5, - 0x76bb0105, + 0xb03411f4, + 0x1bf40046, + 0x0076bbd8, + 0xf90465b6, + 0x04659450, + 0xbd0256bb, + 0x0475fd50, + 0xb07e50fc, + 0x64b60005, + 0x0f11f404, + 0xb00076bb, + 0x1bf40136, + 0x0132f406, +/* 0x069c: i2c_put_byte_done */ +/* 0x069e: i2c_addr */ + 0x76bb00f8, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0x7e50fc04, - 0xb60006a9, - 0x11f50464, - 0x76bb0087, + 0xb60004e9, + 0x11f40464, + 0x2ec3e729, + 0x0134b601, + 0xbb0553fd, + 0x65b60076, + 0x9450f904, + 0x56bb0465, + 0xfd50bd02, + 0x50fc0475, + 0x0006447e, +/* 0x06e3: i2c_addr_done */ + 0xf80464b6, +/* 0x06e5: i2c_acquire_addr */ + 0xf8cec700, + 0xb705e4b6, + 0xf8d014e0, +/* 0x06f1: i2c_acquire */ + 0x06e57e00, + 0x00047e00, + 0x03d9f000, + 0x00002e7e, +/* 0x0702: i2c_release */ + 0xe57e00f8, + 0x047e0006, + 0xdaf00000, + 0x002e7e03, +/* 0x0713: i2c_recv */ + 0xf400f800, + 0xc1c70132, + 0x0214b6f8, + 0xf52816b0, + 0xb801371f, + 0x000bd413, + 0xb8003298, + 0x000bac13, + 0xf4003198, + 0xd0f90231, + 0xd0f9e0f9, + 0x000067f1, + 0x100063f1, + 0xbb016792, + 0x65b60076, + 0x9450f904, + 0x56bb0465, + 0xfd50bd02, + 0x50fc0475, + 0x0006f17e, + 0xfc0464b6, + 0x00d6b0d0, + 0x00b01bf5, + 0x76bb0005, 0x0465b600, 0x659450f9, 0x0256bb04, 0x75fd50bd, 0x7e50fc04, - 0xb6000600, - 0x11f40464, - 0xe05bcb67, - 0xb60076bb, - 0x50f90465, - 0xbb046594, - 0x50bd0256, - 0xfc0475fd, - 0x054c7e50, - 0x0464b600, - 0x74bd5bb2, -/* 0x0823: i2c_recv_not_rd08 */ - 0xb0410ef4, - 0x1bf401d6, - 0x7e00053b, - 0xf40006a9, - 0xc5c73211, - 0x064f7ee0, - 0x2811f400, - 0xa97e0005, + 0xb600069e, + 0x11f50464, + 0xc5c700cc, + 0x0076bbe0, + 0xf90465b6, + 0x04659450, + 0xbd0256bb, + 
0x0475fd50, + 0x447e50fc, + 0x64b60006, + 0xa911f504, + 0xbb010500, + 0x65b60076, + 0x9450f904, + 0x56bb0465, + 0xfd50bd02, + 0x50fc0475, + 0x00069e7e, + 0xf50464b6, + 0xbb008711, + 0x65b60076, + 0x9450f904, + 0x56bb0465, + 0xfd50bd02, + 0x50fc0475, + 0x0005f57e, + 0xf40464b6, + 0x5bcb6711, + 0x0076bbe0, + 0xf90465b6, + 0x04659450, + 0xbd0256bb, + 0x0475fd50, + 0x417e50fc, + 0x64b60005, + 0xbd5bb204, + 0x410ef474, +/* 0x0818: i2c_recv_not_rd08 */ + 0xf401d6b0, + 0x00053b1b, + 0x00069e7e, + 0xc73211f4, + 0x447ee0c5, 0x11f40006, - 0xe0b5c71f, - 0x00064f7e, - 0x7e1511f4, - 0xbd00054c, - 0x08c5c774, - 0xf4091bf4, - 0x0ef40232, -/* 0x0861: i2c_recv_not_wr08 */ -/* 0x0861: i2c_recv_done */ - 0xf8cec703, - 0x00070d7e, - 0xd0fce0fc, - 0xb20912f4, - 0x023f7e7c, -/* 0x0875: i2c_recv_exit */ -/* 0x0877: i2c_init */ - 0xf800f800, -/* 0x0879: test_recv */ - 0x04584100, - 0xb60011cf, - 0x58400110, - 0x0001f604, - 0xe7f104bd, - 0xe3f1d900, - 0x967e134f, - 0x00f80001, -/* 0x0898: test_init */ - 0x7e08004e, - 0xf8000196, -/* 0x08a1: idle_recv */ -/* 0x08a3: idle */ - 0xf400f800, - 0x54410031, + 0x7e000528, + 0xf400069e, + 0xb5c71f11, + 0x06447ee0, + 0x1511f400, + 0x0005417e, + 0xc5c774bd, + 0x091bf408, + 0xf40232f4, +/* 0x0856: i2c_recv_not_wr08 */ +/* 0x0856: i2c_recv_done */ + 0xcec7030e, + 0x07027ef8, + 0xfce0fc00, + 0x0912f4d0, + 0x3f7e7cb2, +/* 0x086a: i2c_recv_exit */ + 0x00f80002, +/* 0x086c: i2c_init */ +/* 0x086e: test_recv */ + 0x584100f8, 0x0011cf04, 0x400110b6, - 0x01f60454, -/* 0x08b7: idle_loop */ - 0x0104bd00, - 0x0232f458, -/* 0x08bc: idle_proc */ -/* 0x08bc: idle_proc_exec */ - 0x1eb210f9, - 0x0002487e, - 0x11f410fc, - 0x0231f409, -/* 0x08cf: idle_proc_next */ - 0xb6f00ef4, - 0x1fa65810, - 0xf4e81bf4, - 0x28f4e002, - 0xc60ef400, + 0x01f60458, + 0xf104bd00, + 0xf1d900e7, + 0x7e134fe3, + 0xf8000196, +/* 0x088d: test_init */ + 0x08004e00, + 0x0001967e, +/* 0x0896: idle_recv */ + 0x00f800f8, +/* 0x0898: idle */ + 0x410031f4, + 0x11cf0454, + 0x0110b600, + 0xf6045440, + 0x04bd0001, +/* 0x08ac: idle_loop */ + 0x32f45801, +/* 0x08b1: idle_proc */ +/* 0x08b1: idle_proc_exec */ + 0xb210f902, + 0x02487e1e, + 0xf410fc00, + 0x31f40911, + 0xf00ef402, +/* 0x08c4: idle_proc_next */ + 0xa65810b6, + 0xe81bf41f, + 0xf4e002f4, + 0x0ef40028, + 0x000000c6, + 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h index 254205cd5166..e087ce3041be 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h @@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = { 0x00000000, 0x00000000, 0x584d454d, - 0x0000054e, - 0x00000540, + 0x00000542, + 0x00000534, 0x00000000, 0x00000000, 0x00000000, @@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = { 0x00000000, 0x00000000, 0x46524550, - 0x00000552, - 0x00000550, + 0x00000546, + 0x00000544, 0x00000000, 0x00000000, 0x00000000, @@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = { 0x00000000, 0x00000000, 0x5f433249, - 0x00000982, - 0x00000825, + 0x00000976, + 0x00000819, 0x00000000, 0x00000000, 0x00000000, @@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = { 0x00000000, 0x00000000, 0x54534554, - 0x000009ab, - 0x00000984, + 0x0000099f, + 0x00000978, 0x00000000, 0x00000000, 0x00000000, @@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = { 0x00000000, 0x00000000, 0x454c4449, - 0x000009b7, - 0x000009b5, + 0x000009ab, + 0x000009a9, 0x00000000, 0x00000000, 0x00000000, @@ -239,10 +239,10 @@ uint32_t nva3_pwr_data[] = 
{ 0x000004b7, 0x00040003, 0x00000000, - 0x000004df, + 0x000004d3, 0x00010004, 0x00000000, - 0x000004fc, + 0x000004f0, /* 0x03ac: memx_func_tail */ /* 0x03ac: memx_data_head */ 0x00000000, @@ -1198,13 +1198,10 @@ uint32_t nva3_pwr_code[] = { 0x0810b601, 0x50f960f9, 0xe0fcd0fc, - 0xf13f21f4, - 0xfd140003, - 0x05800506, - 0xb604bd00, + 0xb63f21f4, 0x1bf40242, -/* 0x04df: memx_func_wait */ - 0xf000f8dd, +/* 0x04d3: memx_func_wait */ + 0xf000f8e9, 0x84b62c87, 0x0088cf06, 0x98001e98, @@ -1212,14 +1209,14 @@ uint32_t nva3_pwr_code[] = { 0x031b9802, 0xf41010b6, 0x00f89c21, -/* 0x04fc: memx_func_delay */ +/* 0x04f0: memx_func_delay */ 0xb6001e98, 0x21f40410, -/* 0x0507: memx_exec */ +/* 0x04fb: memx_exec */ 0xf900f87f, 0xb9d0f9e0, 0xb2b902c1, -/* 0x0511: memx_exec_next */ +/* 0x0505: memx_exec_next */ 0x00139802, 0x950410b6, 0x30f01034, @@ -1228,112 +1225,112 @@ uint32_t nva3_pwr_code[] = { 0xec1ef406, 0xe0fcd0fc, 0x02b921f5, -/* 0x0532: memx_info */ +/* 0x0526: memx_info */ 0xc7f100f8, 0xb7f103ac, 0x21f50800, 0x00f802b9, -/* 0x0540: memx_recv */ +/* 0x0534: memx_recv */ 0xf401d6b0, 0xd6b0c40b, 0xe90bf400, -/* 0x054e: memx_init */ +/* 0x0542: memx_init */ 0x00f800f8, -/* 0x0550: perf_recv */ -/* 0x0552: perf_init */ +/* 0x0544: perf_recv */ +/* 0x0546: perf_init */ 0x00f800f8, -/* 0x0554: i2c_drive_scl */ +/* 0x0548: i2c_drive_scl */ 0xf40036b0, 0x07f1110b, 0x04b607e0, 0x0001d006, 0x00f804bd, -/* 0x0568: i2c_drive_scl_lo */ +/* 0x055c: i2c_drive_scl_lo */ 0x07e407f1, 0xd00604b6, 0x04bd0001, -/* 0x0576: i2c_drive_sda */ +/* 0x056a: i2c_drive_sda */ 0x36b000f8, 0x110bf400, 0x07e007f1, 0xd00604b6, 0x04bd0002, -/* 0x058a: i2c_drive_sda_lo */ +/* 0x057e: i2c_drive_sda_lo */ 0x07f100f8, 0x04b607e4, 0x0002d006, 0x00f804bd, -/* 0x0598: i2c_sense_scl */ +/* 0x058c: i2c_sense_scl */ 0xf10132f4, 0xb607c437, 0x33cf0634, 0x0431fd00, 0xf4060bf4, -/* 0x05ae: i2c_sense_scl_done */ +/* 0x05a2: i2c_sense_scl_done */ 0x00f80131, -/* 0x05b0: i2c_sense_sda */ +/* 0x05a4: i2c_sense_sda */ 0xf10132f4, 0xb607c437, 0x33cf0634, 0x0432fd00, 0xf4060bf4, -/* 0x05c6: i2c_sense_sda_done */ +/* 0x05ba: i2c_sense_sda_done */ 0x00f80131, -/* 0x05c8: i2c_raise_scl */ +/* 0x05bc: i2c_raise_scl */ 0x47f140f9, 0x37f00898, - 0x5421f501, -/* 0x05d5: i2c_raise_scl_wait */ + 0x4821f501, +/* 0x05c9: i2c_raise_scl_wait */ 0xe8e7f105, 0x7f21f403, - 0x059821f5, + 0x058c21f5, 0xb60901f4, 0x1bf40142, -/* 0x05e9: i2c_raise_scl_done */ +/* 0x05dd: i2c_raise_scl_done */ 0xf840fcef, -/* 0x05ed: i2c_start */ - 0x9821f500, +/* 0x05e1: i2c_start */ + 0x8c21f500, 0x0d11f405, - 0x05b021f5, + 0x05a421f5, 0xf40611f4, -/* 0x05fe: i2c_start_rep */ +/* 0x05f2: i2c_start_rep */ 0x37f0300e, - 0x5421f500, + 0x4821f500, 0x0137f005, - 0x057621f5, + 0x056a21f5, 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xc821f550, + 0xbc21f550, 0x0464b605, -/* 0x062b: i2c_start_send */ +/* 0x061f: i2c_start_send */ 0xf01f11f4, 0x21f50037, - 0xe7f10576, + 0xe7f1056a, 0x21f41388, 0x0037f07f, - 0x055421f5, + 0x054821f5, 0x1388e7f1, -/* 0x0647: i2c_start_out */ +/* 0x063b: i2c_start_out */ 0xf87f21f4, -/* 0x0649: i2c_stop */ +/* 0x063d: i2c_stop */ 0x0037f000, - 0x055421f5, + 0x054821f5, 0xf50037f0, - 0xf1057621, + 0xf1056a21, 0xf403e8e7, 0x37f07f21, - 0x5421f501, + 0x4821f501, 0x88e7f105, 0x7f21f413, 0xf50137f0, - 0xf1057621, + 0xf1056a21, 0xf41388e7, 0x00f87f21, -/* 0x067c: i2c_bitw */ - 0x057621f5, +/* 0x0670: i2c_bitw */ + 0x056a21f5, 0x03e8e7f1, 0xbb7f21f4, 0x65b60076, @@ -1341,18 +1338,18 @@ uint32_t nva3_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 
0x05c821f5, + 0x05bc21f5, 0xf40464b6, 0xe7f11811, 0x21f41388, 0x0037f07f, - 0x055421f5, + 0x054821f5, 0x1388e7f1, -/* 0x06bb: i2c_bitw_out */ +/* 0x06af: i2c_bitw_out */ 0xf87f21f4, -/* 0x06bd: i2c_bitr */ +/* 0x06b1: i2c_bitr */ 0x0137f000, - 0x057621f5, + 0x056a21f5, 0x03e8e7f1, 0xbb7f21f4, 0x65b60076, @@ -1360,19 +1357,19 @@ uint32_t nva3_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x05c821f5, + 0x05bc21f5, 0xf40464b6, 0x21f51b11, - 0x37f005b0, - 0x5421f500, + 0x37f005a4, + 0x4821f500, 0x88e7f105, 0x7f21f413, 0xf4013cf0, -/* 0x0702: i2c_bitr_done */ +/* 0x06f6: i2c_bitr_done */ 0x00f80131, -/* 0x0704: i2c_get_byte */ +/* 0x06f8: i2c_get_byte */ 0xf00057f0, -/* 0x070a: i2c_get_byte_next */ +/* 0x06fe: i2c_get_byte_next */ 0x54b60847, 0x0076bb01, 0xf90465b6, @@ -1380,7 +1377,7 @@ uint32_t nva3_pwr_code[] = { 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b606bd, + 0x64b606b1, 0x2b11f404, 0xb60553fd, 0x1bf40142, @@ -1390,12 +1387,12 @@ uint32_t nva3_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x7c21f550, + 0x7021f550, 0x0464b606, -/* 0x0754: i2c_get_byte_done */ -/* 0x0756: i2c_put_byte */ +/* 0x0748: i2c_get_byte_done */ +/* 0x074a: i2c_put_byte */ 0x47f000f8, -/* 0x0759: i2c_put_byte_next */ +/* 0x074d: i2c_put_byte_next */ 0x0142b608, 0xbb3854ff, 0x65b60076, @@ -1403,7 +1400,7 @@ uint32_t nva3_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x067c21f5, + 0x067021f5, 0xf40464b6, 0x46b03411, 0xd81bf400, @@ -1412,21 +1409,21 @@ uint32_t nva3_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xbd21f550, + 0xb121f550, 0x0464b606, 0xbb0f11f4, 0x36b00076, 0x061bf401, -/* 0x07af: i2c_put_byte_done */ +/* 0x07a3: i2c_put_byte_done */ 0xf80132f4, -/* 0x07b1: i2c_addr */ +/* 0x07a5: i2c_addr */ 0x0076bb00, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b605ed, + 0x64b605e1, 0x2911f404, 0x012ec3e7, 0xfd0134b6, @@ -1436,24 +1433,24 @@ uint32_t nva3_pwr_code[] = { 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb6075621, -/* 0x07f6: i2c_addr_done */ + 0xb6074a21, +/* 0x07ea: i2c_addr_done */ 0x00f80464, -/* 0x07f8: i2c_acquire_addr */ +/* 0x07ec: i2c_acquire_addr */ 0xb6f8cec7, 0xe0b702e4, 0xee980bfc, -/* 0x0807: i2c_acquire */ +/* 0x07fb: i2c_acquire */ 0xf500f800, - 0xf407f821, + 0xf407ec21, 0xd9f00421, 0x3f21f403, -/* 0x0816: i2c_release */ +/* 0x080a: i2c_release */ 0x21f500f8, - 0x21f407f8, + 0x21f407ec, 0x03daf004, 0xf83f21f4, -/* 0x0825: i2c_recv */ +/* 0x0819: i2c_recv */ 0x0132f400, 0xb6f8c1c7, 0x16b00214, @@ -1472,7 +1469,7 @@ uint32_t nva3_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x080721f5, + 0x07fb21f5, 0xfc0464b6, 0x00d6b0d0, 0x00b31bf5, @@ -1482,7 +1479,7 @@ uint32_t nva3_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x07b121f5, + 0x07a521f5, 0xf50464b6, 0xc700d011, 0x76bbe0c5, @@ -1491,7 +1488,7 @@ uint32_t nva3_pwr_code[] = { 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb6075621, + 0xb6074a21, 0x11f50464, 0x57f000ad, 0x0076bb01, @@ -1500,7 +1497,7 @@ uint32_t nva3_pwr_code[] = { 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b607b1, + 0x64b607a5, 0x8a11f504, 0x0076bb00, 0xf90465b6, @@ -1508,7 +1505,7 @@ uint32_t nva3_pwr_code[] = { 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b60704, + 0x64b606f8, 0x6a11f404, 0xbbe05bcb, 0x65b60076, @@ -1516,38 +1513,38 @@ uint32_t nva3_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x064921f5, + 0x063d21f5, 0xb90464b6, 0x74bd025b, -/* 0x092b: i2c_recv_not_rd08 */ +/* 0x091f: i2c_recv_not_rd08 */ 0xb0430ef4, 0x1bf401d6, 0x0057f03d, - 0x07b121f5, + 0x07a521f5, 0xc73311f4, 0x21f5e0c5, - 0x11f40756, + 
0x11f4074a, 0x0057f029, - 0x07b121f5, + 0x07a521f5, 0xc71f11f4, 0x21f5e0b5, - 0x11f40756, - 0x4921f515, + 0x11f4074a, + 0x3d21f515, 0xc774bd06, 0x1bf408c5, 0x0232f409, -/* 0x096b: i2c_recv_not_wr08 */ -/* 0x096b: i2c_recv_done */ +/* 0x095f: i2c_recv_not_wr08 */ +/* 0x095f: i2c_recv_done */ 0xc7030ef4, 0x21f5f8ce, - 0xe0fc0816, + 0xe0fc080a, 0x12f4d0fc, 0x027cb90a, 0x02b921f5, -/* 0x0980: i2c_recv_exit */ -/* 0x0982: i2c_init */ +/* 0x0974: i2c_recv_exit */ +/* 0x0976: i2c_init */ 0x00f800f8, -/* 0x0984: test_recv */ +/* 0x0978: test_recv */ 0x05d817f1, 0xcf0614b6, 0x10b60011, @@ -1557,12 +1554,12 @@ uint32_t nva3_pwr_code[] = { 0x00e7f104, 0x4fe3f1d9, 0xf521f513, -/* 0x09ab: test_init */ +/* 0x099f: test_init */ 0xf100f801, 0xf50800e7, 0xf801f521, -/* 0x09b5: idle_recv */ -/* 0x09b7: idle */ +/* 0x09a9: idle_recv */ +/* 0x09ab: idle */ 0xf400f800, 0x17f10031, 0x14b605d4, @@ -1570,20 +1567,23 @@ uint32_t nva3_pwr_code[] = { 0xf10110b6, 0xb605d407, 0x01d00604, -/* 0x09d3: idle_loop */ +/* 0x09c7: idle_loop */ 0xf004bd00, 0x32f45817, -/* 0x09d9: idle_proc */ -/* 0x09d9: idle_proc_exec */ +/* 0x09cd: idle_proc */ +/* 0x09cd: idle_proc_exec */ 0xb910f902, 0x21f5021e, 0x10fc02c2, 0xf40911f4, 0x0ef40231, -/* 0x09ed: idle_proc_next */ +/* 0x09e1: idle_proc_next */ 0x5810b6ef, 0xf4061fb8, 0x02f4e61b, 0x0028f4dd, 0x00bb0ef4, + 0x00000000, + 0x00000000, + 0x00000000, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h index 7ac87405d01b..0773ff0e3dc3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h @@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = { 0x00000000, 0x00000000, 0x584d454d, - 0x0000054e, - 0x00000540, + 0x00000542, + 0x00000534, 0x00000000, 0x00000000, 0x00000000, @@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = { 0x00000000, 0x00000000, 0x46524550, - 0x00000552, - 0x00000550, + 0x00000546, + 0x00000544, 0x00000000, 0x00000000, 0x00000000, @@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = { 0x00000000, 0x00000000, 0x5f433249, - 0x00000982, - 0x00000825, + 0x00000976, + 0x00000819, 0x00000000, 0x00000000, 0x00000000, @@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = { 0x00000000, 0x00000000, 0x54534554, - 0x000009ab, - 0x00000984, + 0x0000099f, + 0x00000978, 0x00000000, 0x00000000, 0x00000000, @@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = { 0x00000000, 0x00000000, 0x454c4449, - 0x000009b7, - 0x000009b5, + 0x000009ab, + 0x000009a9, 0x00000000, 0x00000000, 0x00000000, @@ -239,10 +239,10 @@ uint32_t nvc0_pwr_data[] = { 0x000004b7, 0x00040003, 0x00000000, - 0x000004df, + 0x000004d3, 0x00010004, 0x00000000, - 0x000004fc, + 0x000004f0, /* 0x03ac: memx_func_tail */ /* 0x03ac: memx_data_head */ 0x00000000, @@ -1198,13 +1198,10 @@ uint32_t nvc0_pwr_code[] = { 0x0810b601, 0x50f960f9, 0xe0fcd0fc, - 0xf13f21f4, - 0xfd140003, - 0x05800506, - 0xb604bd00, + 0xb63f21f4, 0x1bf40242, -/* 0x04df: memx_func_wait */ - 0xf000f8dd, +/* 0x04d3: memx_func_wait */ + 0xf000f8e9, 0x84b62c87, 0x0088cf06, 0x98001e98, @@ -1212,14 +1209,14 @@ uint32_t nvc0_pwr_code[] = { 0x031b9802, 0xf41010b6, 0x00f89c21, -/* 0x04fc: memx_func_delay */ +/* 0x04f0: memx_func_delay */ 0xb6001e98, 0x21f40410, -/* 0x0507: memx_exec */ +/* 0x04fb: memx_exec */ 0xf900f87f, 0xb9d0f9e0, 0xb2b902c1, -/* 0x0511: memx_exec_next */ +/* 0x0505: memx_exec_next */ 0x00139802, 0x950410b6, 0x30f01034, @@ -1228,112 +1225,112 @@ uint32_t nvc0_pwr_code[] = { 0xec1ef406, 0xe0fcd0fc, 0x02b921f5, -/* 0x0532: memx_info */ 
+/* 0x0526: memx_info */ 0xc7f100f8, 0xb7f103ac, 0x21f50800, 0x00f802b9, -/* 0x0540: memx_recv */ +/* 0x0534: memx_recv */ 0xf401d6b0, 0xd6b0c40b, 0xe90bf400, -/* 0x054e: memx_init */ +/* 0x0542: memx_init */ 0x00f800f8, -/* 0x0550: perf_recv */ -/* 0x0552: perf_init */ +/* 0x0544: perf_recv */ +/* 0x0546: perf_init */ 0x00f800f8, -/* 0x0554: i2c_drive_scl */ +/* 0x0548: i2c_drive_scl */ 0xf40036b0, 0x07f1110b, 0x04b607e0, 0x0001d006, 0x00f804bd, -/* 0x0568: i2c_drive_scl_lo */ +/* 0x055c: i2c_drive_scl_lo */ 0x07e407f1, 0xd00604b6, 0x04bd0001, -/* 0x0576: i2c_drive_sda */ +/* 0x056a: i2c_drive_sda */ 0x36b000f8, 0x110bf400, 0x07e007f1, 0xd00604b6, 0x04bd0002, -/* 0x058a: i2c_drive_sda_lo */ +/* 0x057e: i2c_drive_sda_lo */ 0x07f100f8, 0x04b607e4, 0x0002d006, 0x00f804bd, -/* 0x0598: i2c_sense_scl */ +/* 0x058c: i2c_sense_scl */ 0xf10132f4, 0xb607c437, 0x33cf0634, 0x0431fd00, 0xf4060bf4, -/* 0x05ae: i2c_sense_scl_done */ +/* 0x05a2: i2c_sense_scl_done */ 0x00f80131, -/* 0x05b0: i2c_sense_sda */ +/* 0x05a4: i2c_sense_sda */ 0xf10132f4, 0xb607c437, 0x33cf0634, 0x0432fd00, 0xf4060bf4, -/* 0x05c6: i2c_sense_sda_done */ +/* 0x05ba: i2c_sense_sda_done */ 0x00f80131, -/* 0x05c8: i2c_raise_scl */ +/* 0x05bc: i2c_raise_scl */ 0x47f140f9, 0x37f00898, - 0x5421f501, -/* 0x05d5: i2c_raise_scl_wait */ + 0x4821f501, +/* 0x05c9: i2c_raise_scl_wait */ 0xe8e7f105, 0x7f21f403, - 0x059821f5, + 0x058c21f5, 0xb60901f4, 0x1bf40142, -/* 0x05e9: i2c_raise_scl_done */ +/* 0x05dd: i2c_raise_scl_done */ 0xf840fcef, -/* 0x05ed: i2c_start */ - 0x9821f500, +/* 0x05e1: i2c_start */ + 0x8c21f500, 0x0d11f405, - 0x05b021f5, + 0x05a421f5, 0xf40611f4, -/* 0x05fe: i2c_start_rep */ +/* 0x05f2: i2c_start_rep */ 0x37f0300e, - 0x5421f500, + 0x4821f500, 0x0137f005, - 0x057621f5, + 0x056a21f5, 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xc821f550, + 0xbc21f550, 0x0464b605, -/* 0x062b: i2c_start_send */ +/* 0x061f: i2c_start_send */ 0xf01f11f4, 0x21f50037, - 0xe7f10576, + 0xe7f1056a, 0x21f41388, 0x0037f07f, - 0x055421f5, + 0x054821f5, 0x1388e7f1, -/* 0x0647: i2c_start_out */ +/* 0x063b: i2c_start_out */ 0xf87f21f4, -/* 0x0649: i2c_stop */ +/* 0x063d: i2c_stop */ 0x0037f000, - 0x055421f5, + 0x054821f5, 0xf50037f0, - 0xf1057621, + 0xf1056a21, 0xf403e8e7, 0x37f07f21, - 0x5421f501, + 0x4821f501, 0x88e7f105, 0x7f21f413, 0xf50137f0, - 0xf1057621, + 0xf1056a21, 0xf41388e7, 0x00f87f21, -/* 0x067c: i2c_bitw */ - 0x057621f5, +/* 0x0670: i2c_bitw */ + 0x056a21f5, 0x03e8e7f1, 0xbb7f21f4, 0x65b60076, @@ -1341,18 +1338,18 @@ uint32_t nvc0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x05c821f5, + 0x05bc21f5, 0xf40464b6, 0xe7f11811, 0x21f41388, 0x0037f07f, - 0x055421f5, + 0x054821f5, 0x1388e7f1, -/* 0x06bb: i2c_bitw_out */ +/* 0x06af: i2c_bitw_out */ 0xf87f21f4, -/* 0x06bd: i2c_bitr */ +/* 0x06b1: i2c_bitr */ 0x0137f000, - 0x057621f5, + 0x056a21f5, 0x03e8e7f1, 0xbb7f21f4, 0x65b60076, @@ -1360,19 +1357,19 @@ uint32_t nvc0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x05c821f5, + 0x05bc21f5, 0xf40464b6, 0x21f51b11, - 0x37f005b0, - 0x5421f500, + 0x37f005a4, + 0x4821f500, 0x88e7f105, 0x7f21f413, 0xf4013cf0, -/* 0x0702: i2c_bitr_done */ +/* 0x06f6: i2c_bitr_done */ 0x00f80131, -/* 0x0704: i2c_get_byte */ +/* 0x06f8: i2c_get_byte */ 0xf00057f0, -/* 0x070a: i2c_get_byte_next */ +/* 0x06fe: i2c_get_byte_next */ 0x54b60847, 0x0076bb01, 0xf90465b6, @@ -1380,7 +1377,7 @@ uint32_t nvc0_pwr_code[] = { 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b606bd, + 0x64b606b1, 0x2b11f404, 0xb60553fd, 0x1bf40142, @@ -1390,12 +1387,12 @@ 
uint32_t nvc0_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x7c21f550, + 0x7021f550, 0x0464b606, -/* 0x0754: i2c_get_byte_done */ -/* 0x0756: i2c_put_byte */ +/* 0x0748: i2c_get_byte_done */ +/* 0x074a: i2c_put_byte */ 0x47f000f8, -/* 0x0759: i2c_put_byte_next */ +/* 0x074d: i2c_put_byte_next */ 0x0142b608, 0xbb3854ff, 0x65b60076, @@ -1403,7 +1400,7 @@ uint32_t nvc0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x067c21f5, + 0x067021f5, 0xf40464b6, 0x46b03411, 0xd81bf400, @@ -1412,21 +1409,21 @@ uint32_t nvc0_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xbd21f550, + 0xb121f550, 0x0464b606, 0xbb0f11f4, 0x36b00076, 0x061bf401, -/* 0x07af: i2c_put_byte_done */ +/* 0x07a3: i2c_put_byte_done */ 0xf80132f4, -/* 0x07b1: i2c_addr */ +/* 0x07a5: i2c_addr */ 0x0076bb00, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b605ed, + 0x64b605e1, 0x2911f404, 0x012ec3e7, 0xfd0134b6, @@ -1436,24 +1433,24 @@ uint32_t nvc0_pwr_code[] = { 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb6075621, -/* 0x07f6: i2c_addr_done */ + 0xb6074a21, +/* 0x07ea: i2c_addr_done */ 0x00f80464, -/* 0x07f8: i2c_acquire_addr */ +/* 0x07ec: i2c_acquire_addr */ 0xb6f8cec7, 0xe0b702e4, 0xee980bfc, -/* 0x0807: i2c_acquire */ +/* 0x07fb: i2c_acquire */ 0xf500f800, - 0xf407f821, + 0xf407ec21, 0xd9f00421, 0x3f21f403, -/* 0x0816: i2c_release */ +/* 0x080a: i2c_release */ 0x21f500f8, - 0x21f407f8, + 0x21f407ec, 0x03daf004, 0xf83f21f4, -/* 0x0825: i2c_recv */ +/* 0x0819: i2c_recv */ 0x0132f400, 0xb6f8c1c7, 0x16b00214, @@ -1472,7 +1469,7 @@ uint32_t nvc0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x080721f5, + 0x07fb21f5, 0xfc0464b6, 0x00d6b0d0, 0x00b31bf5, @@ -1482,7 +1479,7 @@ uint32_t nvc0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x07b121f5, + 0x07a521f5, 0xf50464b6, 0xc700d011, 0x76bbe0c5, @@ -1491,7 +1488,7 @@ uint32_t nvc0_pwr_code[] = { 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb6075621, + 0xb6074a21, 0x11f50464, 0x57f000ad, 0x0076bb01, @@ -1500,7 +1497,7 @@ uint32_t nvc0_pwr_code[] = { 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b607b1, + 0x64b607a5, 0x8a11f504, 0x0076bb00, 0xf90465b6, @@ -1508,7 +1505,7 @@ uint32_t nvc0_pwr_code[] = { 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b60704, + 0x64b606f8, 0x6a11f404, 0xbbe05bcb, 0x65b60076, @@ -1516,38 +1513,38 @@ uint32_t nvc0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x064921f5, + 0x063d21f5, 0xb90464b6, 0x74bd025b, -/* 0x092b: i2c_recv_not_rd08 */ +/* 0x091f: i2c_recv_not_rd08 */ 0xb0430ef4, 0x1bf401d6, 0x0057f03d, - 0x07b121f5, + 0x07a521f5, 0xc73311f4, 0x21f5e0c5, - 0x11f40756, + 0x11f4074a, 0x0057f029, - 0x07b121f5, + 0x07a521f5, 0xc71f11f4, 0x21f5e0b5, - 0x11f40756, - 0x4921f515, + 0x11f4074a, + 0x3d21f515, 0xc774bd06, 0x1bf408c5, 0x0232f409, -/* 0x096b: i2c_recv_not_wr08 */ -/* 0x096b: i2c_recv_done */ +/* 0x095f: i2c_recv_not_wr08 */ +/* 0x095f: i2c_recv_done */ 0xc7030ef4, 0x21f5f8ce, - 0xe0fc0816, + 0xe0fc080a, 0x12f4d0fc, 0x027cb90a, 0x02b921f5, -/* 0x0980: i2c_recv_exit */ -/* 0x0982: i2c_init */ +/* 0x0974: i2c_recv_exit */ +/* 0x0976: i2c_init */ 0x00f800f8, -/* 0x0984: test_recv */ +/* 0x0978: test_recv */ 0x05d817f1, 0xcf0614b6, 0x10b60011, @@ -1557,12 +1554,12 @@ uint32_t nvc0_pwr_code[] = { 0x00e7f104, 0x4fe3f1d9, 0xf521f513, -/* 0x09ab: test_init */ +/* 0x099f: test_init */ 0xf100f801, 0xf50800e7, 0xf801f521, -/* 0x09b5: idle_recv */ -/* 0x09b7: idle */ +/* 0x09a9: idle_recv */ +/* 0x09ab: idle */ 0xf400f800, 0x17f10031, 0x14b605d4, @@ -1570,20 +1567,23 @@ uint32_t nvc0_pwr_code[] = { 0xf10110b6, 0xb605d407, 
0x01d00604, -/* 0x09d3: idle_loop */ +/* 0x09c7: idle_loop */ 0xf004bd00, 0x32f45817, -/* 0x09d9: idle_proc */ -/* 0x09d9: idle_proc_exec */ +/* 0x09cd: idle_proc */ +/* 0x09cd: idle_proc_exec */ 0xb910f902, 0x21f5021e, 0x10fc02c2, 0xf40911f4, 0x0ef40231, -/* 0x09ed: idle_proc_next */ +/* 0x09e1: idle_proc_next */ 0x5810b6ef, 0xf4061fb8, 0x02f4e61b, 0x0028f4dd, 0x00bb0ef4, + 0x00000000, + 0x00000000, + 0x00000000, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h index cd9ff1a73284..8d369b3faaba 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h @@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = { 0x00000000, 0x00000000, 0x584d454d, - 0x000004c4, - 0x000004b6, + 0x000004b8, + 0x000004aa, 0x00000000, 0x00000000, 0x00000000, @@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = { 0x00000000, 0x00000000, 0x46524550, - 0x000004c8, - 0x000004c6, + 0x000004bc, + 0x000004ba, 0x00000000, 0x00000000, 0x00000000, @@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = { 0x00000000, 0x00000000, 0x5f433249, - 0x000008e3, - 0x00000786, + 0x000008d7, + 0x0000077a, 0x00000000, 0x00000000, 0x00000000, @@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = { 0x00000000, 0x00000000, 0x54534554, - 0x00000906, - 0x000008e5, + 0x000008fa, + 0x000008d9, 0x00000000, 0x00000000, 0x00000000, @@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = { 0x00000000, 0x00000000, 0x454c4449, - 0x00000912, - 0x00000910, + 0x00000906, + 0x00000904, 0x00000000, 0x00000000, 0x00000000, @@ -239,10 +239,10 @@ uint32_t nvd0_pwr_data[] = { 0x00000430, 0x00040003, 0x00000000, - 0x00000458, + 0x0000044c, 0x00010004, 0x00000000, - 0x00000472, + 0x00000466, /* 0x03ac: memx_func_tail */ /* 0x03ac: memx_data_head */ 0x00000000, @@ -1100,26 +1100,23 @@ uint32_t nvd0_pwr_code[] = { 0xf960f908, 0xfcd0fc50, 0x3321f4e0, - 0x140003f1, - 0x800506fd, - 0x04bd0005, 0xf40242b6, - 0x00f8dd1b, -/* 0x0458: memx_func_wait */ + 0x00f8e91b, +/* 0x044c: memx_func_wait */ 0xcf2c87f0, 0x1e980088, 0x011d9800, 0x98021c98, 0x10b6031b, 0x7e21f410, -/* 0x0472: memx_func_delay */ +/* 0x0466: memx_func_delay */ 0x1e9800f8, 0x0410b600, 0xf86721f4, -/* 0x047d: memx_exec */ +/* 0x0471: memx_exec */ 0xf9e0f900, 0x02c1b9d0, -/* 0x0487: memx_exec_next */ +/* 0x047b: memx_exec_next */ 0x9802b2b9, 0x10b60013, 0x10349504, @@ -1129,107 +1126,107 @@ uint32_t nvd0_pwr_code[] = { 0xd0fcec1e, 0x21f5e0fc, 0x00f8026b, -/* 0x04a8: memx_info */ +/* 0x049c: memx_info */ 0x03acc7f1, 0x0800b7f1, 0x026b21f5, -/* 0x04b6: memx_recv */ +/* 0x04aa: memx_recv */ 0xd6b000f8, 0xc40bf401, 0xf400d6b0, 0x00f8e90b, -/* 0x04c4: memx_init */ -/* 0x04c6: perf_recv */ +/* 0x04b8: memx_init */ +/* 0x04ba: perf_recv */ 0x00f800f8, -/* 0x04c8: perf_init */ -/* 0x04ca: i2c_drive_scl */ +/* 0x04bc: perf_init */ +/* 0x04be: i2c_drive_scl */ 0x36b000f8, 0x0e0bf400, 0x07e007f1, 0xbd0001d0, -/* 0x04db: i2c_drive_scl_lo */ +/* 0x04cf: i2c_drive_scl_lo */ 0xf100f804, 0xd007e407, 0x04bd0001, -/* 0x04e6: i2c_drive_sda */ +/* 0x04da: i2c_drive_sda */ 0x36b000f8, 0x0e0bf400, 0x07e007f1, 0xbd0002d0, -/* 0x04f7: i2c_drive_sda_lo */ +/* 0x04eb: i2c_drive_sda_lo */ 0xf100f804, 0xd007e407, 0x04bd0002, -/* 0x0502: i2c_sense_scl */ +/* 0x04f6: i2c_sense_scl */ 0x32f400f8, 0xc437f101, 0x0033cf07, 0xf40431fd, 0x31f4060b, -/* 0x0515: i2c_sense_scl_done */ -/* 0x0517: i2c_sense_sda */ +/* 0x0509: i2c_sense_scl_done */ +/* 0x050b: i2c_sense_sda */ 0xf400f801, 0x37f10132, 0x33cf07c4, 0x0432fd00, 0xf4060bf4, -/* 
0x052a: i2c_sense_sda_done */ +/* 0x051e: i2c_sense_sda_done */ 0x00f80131, -/* 0x052c: i2c_raise_scl */ +/* 0x0520: i2c_raise_scl */ 0x47f140f9, 0x37f00898, - 0xca21f501, -/* 0x0539: i2c_raise_scl_wait */ + 0xbe21f501, +/* 0x052d: i2c_raise_scl_wait */ 0xe8e7f104, 0x6721f403, - 0x050221f5, + 0x04f621f5, 0xb60901f4, 0x1bf40142, -/* 0x054d: i2c_raise_scl_done */ +/* 0x0541: i2c_raise_scl_done */ 0xf840fcef, -/* 0x0551: i2c_start */ - 0x0221f500, - 0x0d11f405, - 0x051721f5, +/* 0x0545: i2c_start */ + 0xf621f500, + 0x0d11f404, + 0x050b21f5, 0xf40611f4, -/* 0x0562: i2c_start_rep */ +/* 0x0556: i2c_start_rep */ 0x37f0300e, - 0xca21f500, + 0xbe21f500, 0x0137f004, - 0x04e621f5, + 0x04da21f5, 0xb60076bb, 0x50f90465, 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x2c21f550, + 0x2021f550, 0x0464b605, -/* 0x058f: i2c_start_send */ +/* 0x0583: i2c_start_send */ 0xf01f11f4, 0x21f50037, - 0xe7f104e6, + 0xe7f104da, 0x21f41388, 0x0037f067, - 0x04ca21f5, + 0x04be21f5, 0x1388e7f1, -/* 0x05ab: i2c_start_out */ +/* 0x059f: i2c_start_out */ 0xf86721f4, -/* 0x05ad: i2c_stop */ +/* 0x05a1: i2c_stop */ 0x0037f000, - 0x04ca21f5, + 0x04be21f5, 0xf50037f0, - 0xf104e621, + 0xf104da21, 0xf403e8e7, 0x37f06721, - 0xca21f501, + 0xbe21f501, 0x88e7f104, 0x6721f413, 0xf50137f0, - 0xf104e621, + 0xf104da21, 0xf41388e7, 0x00f86721, -/* 0x05e0: i2c_bitw */ - 0x04e621f5, +/* 0x05d4: i2c_bitw */ + 0x04da21f5, 0x03e8e7f1, 0xbb6721f4, 0x65b60076, @@ -1237,18 +1234,18 @@ uint32_t nvd0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x052c21f5, + 0x052021f5, 0xf40464b6, 0xe7f11811, 0x21f41388, 0x0037f067, - 0x04ca21f5, + 0x04be21f5, 0x1388e7f1, -/* 0x061f: i2c_bitw_out */ +/* 0x0613: i2c_bitw_out */ 0xf86721f4, -/* 0x0621: i2c_bitr */ +/* 0x0615: i2c_bitr */ 0x0137f000, - 0x04e621f5, + 0x04da21f5, 0x03e8e7f1, 0xbb6721f4, 0x65b60076, @@ -1256,19 +1253,19 @@ uint32_t nvd0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x052c21f5, + 0x052021f5, 0xf40464b6, 0x21f51b11, - 0x37f00517, - 0xca21f500, + 0x37f0050b, + 0xbe21f500, 0x88e7f104, 0x6721f413, 0xf4013cf0, -/* 0x0666: i2c_bitr_done */ +/* 0x065a: i2c_bitr_done */ 0x00f80131, -/* 0x0668: i2c_get_byte */ +/* 0x065c: i2c_get_byte */ 0xf00057f0, -/* 0x066e: i2c_get_byte_next */ +/* 0x0662: i2c_get_byte_next */ 0x54b60847, 0x0076bb01, 0xf90465b6, @@ -1276,7 +1273,7 @@ uint32_t nvd0_pwr_code[] = { 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b60621, + 0x64b60615, 0x2b11f404, 0xb60553fd, 0x1bf40142, @@ -1286,12 +1283,12 @@ uint32_t nvd0_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xe021f550, + 0xd421f550, 0x0464b605, -/* 0x06b8: i2c_get_byte_done */ -/* 0x06ba: i2c_put_byte */ +/* 0x06ac: i2c_get_byte_done */ +/* 0x06ae: i2c_put_byte */ 0x47f000f8, -/* 0x06bd: i2c_put_byte_next */ +/* 0x06b1: i2c_put_byte_next */ 0x0142b608, 0xbb3854ff, 0x65b60076, @@ -1299,7 +1296,7 @@ uint32_t nvd0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x05e021f5, + 0x05d421f5, 0xf40464b6, 0x46b03411, 0xd81bf400, @@ -1308,21 +1305,21 @@ uint32_t nvd0_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x2121f550, + 0x1521f550, 0x0464b606, 0xbb0f11f4, 0x36b00076, 0x061bf401, -/* 0x0713: i2c_put_byte_done */ +/* 0x0707: i2c_put_byte_done */ 0xf80132f4, -/* 0x0715: i2c_addr */ +/* 0x0709: i2c_addr */ 0x0076bb00, 0xf90465b6, 0x04659450, 0xbd0256bb, 0x0475fd50, 0x21f550fc, - 0x64b60551, + 0x64b60545, 0x2911f404, 0x012ec3e7, 0xfd0134b6, @@ -1332,23 +1329,23 @@ uint32_t nvd0_pwr_code[] = { 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb606ba21, -/* 0x075a: i2c_addr_done */ + 0xb606ae21, +/* 0x074e: 
i2c_addr_done */ 0x00f80464, -/* 0x075c: i2c_acquire_addr */ +/* 0x0750: i2c_acquire_addr */ 0xb6f8cec7, 0xe0b705e4, 0x00f8d014, -/* 0x0768: i2c_acquire */ - 0x075c21f5, +/* 0x075c: i2c_acquire */ + 0x075021f5, 0xf00421f4, 0x21f403d9, -/* 0x0777: i2c_release */ +/* 0x076b: i2c_release */ 0xf500f833, - 0xf4075c21, + 0xf4075021, 0xdaf00421, 0x3321f403, -/* 0x0786: i2c_recv */ +/* 0x077a: i2c_recv */ 0x32f400f8, 0xf8c1c701, 0xb00214b6, @@ -1367,7 +1364,7 @@ uint32_t nvd0_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x6821f550, + 0x5c21f550, 0x0464b607, 0xd6b0d0fc, 0xb31bf500, @@ -1377,7 +1374,7 @@ uint32_t nvd0_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0x1521f550, + 0x0921f550, 0x0464b607, 0x00d011f5, 0xbbe0c5c7, @@ -1386,7 +1383,7 @@ uint32_t nvd0_pwr_code[] = { 0x56bb0465, 0xfd50bd02, 0x50fc0475, - 0x06ba21f5, + 0x06ae21f5, 0xf50464b6, 0xf000ad11, 0x76bb0157, @@ -1395,7 +1392,7 @@ uint32_t nvd0_pwr_code[] = { 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb6071521, + 0xb6070921, 0x11f50464, 0x76bb008a, 0x0465b600, @@ -1403,7 +1400,7 @@ uint32_t nvd0_pwr_code[] = { 0x0256bb04, 0x75fd50bd, 0xf550fc04, - 0xb6066821, + 0xb6065c21, 0x11f40464, 0xe05bcb6a, 0xb60076bb, @@ -1411,38 +1408,38 @@ uint32_t nvd0_pwr_code[] = { 0xbb046594, 0x50bd0256, 0xfc0475fd, - 0xad21f550, + 0xa121f550, 0x0464b605, 0xbd025bb9, 0x430ef474, -/* 0x088c: i2c_recv_not_rd08 */ +/* 0x0880: i2c_recv_not_rd08 */ 0xf401d6b0, 0x57f03d1b, - 0x1521f500, + 0x0921f500, 0x3311f407, 0xf5e0c5c7, - 0xf406ba21, + 0xf406ae21, 0x57f02911, - 0x1521f500, + 0x0921f500, 0x1f11f407, 0xf5e0b5c7, - 0xf406ba21, + 0xf406ae21, 0x21f51511, - 0x74bd05ad, + 0x74bd05a1, 0xf408c5c7, 0x32f4091b, 0x030ef402, -/* 0x08cc: i2c_recv_not_wr08 */ -/* 0x08cc: i2c_recv_done */ +/* 0x08c0: i2c_recv_not_wr08 */ +/* 0x08c0: i2c_recv_done */ 0xf5f8cec7, - 0xfc077721, + 0xfc076b21, 0xf4d0fce0, 0x7cb90a12, 0x6b21f502, -/* 0x08e1: i2c_recv_exit */ -/* 0x08e3: i2c_init */ +/* 0x08d5: i2c_recv_exit */ +/* 0x08d7: i2c_init */ 0xf800f802, -/* 0x08e5: test_recv */ +/* 0x08d9: test_recv */ 0xd817f100, 0x0011cf05, 0xf10110b6, @@ -1451,28 +1448,28 @@ uint32_t nvd0_pwr_code[] = { 0xd900e7f1, 0x134fe3f1, 0x01b621f5, -/* 0x0906: test_init */ +/* 0x08fa: test_init */ 0xe7f100f8, 0x21f50800, 0x00f801b6, -/* 0x0910: idle_recv */ -/* 0x0912: idle */ +/* 0x0904: idle_recv */ +/* 0x0906: idle */ 0x31f400f8, 0xd417f100, 0x0011cf05, 0xf10110b6, 0xd005d407, 0x04bd0001, -/* 0x0928: idle_loop */ +/* 0x091c: idle_loop */ 0xf45817f0, -/* 0x092e: idle_proc */ -/* 0x092e: idle_proc_exec */ +/* 0x0922: idle_proc */ +/* 0x0922: idle_proc_exec */ 0x10f90232, 0xf5021eb9, 0xfc027421, 0x0911f410, 0xf40231f4, -/* 0x0942: idle_proc_next */ +/* 0x0936: idle_proc_next */ 0x10b6ef0e, 0x061fb858, 0xf4e61bf4, @@ -1521,4 +1518,7 @@ uint32_t nvd0_pwr_code[] = { 0x00000000, 0x00000000, 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c new file mode 100644 index 000000000000..d76612999b9f --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c @@ -0,0 +1,69 @@ +/* + * Copyright 2013 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "priv.h" + +#define nvd0_pwr_code gk104_pwr_code +#define nvd0_pwr_data gk104_pwr_data +#include "fuc/nvd0.fuc.h" + +static void +gk104_pwr_pgob(struct nouveau_pwr *ppwr, bool enable) +{ + nv_mask(ppwr, 0x000200, 0x00001000, 0x00000000); + nv_rd32(ppwr, 0x000200); + nv_mask(ppwr, 0x000200, 0x08000000, 0x08000000); + msleep(50); + + nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000002); + nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001); + nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000); + + nv_mask(ppwr, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000); + msleep(50); + + nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000000); + nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001); + nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000); + + nv_mask(ppwr, 0x000200, 0x08000000, 0x00000000); + nv_mask(ppwr, 0x000200, 0x00001000, 0x00001000); + nv_rd32(ppwr, 0x000200); +} + +struct nouveau_oclass * +gk104_pwr_oclass = &(struct nvkm_pwr_impl) { + .base.handle = NV_SUBDEV(PWR, 0xe4), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_pwr_ctor, + .dtor = _nouveau_pwr_dtor, + .init = _nouveau_pwr_init, + .fini = _nouveau_pwr_fini, + }, + .code.data = gk104_pwr_code, + .code.size = sizeof(gk104_pwr_code), + .data.data = gk104_pwr_data, + .data.size = sizeof(gk104_pwr_data), + .pgob = gk104_pwr_pgob, +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c index 03de3107d29f..def6a9ac68cf 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c @@ -1,8 +1,7 @@ #ifndef __NVKM_PWR_MEMX_H__ #define __NVKM_PWR_MEMX_H__ -#include <subdev/pwr.h> -#include <subdev/pwr/fuc/os.h> +#include "priv.h" struct nouveau_memx { struct nouveau_pwr *ppwr; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c index 52c85414866a..04ff7c3c34e9 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c @@ -22,41 +22,20 @@ * Authors: Ben Skeggs */ -#include <subdev/pwr.h> - +#include "priv.h" #include "fuc/nv108.fuc.h" -struct nv108_pwr_priv { - struct nouveau_pwr base; -}; - -static int -nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv108_pwr_priv 
*priv; - int ret; - - ret = nouveau_pwr_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.code.data = nv108_pwr_code; - priv->base.code.size = sizeof(nv108_pwr_code); - priv->base.data.data = nv108_pwr_data; - priv->base.data.size = sizeof(nv108_pwr_data); - return 0; -} - -struct nouveau_oclass -nv108_pwr_oclass = { - .handle = NV_SUBDEV(PWR, 0x00), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv108_pwr_ctor, +struct nouveau_oclass * +nv108_pwr_oclass = &(struct nvkm_pwr_impl) { + .base.handle = NV_SUBDEV(PWR, 0x00), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_pwr_ctor, .dtor = _nouveau_pwr_dtor, .init = _nouveau_pwr_init, .fini = _nouveau_pwr_fini, }, -}; + .code.data = nv108_pwr_code, + .code.size = sizeof(nv108_pwr_code), + .data.data = nv108_pwr_data, + .data.size = sizeof(nv108_pwr_data), +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c index c132b7ca9747..998d53076b8b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c @@ -22,50 +22,29 @@ * Authors: Ben Skeggs */ -#include <subdev/pwr.h> - +#include "priv.h" #include "fuc/nva3.fuc.h" -struct nva3_pwr_priv { - struct nouveau_pwr base; -}; - static int nva3_pwr_init(struct nouveau_object *object) { - struct nva3_pwr_priv *priv = (void *)object; - nv_mask(priv, 0x022210, 0x00000001, 0x00000000); - nv_mask(priv, 0x022210, 0x00000001, 0x00000001); - return nouveau_pwr_init(&priv->base); -} - -static int -nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nva3_pwr_priv *priv; - int ret; - - ret = nouveau_pwr_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.code.data = nva3_pwr_code; - priv->base.code.size = sizeof(nva3_pwr_code); - priv->base.data.data = nva3_pwr_data; - priv->base.data.size = sizeof(nva3_pwr_data); - return 0; + struct nouveau_pwr *ppwr = (void *)object; + nv_mask(ppwr, 0x022210, 0x00000001, 0x00000000); + nv_mask(ppwr, 0x022210, 0x00000001, 0x00000001); + return nouveau_pwr_init(ppwr); } -struct nouveau_oclass -nva3_pwr_oclass = { - .handle = NV_SUBDEV(PWR, 0xa3), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nva3_pwr_ctor, +struct nouveau_oclass * +nva3_pwr_oclass = &(struct nvkm_pwr_impl) { + .base.handle = NV_SUBDEV(PWR, 0xa3), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_pwr_ctor, .dtor = _nouveau_pwr_dtor, .init = nva3_pwr_init, .fini = _nouveau_pwr_fini, }, -}; + .code.data = nva3_pwr_code, + .code.size = sizeof(nva3_pwr_code), + .data.data = nva3_pwr_data, + .data.size = sizeof(nva3_pwr_data), +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c index 495f6857428d..9a773e66efa4 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c @@ -22,41 +22,20 @@ * Authors: Ben Skeggs */ -#include <subdev/pwr.h> - +#include "priv.h" #include "fuc/nvc0.fuc.h" -struct nvc0_pwr_priv { - struct nouveau_pwr base; -}; - -static int -nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvc0_pwr_priv *priv; - int ret; - - ret = nouveau_pwr_create(parent, engine, oclass, &priv); 
- *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.code.data = nvc0_pwr_code; - priv->base.code.size = sizeof(nvc0_pwr_code); - priv->base.data.data = nvc0_pwr_data; - priv->base.data.size = sizeof(nvc0_pwr_data); - return 0; -} - -struct nouveau_oclass -nvc0_pwr_oclass = { - .handle = NV_SUBDEV(PWR, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvc0_pwr_ctor, +struct nouveau_oclass * +nvc0_pwr_oclass = &(struct nvkm_pwr_impl) { + .base.handle = NV_SUBDEV(PWR, 0xc0), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_pwr_ctor, .dtor = _nouveau_pwr_dtor, .init = _nouveau_pwr_init, .fini = _nouveau_pwr_fini, }, -}; + .code.data = nvc0_pwr_code, + .code.size = sizeof(nvc0_pwr_code), + .data.data = nvc0_pwr_data, + .data.size = sizeof(nvc0_pwr_data), +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c index 043aa142fe82..2b29be5d08ac 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c @@ -22,41 +22,20 @@ * Authors: Ben Skeggs */ -#include <subdev/pwr.h> - +#include "priv.h" #include "fuc/nvd0.fuc.h" -struct nvd0_pwr_priv { - struct nouveau_pwr base; -}; - -static int -nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nvd0_pwr_priv *priv; - int ret; - - ret = nouveau_pwr_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.code.data = nvd0_pwr_code; - priv->base.code.size = sizeof(nvd0_pwr_code); - priv->base.data.data = nvd0_pwr_data; - priv->base.data.size = sizeof(nvd0_pwr_data); - return 0; -} - -struct nouveau_oclass -nvd0_pwr_oclass = { - .handle = NV_SUBDEV(PWR, 0xd0), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nvd0_pwr_ctor, +struct nouveau_oclass * +nvd0_pwr_oclass = &(struct nvkm_pwr_impl) { + .base.handle = NV_SUBDEV(PWR, 0xd0), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = _nouveau_pwr_ctor, .dtor = _nouveau_pwr_dtor, .init = _nouveau_pwr_init, .fini = _nouveau_pwr_fini, }, -}; + .code.data = nvd0_pwr_code, + .code.size = sizeof(nvd0_pwr_code), + .data.data = nvd0_pwr_data, + .data.size = sizeof(nvd0_pwr_data), +}.base; diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h new file mode 100644 index 000000000000..3814a341db32 --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h @@ -0,0 +1,44 @@ +#ifndef __NVKM_PWR_PRIV_H__ +#define __NVKM_PWR_PRIV_H__ + +#include <subdev/pwr.h> +#include <subdev/pwr/fuc/os.h> + +#define nouveau_pwr_create(p, e, o, d) \ + nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_pwr_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_pwr_init(p) ({ \ + struct nouveau_pwr *_ppwr = (p); \ + _nouveau_pwr_init(nv_object(_ppwr)); \ +}) +#define nouveau_pwr_fini(p,s) ({ \ + struct nouveau_pwr *_ppwr = (p); \ + _nouveau_pwr_fini(nv_object(_ppwr), (s)); \ +}) + +int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); + +int _nouveau_pwr_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +#define _nouveau_pwr_dtor _nouveau_subdev_dtor +int _nouveau_pwr_init(struct nouveau_object *); +int _nouveau_pwr_fini(struct nouveau_object *, bool); + +struct nvkm_pwr_impl { + 
struct nouveau_oclass base; + struct { + u32 *data; + u32 size; + } code; + struct { + u32 *data; + u32 size; + } data; + + void (*pgob)(struct nouveau_pwr *, bool); +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c index 668cf964e4a9..2d0988755530 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c @@ -28,7 +28,7 @@ #include <subdev/timer.h> #include <subdev/fb.h> #include <subdev/vm.h> -#include <subdev/ltcg.h> +#include <subdev/ltc.h> #include <subdev/bar.h> struct nvc0_vmmgr_priv { @@ -116,12 +116,12 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, pte <<= 3; if (mem->tag) { - struct nouveau_ltcg *ltcg = - nouveau_ltcg(vma->vm->vmm->base.base.parent); + struct nouveau_ltc *ltc = + nouveau_ltc(vma->vm->vmm->base.base.parent); u32 tag = mem->tag->offset + (delta >> 17); phys |= (u64)tag << (32 + 12); next |= (u64)1 << (32 + 12); - ltcg->tags_clear(ltcg, tag, cnt); + ltc->tags_clear(ltc, tag, cnt); } while (cnt--) { diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index 2a15b98b4d2b..c6361422a0b2 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c @@ -198,12 +198,12 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, int *burst, int *lwm) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; struct nv_fifo_info fifo_data; struct nv_sim_state sim_data; int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); - uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1); + uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1); sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; @@ -221,13 +221,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, sim_data.mem_latency = 3; sim_data.mem_page_miss = 10; } else { - sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1; - sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64; + sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1; + sim_data.memory_width = (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 
128 : 64; sim_data.mem_latency = cfg1 & 0xf; sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); } - if (nv_device(drm->device)->card_type == NV_04) + if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) nv04_calc_arb(&fifo_data, &sim_data); else nv10_calc_arb(&fifo_data, &sim_data); @@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm { struct nouveau_drm *drm = nouveau_drm(dev); - if (nv_device(drm->device)->card_type < NV_20) + if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) nv04_update_arb(dev, vclk, bpp, burst, lwm); else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 41be3424c906..b90aa5c1f90a 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -111,8 +111,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod { struct drm_device *dev = crtc->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_bios *bios = nouveau_bios(drm->device); - struct nouveau_clock *clk = nouveau_clock(drm->device); + struct nouveau_bios *bios = nvkm_bios(&drm->device); + struct nouveau_clock *clk = nvkm_clock(&drm->device); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; @@ -136,7 +136,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod * has yet been observed in allowing the use a single stage pll on all * nv43 however. the behaviour of single stage use is untested on nv40 */ - if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) + if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); @@ -146,10 +146,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; /* The blob uses this always, so let's do the same */ - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; /* again nv40 and some nv43 act more like nv3x as described above */ - if (nv_device(drm->device)->chipset < 0x41) + if (drm->device.info.chipset < 0x41) state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; state->pllsel |= nv_crtc->index ? 
PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; @@ -275,7 +275,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) horizEnd = horizTotal - 2; horizBlankEnd = horizTotal + 4; #if 0 - if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10) + if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) /* This reportedly works around some video overlay bandwidth problems */ horizTotal += 2; #endif @@ -509,7 +509,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; - if (nv_device(drm->device)->chipset >= 0x11) + if (drm->device.info.chipset >= 0x11) regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; @@ -550,26 +550,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) * 1 << 30 on 0x60.830), for no apparent reason */ regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; - if (nv_device(drm->device)->card_type >= NV_30) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; regp->crtc_830 = mode->crtc_vdisplay - 3; regp->crtc_834 = mode->crtc_vdisplay - 1; - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) /* This is what the blob does */ regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); - if (nv_device(drm->device)->card_type >= NV_30) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); - if (nv_device(drm->device)->card_type >= NV_10) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; else regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; /* Some misc regs */ - if (nv_device(drm->device)->card_type == NV_40) { + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { regp->CRTC[NV_CIO_CRE_85] = 0xFF; regp->CRTC[NV_CIO_CRE_86] = 0x1; } @@ -581,7 +581,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) /* Generic PRAMDAC regs */ - if (nv_device(drm->device)->card_type >= NV_10) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) /* Only bit that bios and blob set. */ regp->nv10_cursync = (1 << 25); @@ -590,7 +590,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; if (crtc->primary->fb->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; - if (nv_device(drm->device)->chipset >= 0x11) + if (drm->device.info.chipset >= 0x11) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ @@ -653,7 +653,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, nv_crtc_mode_set_vga(crtc, adjusted_mode); /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); nv_crtc_mode_set_regs(crtc, adjusted_mode); nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); @@ -714,7 +714,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc) /* Some more preparation. 
*/ NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); - if (nv_device(drm->device)->card_type == NV_40) { + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); } @@ -888,7 +888,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); - if (nv_device(drm->device)->card_type >= NV_20) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) { regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); } @@ -915,9 +915,9 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc, struct drm_device *dev = drm->dev; if (state == ENTER_ATOMIC_MODE_SET) - nouveau_fbcon_save_disable_accel(dev); + nouveau_fbcon_accel_save_disable(dev); else - nouveau_fbcon_restore_accel(dev); + nouveau_fbcon_accel_restore(dev); return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true); } @@ -969,7 +969,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, { struct nouveau_drm *drm = nouveau_drm(dev); - if (nv_device(drm->device)->chipset == 0x11) { + if (drm->device.info.chipset == 0x11) { pixel = ((pixel & 0x000000ff) << 24) | ((pixel & 0x0000ff00) << 8) | ((pixel & 0x00ff0000) >> 8) | @@ -1010,7 +1010,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, if (ret) goto out; - if (nv_device(drm->device)->chipset >= 0x11) + if (drm->device.info.chipset >= 0x11) nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); else nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c index a810303169de..4e61173c3353 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c +++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c @@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) nv_fix_nv40_hw_cursor(dev, nv_crtc->index); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index a96dda48718e..2d8056cde996 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder) static int sample_load_twice(struct drm_device *dev, bool sense[2]) { - struct nouveau_device *device = nouveau_dev(dev); - struct nouveau_timer *ptimer = nouveau_timer(device); + struct nvif_device *device = &nouveau_drm(dev)->device; + struct nouveau_timer *ptimer = nvkm_timer(device); int i; for (i = 0; i < 2; i++) { @@ -95,15 +95,15 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2]) udelay(100); /* when level triggers, sense is _LO_ */ - sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; + sense_a = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10; /* take another reading until it agrees with sense_a... 
*/ do { udelay(100); - sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10; + sense_b = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10; if (sense_a != sense_b) { sense_b_prime = - nv_rd08(device, NV_PRMCIO_INP0) & 0x10; + nvif_rd08(device, NV_PRMCIO_INP0) & 0x10; if (sense_b == sense_b_prime) { /* ... unless two consecutive subsequent * samples agree; sense_a is replaced */ @@ -128,7 +128,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; struct nouveau_drm *drm = nouveau_drm(dev); uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; uint8_t saved_palette0[3], saved_palette_mask; @@ -164,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); - nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); + nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); for (i = 0; i < 3; i++) - saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA); - saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK); - nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0); + saved_palette0[i] = nvif_rd08(device, NV_PRMDIO_PALETTE_DATA); + saved_palette_mask = nvif_rd08(device, NV_PRMDIO_PIXEL_MASK); + nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, 0); saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, @@ -181,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, do { bool sense_pair[2]; - nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); - nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); - nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); + nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); + nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); + nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0); /* testing blue won't find monochrome monitors. 
I don't care */ - nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue); + nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, blue); i = 0; /* take sample pairs until both samples in the pair agree */ @@ -208,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, } while (++blue < 0x18 && sense); out: - nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); + nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); - nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); + nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); for (i = 0; i < 3; i++) - nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); + nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); @@ -231,8 +231,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nouveau_dev(dev); - struct nouveau_gpio *gpio = nouveau_gpio(device); + struct nvif_device *device = &nouveau_drm(dev)->device; + struct nouveau_gpio *gpio = nvkm_gpio(device); struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, @@ -256,12 +256,12 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); - saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2); + saved_powerctrl_2 = nvif_rd32(device, NV_PBUS_POWERCTRL_2); - nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); + nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); if (regoffset == 0x68) { - saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4); - nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); + saved_powerctrl_4 = nvif_rd32(device, NV_PBUS_POWERCTRL_4); + nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); } if (gpio) { @@ -283,7 +283,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ routput = (saved_routput & 0xfffffece) | head << 8; - if (nv_device(drm->device)->card_type >= NV_40) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) { if (dcb->type == DCB_OUTPUT_TV) routput |= 0x1a << 16; else @@ -316,8 +316,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); if (regoffset == 0x68) - nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); - nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); + nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); + nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); if (gpio) { gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1); @@ -398,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder, } /* This could use refinement for flatpanels, but it should work this way */ - if (nv_device(drm->device)->chipset < 0x44) + if (drm->device.info.chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 
0xf0000000); else NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index e57babb206d3..42a5435259f7 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; @@ -335,7 +335,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; else /* gpu needs to scale */ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; - if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) + if (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && output_mode->clock > 165000) @@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || (nv_connector->dithering_mode == DITHERING_MODE_AUTO && encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) { - if (nv_device(drm->device)->chipset == 0x11) + if (drm->device.info.chipset == 0x11) regp->dither = savep->dither | 0x00010000; else { int i; @@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, } } } else { - if (nv_device(drm->device)->chipset != 0x11) { + if (drm->device.info.chipset != 0x11) { /* reset them */ int i; for (i = 0; i < 3; i++) { @@ -463,7 +463,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); /* This could use refinement for flatpanels, but it should work this way */ - if (nv_device(drm->device)->chipset < 0x44) + if (drm->device.info.chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); else NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); @@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) { #ifdef __powerpc__ struct drm_device *dev = encoder->dev; - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; /* BIOS scripts usually take care of the backlight, thanks * Apple for your consistency. 
@@ -623,7 +623,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); struct nouveau_i2c_port *port = i2c->find(i2c, 2); struct nouveau_i2c_board_info info[] = { { diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 4342fdaee707..3d0afa1c6cff 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -22,9 +22,6 @@ * Author: Ben Skeggs */ -#include <core/object.h> -#include <core/class.h> - #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -34,8 +31,6 @@ #include "nouveau_encoder.h" #include "nouveau_connector.h" -#include <subdev/i2c.h> - int nv04_display_early_init(struct drm_device *dev) { @@ -58,7 +53,7 @@ int nv04_display_create(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); struct dcb_table *dcb = &drm->vbios.dcb; struct drm_connector *connector, *ct; struct drm_encoder *encoder; @@ -70,6 +65,8 @@ nv04_display_create(struct drm_device *dev) if (!disp) return -ENOMEM; + nvif_object_map(nvif_object(&drm->device)); + nouveau_display(dev)->priv = disp; nouveau_display(dev)->dtor = nv04_display_destroy; nouveau_display(dev)->init = nv04_display_init; @@ -144,6 +141,7 @@ void nv04_display_destroy(struct drm_device *dev) { struct nv04_display *disp = nv04_display(dev); + struct nouveau_drm *drm = nouveau_drm(dev); struct drm_encoder *encoder; struct drm_crtc *crtc; @@ -170,6 +168,8 @@ nv04_display_destroy(struct drm_device *dev) nouveau_display(dev)->priv = NULL; kfree(disp); + + nvif_object_unmap(nvif_object(&drm->device)); } int diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 4245fc3dab70..17b899d9aba3 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h @@ -131,7 +131,7 @@ nv_two_heads(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); const int impl = dev->pdev->device & 0x0ff0; - if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 && + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 && impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) return true; @@ -150,7 +150,7 @@ nv_two_reg_pll(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); const int impl = dev->pdev->device & 0x0ff0; - if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40) + if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) return true; return false; } @@ -171,8 +171,8 @@ static inline void nouveau_bios_run_init_table(struct drm_device *dev, u16 table, struct dcb_output *outp, int crtc) { - struct nouveau_device *device = nouveau_dev(dev); - struct nouveau_bios *bios = nouveau_bios(device); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_bios *bios = nvkm_bios(&drm->device); struct nvbios_init init = { .subdev = nv_subdev(bios), .bios = bios, diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index aca76af115b3..3d4c19300768 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -27,9 +27,6 @@ #include "hw.h" 
#include <subdev/bios/pll.h> -#include <subdev/fb.h> -#include <subdev/clock.h> -#include <subdev/timer.h> #define CHIPSET_NFORCE 0x01a0 #define CHIPSET_NFORCE2 0x01f0 @@ -92,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner) if (owner == 1) owner *= 3; - if (nv_device(drm->device)->chipset == 0x11) { + if (drm->device.info.chipset == 0x11) { /* This might seem stupid, but the blob does it and * omitting it often locks the system up. */ @@ -103,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner) /* CR44 is always changed on CRTC0 */ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); - if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */ + if (drm->device.info.chipset == 0x11) { /* set me harder */ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); } @@ -152,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, pllvals->NM1 = pll1 & 0xffff; if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) pllvals->NM2 = pll2 & 0xffff; - else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) { + else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) { pllvals->M1 &= 0xf; /* only 4 bits */ if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { pllvals->M2 = (pll1 >> 4) & 0x7; @@ -168,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, struct nouveau_pll_vals *pllvals) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); - struct nouveau_bios *bios = nouveau_bios(device); + struct nvif_device *device = &drm->device; + struct nouveau_bios *bios = nvkm_bios(device); uint32_t reg1, pll1, pll2 = 0; struct nvbios_pll pll_lim; int ret; @@ -178,16 +175,16 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, if (ret || !(reg1 = pll_lim.reg)) return -ENOENT; - pll1 = nv_rd32(device, reg1); + pll1 = nvif_rd32(device, reg1); if (reg1 <= 0x405c) - pll2 = nv_rd32(device, reg1 + 4); + pll2 = nvif_rd32(device, reg1 + 4); else if (nv_two_reg_pll(dev)) { uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70); - pll2 = nv_rd32(device, reg2); + pll2 = nvif_rd32(device, reg2); } - if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { + if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) { uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); /* check whether vpll has been forced into single stage mode */ @@ -255,9 +252,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); - struct nouveau_clock *clk = nouveau_clock(device); - struct nouveau_bios *bios = nouveau_bios(device); + struct nvif_device *device = &drm->device; + struct nouveau_clock *clk = nvkm_clock(device); + struct nouveau_bios *bios = nvkm_bios(device); struct nvbios_pll pll_lim; struct nouveau_pll_vals pv; enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0; @@ -394,21 +391,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head, struct nv04_crtc_reg *regp = &state->crtc_reg[head]; int i; - if (nv_device(drm->device)->card_type >= NV_10) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); nouveau_hw_get_pllvals(dev, head ? 
PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); if (nv_two_heads(dev)) state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); - if (nv_device(drm->device)->chipset == 0x11) + if (drm->device.info.chipset == 0x11) regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); if (nv_gf4_disp_arch(dev)) regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); - if (nv_device(drm->device)->chipset >= 0x30) + if (drm->device.info.chipset >= 0x30) regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); @@ -450,7 +447,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head, if (nv_gf4_disp_arch(dev)) regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); - if (nv_device(drm->device)->card_type == NV_40) { + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); @@ -466,26 +463,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_clock *clk = nouveau_clock(drm->device); + struct nouveau_clock *clk = nvkm_clock(&drm->device); struct nv04_crtc_reg *regp = &state->crtc_reg[head]; uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; int i; - if (nv_device(drm->device)->card_type >= NV_10) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); clk->pll_prog(clk, pllreg, &regp->pllvals); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); if (nv_two_heads(dev)) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); - if (nv_device(drm->device)->chipset == 0x11) + if (drm->device.info.chipset == 0x11) NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); if (nv_gf4_disp_arch(dev)) NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); - if (nv_device(drm->device)->chipset >= 0x30) + if (drm->device.info.chipset >= 0x30) NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); @@ -522,7 +519,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head, if (nv_gf4_disp_arch(dev)) NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); - if (nv_device(drm->device)->card_type == NV_40) { + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); @@ -603,10 +600,10 @@ nv_save_state_ext(struct drm_device *dev, int head, rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_21); - if (nv_device(drm->device)->card_type >= NV_20) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) rd_cio_state(dev, head, regp, NV_CIO_CRE_47); - if (nv_device(drm->device)->card_type >= NV_30) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) rd_cio_state(dev, head, regp, 0x9f); rd_cio_state(dev, head, regp, NV_CIO_CRE_49); @@ -615,14 +612,14 @@ nv_save_state_ext(struct drm_device *dev, int head, rd_cio_state(dev, head, regp, 
NV_CIO_CRE_HCUR_ADDR2_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); - if (nv_device(drm->device)->card_type >= NV_10) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); - if (nv_device(drm->device)->card_type >= NV_30) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); if (nv_two_heads(dev)) @@ -634,7 +631,7 @@ nv_save_state_ext(struct drm_device *dev, int head, rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); - if (nv_device(drm->device)->card_type >= NV_10) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); @@ -663,14 +660,13 @@ nv_load_state_ext(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); - struct nouveau_timer *ptimer = nouveau_timer(device); - struct nouveau_fb *pfb = nouveau_fb(device); + struct nvif_device *device = &drm->device; + struct nouveau_timer *ptimer = nvkm_timer(device); struct nv04_crtc_reg *regp = &state->crtc_reg[head]; uint32_t reg900; int i; - if (nv_device(drm->device)->card_type >= NV_10) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { if (nv_two_heads(dev)) /* setting ENGINE_CTRL (EC) *must* come before * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in @@ -678,24 +674,24 @@ nv_load_state_ext(struct drm_device *dev, int head, */ NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); - nv_wr32(device, NV_PVIDEO_STOP, 1); - nv_wr32(device, NV_PVIDEO_INTR_EN, 0); - nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); - nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); - nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1); - nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1); - nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1); - nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1); - nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); + nvif_wr32(device, NV_PVIDEO_STOP, 1); + nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); + nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); + nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); + nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1); + nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1); + nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1); + nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1); + nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); - if (nv_device(drm->device)->card_type >= NV_30) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); - if (nv_device(drm->device)->card_type == NV_40) { + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); @@ -718,23 +714,23 @@ 
nv_load_state_ext(struct drm_device *dev, int head, wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); - if (nv_device(drm->device)->card_type >= NV_20) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) wr_cio_state(dev, head, regp, NV_CIO_CRE_47); - if (nv_device(drm->device)->card_type >= NV_30) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) wr_cio_state(dev, head, regp, 0x9f); wr_cio_state(dev, head, regp, NV_CIO_CRE_49); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) nv_fix_nv40_hw_cursor(dev, head); wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); - if (nv_device(drm->device)->card_type >= NV_10) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); @@ -742,7 +738,7 @@ nv_load_state_ext(struct drm_device *dev, int head, } /* NV11 and NV20 stop at 0x52. */ if (nv_gf4_disp_arch(dev)) { - if (nv_device(drm->device)->card_type < NV_20) { + if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) { /* Not waiting for vertical retrace before modifying CRE_53/CRE_54 causes lockups. */ nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); @@ -769,15 +765,15 @@ static void nv_save_state_palette(struct drm_device *dev, int head, struct nv04_mode_state *state) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; int head_offset = head * NV_PRMDIO_SIZE, i; - nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, + nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, NV_PRMDIO_PIXEL_MASK_MASK); - nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); + nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); for (i = 0; i < 768; i++) { - state->crtc_reg[head].DAC[i] = nv_rd08(device, + state->crtc_reg[head].DAC[i] = nvif_rd08(device, NV_PRMDIO_PALETTE_DATA + head_offset); } @@ -788,15 +784,15 @@ void nouveau_hw_load_state_palette(struct drm_device *dev, int head, struct nv04_mode_state *state) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; int head_offset = head * NV_PRMDIO_SIZE, i; - nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, + nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, NV_PRMDIO_PIXEL_MASK_MASK); - nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); + nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); for (i = 0; i < 768; i++) { - nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset, + nvif_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset, state->crtc_reg[head].DAC[i]); } @@ -808,7 +804,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head, { struct nouveau_drm *drm = nouveau_drm(dev); - if (nv_device(drm->device)->chipset == 0x11) + if (drm->device.info.chipset == 0x11) /* NB: no attempt is made to restore the bad pll later on */ nouveau_hw_fix_bad_vpll(dev, head); nv_save_state_ramdac(dev, head, state); diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h 
b/drivers/gpu/drm/nouveau/dispnv04/hw.h index eeb70d912d99..7f53c571f31f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.h +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h @@ -60,41 +60,41 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, static inline uint32_t NVReadCRTC(struct drm_device *dev, int head, uint32_t reg) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; uint32_t val; if (head) reg += NV_PCRTC0_SIZE; - val = nv_rd32(device, reg); + val = nvif_rd32(device, reg); return val; } static inline void NVWriteCRTC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; if (head) reg += NV_PCRTC0_SIZE; - nv_wr32(device, reg, val); + nvif_wr32(device, reg, val); } static inline uint32_t NVReadRAMDAC(struct drm_device *dev, int head, uint32_t reg) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; uint32_t val; if (head) reg += NV_PRAMDAC0_SIZE; - val = nv_rd32(device, reg); + val = nvif_rd32(device, reg); return val; } static inline void NVWriteRAMDAC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; if (head) reg += NV_PRAMDAC0_SIZE; - nv_wr32(device, reg, val); + nvif_wr32(device, reg, val); } static inline uint8_t nv_read_tmds(struct drm_device *dev, @@ -120,18 +120,18 @@ static inline void nv_write_tmds(struct drm_device *dev, static inline void NVWriteVgaCrtc(struct drm_device *dev, int head, uint8_t index, uint8_t value) { - struct nouveau_device *device = nouveau_dev(dev); - nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); - nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); + struct nvif_device *device = &nouveau_drm(dev)->device; + nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); + nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); } static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, int head, uint8_t index) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; uint8_t val; - nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); - val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); + nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); + val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); return val; } @@ -165,74 +165,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_ static inline uint8_t NVReadPRMVIO(struct drm_device *dev, int head, uint32_t reg) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; struct nouveau_drm *drm = nouveau_drm(dev); uint8_t val; /* Only NV4x have two pvio ranges; other twoHeads cards MUST call * NVSetOwner for the relevant head to be programmed */ - if (head && nv_device(drm->device)->card_type == NV_40) + if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) reg += NV_PRMVIO_SIZE; - val = nv_rd08(device, reg); + val = nvif_rd08(device, reg); return val; } static inline void NVWritePRMVIO(struct drm_device *dev, int head, uint32_t reg, uint8_t value) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = 
&nouveau_drm(dev)->device; struct nouveau_drm *drm = nouveau_drm(dev); /* Only NV4x have two pvio ranges; other twoHeads cards MUST call * NVSetOwner for the relevant head to be programmed */ - if (head && nv_device(drm->device)->card_type == NV_40) + if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) reg += NV_PRMVIO_SIZE; - nv_wr08(device, reg, value); + nvif_wr08(device, reg, value); } static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) { - struct nouveau_device *device = nouveau_dev(dev); - nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); - nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); + struct nvif_device *device = &nouveau_drm(dev)->device; + nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); } static inline bool NVGetEnablePalette(struct drm_device *dev, int head) { - struct nouveau_device *device = nouveau_dev(dev); - nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); - return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); + struct nvif_device *device = &nouveau_drm(dev)->device; + nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); } static inline void NVWriteVgaAttr(struct drm_device *dev, int head, uint8_t index, uint8_t value) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; if (NVGetEnablePalette(dev, head)) index &= ~0x20; else index |= 0x20; - nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); - nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); - nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); + nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); + nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); } static inline uint8_t NVReadVgaAttr(struct drm_device *dev, int head, uint8_t index) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; uint8_t val; if (NVGetEnablePalette(dev, head)) index &= ~0x20; else index |= 0x20; - nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); - nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); - val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); + nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); + nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); + val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); return val; } @@ -259,11 +259,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) static inline bool nv_heads_tied(struct drm_device *dev) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; struct nouveau_drm *drm = nouveau_drm(dev); - if (nv_device(drm->device)->chipset == 0x11) - return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); + if (drm->device.info.chipset == 0x11) + return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; } @@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock) NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, lock ? 
NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); /* NV11 has independently lockable extended crtcs, except when tied */ - if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev)) + if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev)) NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); @@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; + return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; } static inline void @@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) NVWriteCRTC(dev, head, NV_PCRTC_START, offset); - if (nv_device(drm->device)->card_type == NV_04) { + if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) { /* * Hilarious, the 24th bit doesn't want to stick to * PCRTC_START... @@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show) *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) nv_fix_nv40_hw_cursor(dev, head); } @@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) bpp = 8; /* Alignment requirements taken from the Haiku driver */ - if (nv_device(drm->device)->card_type == NV_04) + if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) mask = 128 / bpp - 1; else mask = 512 / bpp - 1; diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index ab03f7719d2d..b36afcbbc83f 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -96,7 +96,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { - struct nouveau_device *dev = nouveau_dev(plane->dev); + struct nvif_device *dev = &nouveau_drm(plane->dev)->device; struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); @@ -117,7 +117,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (format > 0xffff) return -ERANGE; - if (dev->chipset >= 0x30) { + if (dev->info.chipset >= 0x30) { if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) return -ERANGE; } else { @@ -131,17 +131,17 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, nv_plane->cur = nv_fb->nvbo; - nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); - nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); + nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY); + nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0); - nv_wr32(dev, NV_PVIDEO_BASE(flip), 0); - nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset); - nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w); - nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x); - nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w); - nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h); - nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); - nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); + nvif_wr32(dev, 
NV_PVIDEO_BASE(flip), 0); + nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset); + nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w); + nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x); + nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w); + nvif_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h); + nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); + nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); if (fb->pixel_format != DRM_FORMAT_UYVY) format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8; @@ -153,14 +153,14 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; if (fb->pixel_format == DRM_FORMAT_NV12) { - nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0); - nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip), + nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0); + nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset + fb->offsets[1]); } - nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format); - nv_wr32(dev, NV_PVIDEO_STOP, 0); + nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format); + nvif_wr32(dev, NV_PVIDEO_STOP, 0); /* TODO: wait for vblank? */ - nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1); + nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1); nv_plane->flip = !flip; if (cur) @@ -172,10 +172,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, static int nv10_disable_plane(struct drm_plane *plane) { - struct nouveau_device *dev = nouveau_dev(plane->dev); + struct nvif_device *dev = &nouveau_drm(plane->dev)->device; struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; - nv_wr32(dev, NV_PVIDEO_STOP, 1); + nvif_wr32(dev, NV_PVIDEO_STOP, 1); if (nv_plane->cur) { nouveau_bo_unpin(nv_plane->cur); nv_plane->cur = NULL; @@ -195,24 +195,24 @@ nv_destroy_plane(struct drm_plane *plane) static void nv10_set_params(struct nouveau_plane *plane) { - struct nouveau_device *dev = nouveau_dev(plane->base.dev); + struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device; u32 luma = (plane->brightness - 512) << 16 | plane->contrast; u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | (cos_mul(plane->hue, plane->saturation) & 0xffff); u32 format = 0; - nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma); - nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma); - nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma); - nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma); - nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff); + nvif_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma); + nvif_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma); + nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma); + nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma); + nvif_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff); if (plane->cur) { if (plane->iturbt_709) format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709; if (plane->colorkey & (1 << 24)) format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; - nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip), + nvif_mask(dev, NV_PVIDEO_FORMAT(plane->flip), NV_PVIDEO_FORMAT_MATRIX_ITURBT709 | NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY, format); @@ -256,7 +256,7 @@ static const struct drm_plane_funcs nv10_plane_funcs = { static void nv10_overlay_init(struct drm_device *device) { - struct nouveau_device *dev = nouveau_dev(device); + struct nouveau_drm *drm = nouveau_drm(device); struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); int num_formats = ARRAY_SIZE(formats); int ret; @@ -264,7 +264,7 @@ nv10_overlay_init(struct 
drm_device *device) if (!plane) return; - switch (dev->chipset) { + switch (drm->device.info.chipset) { case 0x10: case 0x11: case 0x15: @@ -333,7 +333,7 @@ cleanup: drm_plane_cleanup(&plane->base); err: kfree(plane); - nv_error(dev, "Failed to create plane\n"); + NV_ERROR(drm, "Failed to create plane\n"); } static int @@ -343,7 +343,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { - struct nouveau_device *dev = nouveau_dev(plane->dev); + struct nvif_device *dev = &nouveau_drm(plane->dev)->device; struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); struct nouveau_bo *cur = nv_plane->cur; @@ -375,43 +375,43 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, nv_plane->cur = nv_fb->nvbo; - nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); - nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); - nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); + nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0); + nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0); + nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0); for (i = 0; i < 2; i++) { - nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i, + nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i, nv_fb->nvbo->bo.offset); - nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch); - nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0); + nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch); + nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0); } - nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x); - nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w); - nv_wr32(dev, NV_PVIDEO_STEP_SIZE, + nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x); + nvif_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w); + nvif_wr32(dev, NV_PVIDEO_STEP_SIZE, (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1))); /* It should be possible to convert hue/contrast to this */ - nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness); - nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness); - nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness); - nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0); + nvif_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness); + nvif_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness); + nvif_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness); + nvif_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0); - nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */ - nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */ + nvif_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */ + nvif_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */ - nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03); - nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38); + nvif_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03); + nvif_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38); - nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey); + nvif_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey); if (nv_plane->colorkey & (1 << 24)) overlay |= 0x10; if (fb->pixel_format == DRM_FORMAT_YUYV) overlay |= 0x100; - nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay); + nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay); - nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16)); + nvif_wr32(dev, NV_PVIDEO_SU_STATE, nvif_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16)); if (cur) nouveau_bo_unpin(cur); @@ -422,13 +422,13 @@ nv04_update_plane(struct 
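Throughout the dispnv04 helpers and the overlay code above, nouveau_dev(dev) lookups are replaced by taking the address of the nvif_device embedded in struct nouveau_drm, and register access then goes through nvif_rd32()/nvif_wr32()/nvif_mask(). A short sketch of that access pattern for a plane, restating only calls shown in the surrounding hunks:

/* sketch: how the converted overlay helpers reach the device registers */
static void
example_overlay_stop(struct drm_plane *plane)
{
	/* previously: struct nouveau_device *dev = nouveau_dev(plane->dev); */
	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;

	/* chipset now lives in the info struct; split mirrors nouveau_overlay_init() */
	if (dev->info.chipset >= 0x10)
		nvif_wr32(dev, NV_PVIDEO_STOP, 1);
	else
		nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
}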
drm_plane *plane, struct drm_crtc *crtc, static int nv04_disable_plane(struct drm_plane *plane) { - struct nouveau_device *dev = nouveau_dev(plane->dev); + struct nvif_device *dev = &nouveau_drm(plane->dev)->device; struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane; - nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0); - nv_wr32(dev, NV_PVIDEO_OE_STATE, 0); - nv_wr32(dev, NV_PVIDEO_SU_STATE, 0); - nv_wr32(dev, NV_PVIDEO_RM_STATE, 0); + nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0); + nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0); + nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0); + nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0); if (nv_plane->cur) { nouveau_bo_unpin(nv_plane->cur); nv_plane->cur = NULL; @@ -447,7 +447,7 @@ static const struct drm_plane_funcs nv04_plane_funcs = { static void nv04_overlay_init(struct drm_device *device) { - struct nouveau_device *dev = nouveau_dev(device); + struct nouveau_drm *drm = nouveau_drm(device); struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL); int ret; @@ -483,15 +483,15 @@ cleanup: drm_plane_cleanup(&plane->base); err: kfree(plane); - nv_error(dev, "Failed to create plane\n"); + NV_ERROR(drm, "Failed to create plane\n"); } void nouveau_overlay_init(struct drm_device *device) { - struct nouveau_device *dev = nouveau_dev(device); - if (dev->chipset < 0x10) + struct nvif_device *dev = &nouveau_drm(device)->device; + if (dev->info.chipset < 0x10) nv04_overlay_init(device); - else if (dev->chipset <= 0x40) + else if (dev->info.chipset <= 0x40) nv10_overlay_init(device); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index 8667620b703a..8061d8d0ce79 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -35,8 +35,6 @@ #include <drm/i2c/ch7006.h> -#include <subdev/i2c.h> - static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = { { { @@ -56,7 +54,7 @@ static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = { int nv04_tv_identify(struct drm_device *dev, int i2c_index) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); return i2c->identify(i2c, i2c_index, "TV encoder", nv04_tv_encoder_info, NULL, NULL); @@ -206,7 +204,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) struct drm_encoder *encoder; struct drm_device *dev = connector->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index); int type, ret; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 195bd8e86c6a..72d2ab04db47 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -34,11 +34,6 @@ #include "hw.h" #include "tvnv17.h" -#include <core/device.h> - -#include <subdev/bios/gpio.h> -#include <subdev/gpio.h> - MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" @@ -51,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct nouveau_gpio *gpio = nvkm_gpio(&drm->device); uint32_t testval, regoffset = 
nv04_dac_output_offset(encoder); uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; @@ -135,17 +130,17 @@ static bool get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_object *device = drm->device; + struct nvif_device *device = &drm->device; /* Zotac FX5200 */ - if (nv_device_match(device, 0x0322, 0x19da, 0x1035) || - nv_device_match(device, 0x0322, 0x19da, 0x2035)) { + if (nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x1035) || + nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x2035)) { *pin_mask = 0xc; return false; } /* MSI nForce2 IGP */ - if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) { + if (nv_device_match(nvkm_object(device), 0x01f0, 0x1462, 0x5710)) { *pin_mask = 0xc; return false; } @@ -167,8 +162,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) return connector_status_disconnected; if (reliable) { - if (nv_device(drm->device)->chipset == 0x42 || - nv_device(drm->device)->chipset == 0x43) + if (drm->device.info.chipset == 0x42 || + drm->device.info.chipset == 0x43) tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; else @@ -375,7 +370,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct nouveau_gpio *gpio = nvkm_gpio(&drm->device); struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); @@ -448,7 +443,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) /* Set the DACCLK register */ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; - if (nv_device(drm->device)->card_type == NV_40) + if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) dacclk |= 0x1a << 16; if (tv_norm->kind == CTV_ENC_MODE) { @@ -505,7 +500,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder, tv_regs->ptv_614 = 0x13; } - if (nv_device(drm->device)->card_type >= NV_30) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) { tv_regs->ptv_500 = 0xe8e0; tv_regs->ptv_504 = 0x1710; tv_regs->ptv_604 = 0x0; @@ -600,7 +595,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder) nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); /* This could use refinement for flatpanels, but it should work */ - if (nv_device(drm->device)->chipset < 0x44) + if (drm->device.info.chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h index 7b331543a41b..225894cdcac2 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h @@ -130,14 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder); static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val) { - struct nouveau_device *device = nouveau_dev(dev); - nv_wr32(device, reg, val); + struct nvif_device *device = &nouveau_drm(dev)->device; + nvif_wr32(device, reg, val); } static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) { - struct nouveau_device *device = nouveau_dev(dev); - return nv_rd32(device, reg); + struct nvif_device *device = &nouveau_drm(dev)->device; + return nvif_rd32(device, reg); } static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t 
reg, diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index b13f441c6431..615714c1727d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -21,16 +21,10 @@ * */ -#include <core/object.h> -#include <core/client.h> -#include <core/device.h> -#include <core/class.h> -#include <core/mm.h> - -#include <subdev/fb.h> -#include <subdev/timer.h> -#include <subdev/instmem.h> -#include <engine/graph.h> +#include <nvif/client.h> +#include <nvif/driver.h> +#include <nvif/ioctl.h> +#include <nvif/class.h> #include "nouveau_drm.h" #include "nouveau_dma.h" @@ -47,20 +41,20 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev) struct nouveau_abi16 *abi16; cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL); if (cli->abi16) { + struct nv_device_v0 args = { + .device = ~0ULL, + }; + INIT_LIST_HEAD(&abi16->channels); - abi16->client = nv_object(cli); /* allocate device object targeting client's default * device (ie. the one that belongs to the fd it * opened) */ - if (nouveau_object_new(abi16->client, NVDRM_CLIENT, - NVDRM_DEVICE, 0x0080, - &(struct nv_device_class) { - .device = ~0ULL, - }, - sizeof(struct nv_device_class), - &abi16->device) == 0) + if (nvif_device_init(&cli->base.base, NULL, + NOUVEAU_ABI16_DEVICE, NV_DEVICE, + &args, sizeof(args), + &abi16->device) == 0) return cli->abi16; kfree(cli->abi16); @@ -75,7 +69,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev) int nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) { - struct nouveau_cli *cli = (void *)abi16->client; + struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); mutex_unlock(&cli->mutex); return ret; } @@ -83,21 +77,19 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) u16 nouveau_abi16_swclass(struct nouveau_drm *drm) { - switch (nv_device(drm->device)->card_type) { - case NV_04: + switch (drm->device.info.family) { + case NV_DEVICE_INFO_V0_TNT: return 0x006e; - case NV_10: - case NV_11: - case NV_20: - case NV_30: - case NV_40: + case NV_DEVICE_INFO_V0_CELSIUS: + case NV_DEVICE_INFO_V0_KELVIN: + case NV_DEVICE_INFO_V0_RANKINE: + case NV_DEVICE_INFO_V0_CURIE: return 0x016e; - case NV_50: + case NV_DEVICE_INFO_V0_TESLA: return 0x506e; - case NV_C0: - case NV_D0: - case NV_E0: - case GM100: + case NV_DEVICE_INFO_V0_FERMI: + case NV_DEVICE_INFO_V0_KEPLER: + case NV_DEVICE_INFO_V0_MAXWELL: return 0x906e; } @@ -140,7 +132,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, /* destroy channel object, all children will be killed too */ if (chan->chan) { - abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff)); + abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff)); nouveau_channel_del(&chan->chan); } @@ -151,7 +143,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, void nouveau_abi16_fini(struct nouveau_abi16 *abi16) { - struct nouveau_cli *cli = (void *)abi16->client; + struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base); struct nouveau_abi16_chan *chan, *temp; /* cleanup channels */ @@ -160,7 +152,7 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16) } /* destroy the device object */ - nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE); + nvif_device_fini(&abi16->device); kfree(cli->abi16); cli->abi16 = NULL; @@ -169,30 +161,31 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16) int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) { + struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_drm *drm = 
nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); - struct nouveau_timer *ptimer = nouveau_timer(device); - struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR); + struct nvif_device *device = &drm->device; + struct nouveau_timer *ptimer = nvkm_timer(device); + struct nouveau_graph *graph = nvkm_gr(device); struct drm_nouveau_getparam *getparam = data; switch (getparam->param) { case NOUVEAU_GETPARAM_CHIPSET_ID: - getparam->value = device->chipset; + getparam->value = device->info.chipset; break; case NOUVEAU_GETPARAM_PCI_VENDOR: - if (nv_device_is_pci(device)) + if (nv_device_is_pci(nvkm_device(device))) getparam->value = dev->pdev->vendor; else getparam->value = 0; break; case NOUVEAU_GETPARAM_PCI_DEVICE: - if (nv_device_is_pci(device)) + if (nv_device_is_pci(nvkm_device(device))) getparam->value = dev->pdev->device; else getparam->value = 0; break; case NOUVEAU_GETPARAM_BUS_TYPE: - if (!nv_device_is_pci(device)) + if (!nv_device_is_pci(nvkm_device(device))) getparam->value = 3; else if (drm_pci_device_is_agp(dev)) @@ -225,7 +218,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) getparam->value = graph->units ? graph->units(graph) : 0; break; default: - nv_debug(device, "unknown parameter %lld\n", getparam->param); + NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param); return -EINVAL; } @@ -246,10 +239,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_abi16_chan *chan; - struct nouveau_client *client; - struct nouveau_device *device; - struct nouveau_instmem *imem; - struct nouveau_fb *pfb; + struct nvif_device *device; int ret; if (unlikely(!abi16)) @@ -258,21 +248,18 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (!drm->channel) return nouveau_abi16_put(abi16, -ENODEV); - client = nv_client(abi16->client); - device = nv_device(abi16->device); - imem = nouveau_instmem(device); - pfb = nouveau_fb(device); + device = &abi16->device; /* hack to allow channel engine type specification on kepler */ - if (device->card_type >= NV_E0) { + if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { if (init->fb_ctxdma_handle != ~0) - init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR; + init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR; else init->fb_ctxdma_handle = init->tt_ctxdma_handle; /* allow flips to be executed if this is a graphics channel */ init->tt_ctxdma_handle = 0; - if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR) + if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR) init->tt_ctxdma_handle = 1; } @@ -293,13 +280,14 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) abi16->handles |= (1ULL << init->channel); /* create channel object and initialise dma and fence management */ - ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | - init->channel, init->fb_ctxdma_handle, + ret = nouveau_channel_new(drm, device, + NOUVEAU_ABI16_CHAN(init->channel), + init->fb_ctxdma_handle, init->tt_ctxdma_handle, &chan->chan); if (ret) goto done; - if (device->card_type >= NV_50) + if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART; else @@ -308,10 +296,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) else init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; - if (device->card_type < NV_10) { + if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) { 
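In nouveau_abi16.c the per-client device object becomes a struct nvif_device created through nvif_device_init() with an nv_device_v0 argument block and torn down with nvif_device_fini(); chipset and family are then read straight from device->info. A condensed sketch of that lifecycle, assuming the nvif headers already included at the top of the file and the NOUVEAU_ABI16_DEVICE handle used in the hunk above:

/* sketch of the nvif_device lifecycle the ABI16 code now uses */
static int
example_abi16_device(struct nouveau_cli *cli, struct nvif_device *device)
{
	struct nv_device_v0 args = {
		.device = ~0ULL,	/* the device this fd was opened on */
	};
	int ret;

	ret = nvif_device_init(&cli->base.base, NULL, NOUVEAU_ABI16_DEVICE,
			       NV_DEVICE, &args, sizeof(args), device);
	if (ret)
		return ret;

	/* identification comes from the info struct, not the nvkm object */
	pr_debug("chipset %02x, family %d\n",
		 device->info.chipset, device->info.family);

	nvif_device_fini(device);
	return 0;
}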
init->subchan[0].handle = 0x00000000; init->subchan[0].grclass = 0x0000; - init->subchan[1].handle = NvSw; + init->subchan[1].handle = chan->chan->nvsw.handle; init->subchan[1].grclass = 0x506e; init->nr_subchan = 2; } @@ -324,8 +312,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - if (device->card_type >= NV_50) { - ret = nouveau_bo_vma_add(chan->ntfy, client->vm, + if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_bo_vma_add(chan->ntfy, cli->vm, &chan->ntfy_vma); if (ret) goto done; @@ -343,6 +331,18 @@ done: return nouveau_abi16_put(abi16, ret); } +static struct nouveau_abi16_chan * +nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel) +{ + struct nouveau_abi16_chan *chan; + + list_for_each_entry(chan, &abi16->channels, head) { + if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel)) + return chan; + } + + return NULL; +} int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) @@ -350,28 +350,38 @@ nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) struct drm_nouveau_channel_free *req = data; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_abi16_chan *chan; - int ret = -ENOENT; if (unlikely(!abi16)) return -ENOMEM; - list_for_each_entry(chan, &abi16->channels, head) { - if (chan->chan->handle == (NVDRM_CHAN | req->channel)) { - nouveau_abi16_chan_fini(abi16, chan); - return nouveau_abi16_put(abi16, 0); - } - } - - return nouveau_abi16_put(abi16, ret); + chan = nouveau_abi16_chan(abi16, req->channel); + if (!chan) + return nouveau_abi16_put(abi16, -ENOENT); + nouveau_abi16_chan_fini(abi16, chan); + return nouveau_abi16_put(abi16, 0); } int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) { struct drm_nouveau_grobj_alloc *init = data; + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_new_v0 new; + } args = { + .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY, + .ioctl.type = NVIF_IOCTL_V0_NEW, + .ioctl.path_nr = 3, + .ioctl.path[2] = NOUVEAU_ABI16_CLIENT, + .ioctl.path[1] = NOUVEAU_ABI16_DEVICE, + .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel), + .new.route = NVDRM_OBJECT_ABI16, + .new.handle = init->handle, + .new.oclass = init->class, + }; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_object *object; + struct nvif_client *client; int ret; if (unlikely(!abi16)) @@ -379,6 +389,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) if (init->handle == ~0) return nouveau_abi16_put(abi16, -EINVAL); + client = nvif_client(nvif_object(&abi16->device)); /* compatibility with userspace that assumes 506e for all chipsets */ if (init->class == 0x506e) { @@ -387,8 +398,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) return nouveau_abi16_put(abi16, 0); } - ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel, - init->handle, init->class, NULL, 0, &object); + ret = nvif_client_ioctl(client, &args, sizeof(args)); return nouveau_abi16_put(abi16, ret); } @@ -396,29 +406,38 @@ int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) { struct drm_nouveau_notifierobj_alloc *info = data; + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_new_v0 new; + struct nv_dma_v0 ctxdma; + } args = { + .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY, + .ioctl.type = NVIF_IOCTL_V0_NEW, + .ioctl.path_nr = 3, + .ioctl.path[2] = NOUVEAU_ABI16_CLIENT, + .ioctl.path[1] = NOUVEAU_ABI16_DEVICE, + .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel), + .new.route = NVDRM_OBJECT_ABI16, + .new.handle = info->handle, + 
.new.oclass = NV_DMA_IN_MEMORY, + }; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan = NULL, *temp; + struct nouveau_abi16_chan *chan; struct nouveau_abi16_ntfy *ntfy; - struct nouveau_object *object; - struct nv_dma_class args = {}; + struct nvif_device *device = &abi16->device; + struct nvif_client *client; int ret; if (unlikely(!abi16)) return -ENOMEM; /* completely unnecessary for these chipsets... */ - if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) + if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI)) return nouveau_abi16_put(abi16, -EINVAL); + client = nvif_client(nvif_object(&abi16->device)); - list_for_each_entry(temp, &abi16->channels, head) { - if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { - chan = temp; - break; - } - } - + chan = nouveau_abi16_chan(abi16, info->channel); if (!chan) return nouveau_abi16_put(abi16, -ENOENT); @@ -434,26 +453,29 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - args.start = ntfy->node->offset; - args.limit = ntfy->node->offset + ntfy->node->length - 1; - if (device->card_type >= NV_50) { - args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; - args.start += chan->ntfy_vma.offset; - args.limit += chan->ntfy_vma.offset; + args.ctxdma.start = ntfy->node->offset; + args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1; + if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { + args.ctxdma.target = NV_DMA_V0_TARGET_VM; + args.ctxdma.access = NV_DMA_V0_ACCESS_VM; + args.ctxdma.start += chan->ntfy_vma.offset; + args.ctxdma.limit += chan->ntfy_vma.offset; } else if (drm->agp.stat == ENABLED) { - args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; - args.start += drm->agp.base + chan->ntfy->bo.offset; - args.limit += drm->agp.base + chan->ntfy->bo.offset; + args.ctxdma.target = NV_DMA_V0_TARGET_AGP; + args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; + args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset; + args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset; + client->super = true; } else { - args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; - args.start += chan->ntfy->bo.offset; - args.limit += chan->ntfy->bo.offset; + args.ctxdma.target = NV_DMA_V0_TARGET_VM; + args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR; + args.ctxdma.start += chan->ntfy->bo.offset; + args.ctxdma.limit += chan->ntfy->bo.offset; } - ret = nouveau_object_new(abi16->client, chan->chan->handle, - ntfy->handle, 0x003d, &args, - sizeof(args), &object); + ret = nvif_client_ioctl(client, &args, sizeof(args)); + client->super = false; if (ret) goto done; @@ -469,28 +491,36 @@ int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) { struct drm_nouveau_gpuobj_free *fini = data; + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_del del; + } args = { + .ioctl.owner = NVDRM_OBJECT_ABI16, + .ioctl.type = NVIF_IOCTL_V0_DEL, + .ioctl.path_nr = 4, + .ioctl.path[3] = NOUVEAU_ABI16_CLIENT, + .ioctl.path[2] = NOUVEAU_ABI16_DEVICE, + .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel), + .ioctl.path[0] = fini->handle, + }; struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); - struct nouveau_abi16_chan *chan = NULL, *temp; + struct nouveau_abi16_chan *chan; struct nouveau_abi16_ntfy *ntfy; + struct nvif_client *client; int ret; if (unlikely(!abi16)) return -ENOMEM; - list_for_each_entry(temp, &abi16->channels, head) { - if (temp->chan->handle == (NVDRM_CHAN 
| fini->channel)) { - chan = temp; - break; - } - } - + chan = nouveau_abi16_chan(abi16, fini->channel); if (!chan) return nouveau_abi16_put(abi16, -ENOENT); + client = nvif_client(nvif_object(&abi16->device)); /* synchronize with the user channel and destroy the gpu object */ nouveau_channel_idle(chan->chan); - ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle); + ret = nvif_client_ioctl(client, &args, sizeof(args)); if (ret) return nouveau_abi16_put(abi16, ret); diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 90004081a501..39844e6bfbff 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -28,8 +28,7 @@ struct nouveau_abi16_chan { }; struct nouveau_abi16 { - struct nouveau_object *client; - struct nouveau_object *device; + struct nvif_device device; struct list_head channels; u64 handles; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 279206997e5c..622424692b3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -46,7 +46,6 @@ static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; acpi_handle dhandle; - acpi_handle other_handle; acpi_handle rom_handle; } nouveau_dsm_priv; @@ -222,10 +221,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) if (!dhandle) return false; - if (!acpi_has_method(dhandle, "_DSM")) { - nouveau_dsm_priv.other_handle = dhandle; + if (!acpi_has_method(dhandle, "_DSM")) return false; - } + if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, 1 << NOUVEAU_DSM_POWER)) retval |= NOUVEAU_DSM_HAS_MUX; @@ -301,16 +299,6 @@ static bool nouveau_dsm_detect(void) printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); nouveau_dsm_priv.dsm_detected = true; - /* - * On some systems hotplug events are generated for the device - * being switched off when _DSM is executed. They cause ACPI - * hotplug to trigger and attempt to remove the device from - * the system, which causes it to break down. Prevent that from - * happening by setting the no_hotplug flag for the involved - * ACPI device objects. - */ - acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle); - acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle); ret = true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c index 51666daddb94..1f6f6ba6847a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_agp.c +++ b/drivers/gpu/drm/nouveau/nouveau_agp.c @@ -1,7 +1,5 @@ #include <linux/module.h> -#include <core/device.h> - #include "nouveau_drm.h" #include "nouveau_agp.h" #include "nouveau_reg.h" @@ -29,7 +27,7 @@ static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = { static unsigned long get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) { - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list; int agpmode = nouveau_agpmode; unsigned long mode = info->mode; @@ -38,7 +36,7 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) * FW seems to be broken on nv18, it makes the card lock up * randomly. 
*/ - if (device->chipset == 0x18) + if (device->info.chipset == 0x18) mode &= ~PCI_AGP_COMMAND_FW; /* @@ -47,10 +45,10 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info) while (agpmode == -1 && quirk->hostbridge_vendor) { if (info->id_vendor == quirk->hostbridge_vendor && info->id_device == quirk->hostbridge_device && - device->pdev->vendor == quirk->chip_vendor && - device->pdev->device == quirk->chip_device) { + nvkm_device(device)->pdev->vendor == quirk->chip_vendor && + nvkm_device(device)->pdev->device == quirk->chip_device) { agpmode = quirk->mode; - nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n", + NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n", agpmode); break; } @@ -104,7 +102,7 @@ void nouveau_agp_reset(struct nouveau_drm *drm) { #if __OS_HAS_AGP - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct drm_device *dev = drm->dev; u32 save[2]; int ret; @@ -115,7 +113,7 @@ nouveau_agp_reset(struct nouveau_drm *drm) /* First of all, disable fast writes, otherwise if it's * already enabled in the AGP bridge and we disable the card's * AGP controller we might be locking ourselves out of it. */ - if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) | + if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) | dev->agp->mode) & PCI_AGP_COMMAND_FW) { struct drm_agp_info info; struct drm_agp_mode mode; @@ -134,15 +132,15 @@ nouveau_agp_reset(struct nouveau_drm *drm) /* clear busmaster bit, and disable AGP */ - save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000); - nv_wr32(device, NV04_PBUS_PCI_NV_19, 0); + save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000); + nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0); /* reset PGRAPH, PFIFO and PTIMER */ - save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000); - nv_mask(device, 0x000200, 0x00011100, save[1]); + save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000); + nvif_mask(device, 0x000200, 0x00011100, save[1]); /* and restore bustmaster bit (gives effect of resetting AGP) */ - nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]); + nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]); #endif } @@ -150,7 +148,6 @@ void nouveau_agp_init(struct nouveau_drm *drm) { #if __OS_HAS_AGP - struct nouveau_device *device = nv_device(drm->device); struct drm_device *dev = drm->dev; struct drm_agp_info info; struct drm_agp_mode mode; @@ -162,13 +159,13 @@ nouveau_agp_init(struct nouveau_drm *drm) ret = drm_agp_acquire(dev); if (ret) { - nv_error(device, "unable to acquire AGP: %d\n", ret); + NV_ERROR(drm, "unable to acquire AGP: %d\n", ret); return; } ret = drm_agp_info(dev, &info); if (ret) { - nv_error(device, "unable to get AGP info: %d\n", ret); + NV_ERROR(drm, "unable to get AGP info: %d\n", ret); return; } @@ -177,7 +174,7 @@ nouveau_agp_init(struct nouveau_drm *drm) ret = drm_agp_enable(dev, mode); if (ret) { - nv_error(device, "unable to enable AGP: %d\n", ret); + NV_ERROR(drm, "unable to enable AGP: %d\n", ret); return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 2c1e4aad7da3..e566c5b53651 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -40,8 +40,8 @@ static int nv40_get_intensity(struct backlight_device *bd) { struct nouveau_drm *drm = bl_get_data(bd); - struct nouveau_device *device = nv_device(drm->device); - int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) & + struct nvif_device *device = 
&drm->device; + int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) >> 16; return val; @@ -51,11 +51,11 @@ static int nv40_set_intensity(struct backlight_device *bd) { struct nouveau_drm *drm = bl_get_data(bd); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int val = bd->props.brightness; - int reg = nv_rd32(device, NV40_PMC_BACKLIGHT); + int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); - nv_wr32(device, NV40_PMC_BACKLIGHT, + nvif_wr32(device, NV40_PMC_BACKLIGHT, (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); return 0; @@ -71,11 +71,11 @@ static int nv40_backlight_init(struct drm_connector *connector) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct backlight_properties props; struct backlight_device *bd; - if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) + if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) return 0; memset(&props, 0, sizeof(struct backlight_properties)); @@ -97,12 +97,12 @@ nv50_get_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int or = nv_encoder->or; u32 div = 1025; u32 val; - val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); + val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); val &= NV50_PDISP_SOR_PWM_CTL_VAL; return ((val * 100) + (div / 2)) / div; } @@ -112,12 +112,12 @@ nv50_set_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int or = nv_encoder->or; u32 div = 1025; u32 val = (bd->props.brightness * div) / 100; - nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), + nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), NV50_PDISP_SOR_PWM_CTL_NEW | val); return 0; } @@ -133,12 +133,12 @@ nva3_get_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int or = nv_encoder->or; u32 div, val; - div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); - val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); + div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); + val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)); val &= NVA3_PDISP_SOR_PWM_CTL_VAL; if (div && div >= val) return ((val * 100) + (div / 2)) / div; @@ -151,14 +151,14 @@ nva3_set_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int or = nv_encoder->or; u32 div, val; - div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); + div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); val = (bd->props.brightness * div) / 100; if (div) { - nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val | + nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val | NV50_PDISP_SOR_PWM_CTL_NEW | NVA3_PDISP_SOR_PWM_CTL_UNK); return 0; @@ -177,7 +177,7 @@ static int nv50_backlight_init(struct 
drm_connector *connector) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct nouveau_encoder *nv_encoder; struct backlight_properties props; struct backlight_device *bd; @@ -190,12 +190,12 @@ nv50_backlight_init(struct drm_connector *connector) return -ENODEV; } - if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) + if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) return 0; - if (device->chipset <= 0xa0 || - device->chipset == 0xaa || - device->chipset == 0xac) + if (device->info.chipset <= 0xa0 || + device->info.chipset == 0xaa || + device->info.chipset == 0xac) ops = &nv50_bl_ops; else ops = &nva3_bl_ops; @@ -218,7 +218,7 @@ int nouveau_backlight_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct drm_connector *connector; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -226,13 +226,12 @@ nouveau_backlight_init(struct drm_device *dev) connector->connector_type != DRM_MODE_CONNECTOR_eDP) continue; - switch (device->card_type) { - case NV_40: + switch (device->info.family) { + case NV_DEVICE_INFO_V0_CURIE: return nv40_backlight_init(connector); - case NV_50: - case NV_C0: - case NV_D0: - case NV_E0: + case NV_DEVICE_INFO_V0_TESLA: + case NV_DEVICE_INFO_V0_FERMI: + case NV_DEVICE_INFO_V0_KEPLER: return nv50_backlight_init(connector); default: break; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8268a4ccac15..dae2c96deef8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -22,8 +22,6 @@ * SOFTWARE. 
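The backlight hunks above convert the PWM register accesses to nvif_rd32()/nvif_wr32() and key the nv50 versus nva3 split on device->info.chipset. A small sketch of the duty-cycle maths the nva3 hooks use, restating only the logic visible in the hunk:

/* sketch: PWM duty cycle converted to a 0..100 brightness value */
static int
example_backlight_percent(struct nvif_device *device, int or)
{
	u32 div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
	u32 val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or)) &
		  NVA3_PDISP_SOR_PWM_CTL_VAL;

	/* scale to percent, rounding to nearest; fall back to full brightness */
	if (div && div >= val)
		return ((val * 100) + (div / 2)) / div;
	return 100;
}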
*/ -#include <subdev/bios.h> - #include <drm/drmP.h> #include "nouveau_drm.h" @@ -217,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct nvbios *bios = &drm->vbios; uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; uint32_t sel_clk_binding, sel_clk; @@ -240,7 +238,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head NV_INFO(drm, "Calling LVDS script %d:\n", script); /* don't let script change pll->head binding */ - sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; + sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; if (lvds_ver < 0x30) ret = call_lvds_manufacturer_script(dev, dcbent, head, script); @@ -252,7 +250,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */ - nv_wr32(device, NV_PBUS_POWERCTRL_2, 0); + nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); return ret; } @@ -320,7 +318,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n static int get_fp_strap(struct drm_device *dev, struct nvbios *bios) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; /* * The fp strap is normally dictated by the "User Strap" in @@ -334,10 +332,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios) if (bios->major_version < 5 && bios->data[0x48] & 0x4) return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; - if (device->card_type >= NV_50) - return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; + if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) + return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; else - return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; + return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; } static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) @@ -636,7 +634,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct nvbios *bios = &drm->vbios; int cv = bios->chip_version; uint16_t clktable = 0, scriptptr; @@ -670,7 +668,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, } /* don't let script change pll->head binding */ - sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; + sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000; run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000); sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000; NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding); @@ -1253,7 +1251,7 @@ olddcb_table(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); u8 *dcb = NULL; - if (nv_device(drm->device)->card_type > NV_04) + if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT) dcb = ROMPTR(dev, drm->vbios.data[0x36]); if (!dcb) { NV_WARN(drm, "No DCB data found in VBIOS\n"); @@ -1399,6 +1397,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, uint32_t conn, uint32_t conf, struct dcb_output *entry) { struct 
nouveau_drm *drm = nouveau_drm(dev); + int link = 0; entry->type = conn & 0xf; entry->i2c_index = (conn >> 4) & 0xf; @@ -1444,6 +1443,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, if (conf & 0x4) entry->lvdsconf.use_power_scripts = true; entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; + link = entry->lvdsconf.sor.link; } if (conf & mask) { /* @@ -1492,17 +1492,18 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, entry->dpconf.link_nr = 1; break; } + link = entry->dpconf.sor.link; break; case DCB_OUTPUT_TMDS: if (dcb->version >= 0x40) { entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; entry->extdev = (conf & 0x0000ff00) >> 8; + link = entry->tmdsconf.sor.link; } else if (dcb->version >= 0x30) entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8; else if (dcb->version >= 0x22) entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4; - break; case DCB_OUTPUT_EOL: /* weird g80 mobile type that "nv" treats as a terminator */ @@ -1526,6 +1527,8 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, if (conf & 0x100000) entry->i2c_upper_default = true; + entry->hasht = (entry->location << 4) | entry->type; + entry->hashm = (entry->heads << 8) | (link << 6) | entry->or; return true; } @@ -1908,7 +1911,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; uint8_t bytes_to_write; uint16_t hwsq_entry_offset; int i; @@ -1931,15 +1934,15 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write; /* set sequencer control */ - nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); + nvif_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset])); bytes_to_write -= 4; /* write ucode */ for (i = 0; i < bytes_to_write; i += 4) - nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); + nvif_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4])); /* twiddle NV_PBUS_DEBUG_4 */ - nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18); + nvif_wr32(device, NV_PBUS_DEBUG_4, nvif_rd32(device, NV_PBUS_DEBUG_4) | 0x18); return 0; } @@ -2002,7 +2005,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) static bool NVInitVBIOS(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_bios *bios = nouveau_bios(drm->device); + struct nouveau_bios *bios = nvkm_bios(&drm->device); struct nvbios *legacy = &drm->vbios; memset(legacy, 0, sizeof(struct nvbios)); @@ -2054,7 +2057,7 @@ nouveau_bios_posted(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); unsigned htotal; - if (nv_device(drm->device)->card_type >= NV_50) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) return true; htotal = NVReadVgaCrtc(dev, 0, 0x06); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index b6dc85c614be..01da508625f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -27,13 +27,9 @@ * Jeremy Kolb <jkolb@brandeis.edu> */ -#include <core/engine.h> +#include <linux/dma-mapping.h> #include <linux/swiotlb.h> -#include <subdev/fb.h> -#include <subdev/vm.h> -#include <subdev/bar.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fence.h" @@ -52,7 +48,7 @@ 
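The parse_dcb20_entry() hunk above also records a hasht/hashm pair built from fields already decoded from the DCB entry, with link filled in for LVDS, DP and TMDS SOR outputs. A sketch that restates only the packing the added lines perform:

/* sketch: the hash fields added to parse_dcb20_entry() */
static void
example_dcb_hash(struct dcb_output *entry, int link)
{
	entry->hasht = (entry->location << 4) | entry->type;
	entry->hashm = (entry->heads << 8) | (link << 6) | entry->or;
}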
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, { struct nouveau_drm *drm = nouveau_drm(dev); int i = reg - drm->tile.reg; - struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb *pfb = nvkm_fb(&drm->device); struct nouveau_fb_tile *tile = &pfb->tile.region[i]; struct nouveau_engine *engine; @@ -109,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr, u32 size, u32 pitch, u32 flags) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb *pfb = nvkm_fb(&drm->device); struct nouveau_drm_tile *tile, *found = NULL; int i; @@ -153,23 +149,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, int *align, int *size) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; - if (device->card_type < NV_50) { + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { if (nvbo->tile_mode) { - if (device->chipset >= 0x40) { + if (device->info.chipset >= 0x40) { *align = 65536; *size = roundup(*size, 64 * nvbo->tile_mode); - } else if (device->chipset >= 0x30) { + } else if (device->info.chipset >= 0x30) { *align = 32768; *size = roundup(*size, 64 * nvbo->tile_mode); - } else if (device->chipset >= 0x20) { + } else if (device->info.chipset >= 0x20) { *align = 16384; *size = roundup(*size, 64 * nvbo->tile_mode); - } else if (device->chipset >= 0x10) { + } else if (device->info.chipset >= 0x10) { *align = 16384; *size = roundup(*size, 32 * nvbo->tile_mode); } @@ -196,12 +192,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, int lpg_shift = 12; int max_size; - if (drm->client.base.vm) - lpg_shift = drm->client.base.vm->vmm->lpg_shift; + if (drm->client.vm) + lpg_shift = drm->client.vm->vmm->lpg_shift; max_size = INT_MAX & ~((1 << lpg_shift) - 1); if (size <= 0 || size > max_size) { - nv_warn(drm, "skipped size %x\n", (u32)size); + NV_WARN(drm, "skipped size %x\n", (u32)size); return -EINVAL; } @@ -219,9 +215,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, nvbo->bo.bdev = &drm->ttm.bdev; nvbo->page_shift = 12; - if (drm->client.base.vm) { + if (drm->client.vm) { if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) - nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift; + nvbo->page_shift = drm->client.vm->vmm->lpg_shift; } nouveau_bo_fixup_align(nvbo, flags, &align, &size); @@ -261,11 +257,9 @@ static void set_placement_range(struct nouveau_bo *nvbo, uint32_t type) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct nouveau_fb *pfb = nouveau_fb(drm->device); - u32 vram_pages = pfb->ram->size >> PAGE_SHIFT; + u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; - if ((nv_device(drm->device)->card_type == NV_10 || - nv_device(drm->device)->card_type == NV_11) && + if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && nvbo->bo.mem.num_pages < vram_pages / 4) { /* @@ -309,7 +303,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) struct ttm_buffer_object *bo = &nvbo->bo; int ret; - ret = ttm_bo_reserve(bo, false, false, false, 0); + ret = ttm_bo_reserve(bo, false, false, false, NULL); if (ret) goto out; @@ -350,7 +344,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) struct ttm_buffer_object *bo = &nvbo->bo; int ret, ref; - ret = ttm_bo_reserve(bo, false, false, false, 0); + ret = ttm_bo_reserve(bo, false, false, false, NULL); if (ret) return ret; @@ -385,7 +379,7 @@ 
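As an illustrative aside (this sketch is not part of the patch itself): the hunks above show the conversion this series applies mechanically across the driver — the raw struct nouveau_device pointer, its card_type/chipset fields and the nv_rd32()/nv_wr32() accessors are replaced by the nvif_device handle embedded in nouveau_drm, its versioned info.family/info.chipset fields and nvif_rd32()/nvif_wr32(). The before/after sketch below condenses the get_fp_strap() hunk from nouveau_bios.c above; the _before/_after function names are invented for illustration and the VGA-strap override path is elided.

/* before: old core interface, raw device pointer and card_type check */
static int get_fp_strap_before(struct drm_device *dev)
{
        struct nouveau_device *device = nouveau_dev(dev);

        if (device->card_type >= NV_50)
                return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
        return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
}

/* after: nvif interface, versioned device info and nvif_rd32() */
static int get_fp_strap_after(struct drm_device *dev)
{
        struct nvif_device *device = &nouveau_drm(dev)->device;

        if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
                return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
        return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
}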
nouveau_bo_map(struct nouveau_bo *nvbo) { int ret; - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); if (ret) return ret; @@ -500,21 +494,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: - if (nv_device(drm->device)->card_type >= NV_50) { + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + /* Some BARs do not support being ioremapped WC */ + if (nvkm_bar(&drm->device)->iomap_uncached) { + man->available_caching = TTM_PL_FLAG_UNCACHED; + man->default_caching = TTM_PL_FLAG_UNCACHED; + } + man->func = &nouveau_vram_manager; man->io_reserve_fastpath = false; man->use_io_reserve_lru = true; } else { man->func = &ttm_bo_manager_func; } - man->flags = TTM_MEMTYPE_FLAG_FIXED | - TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; break; case TTM_PL_TT: - if (nv_device(drm->device)->card_type >= NV_50) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) man->func = &nouveau_gart_manager; else if (drm->agp.stat != ENABLED) @@ -763,9 +764,9 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); OUT_RING (chan, handle); BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); - OUT_RING (chan, NvNotify0); - OUT_RING (chan, NvDmaFB); - OUT_RING (chan, NvDmaFB); + OUT_RING (chan, chan->drm->ntfy.handle); + OUT_RING (chan, chan->vram.handle); + OUT_RING (chan, chan->vram.handle); } return ret; @@ -852,7 +853,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); OUT_RING (chan, handle); BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); - OUT_RING (chan, NvNotify0); + OUT_RING (chan, chan->drm->ntfy.handle); } return ret; @@ -864,7 +865,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, { if (mem->mem_type == TTM_PL_TT) return NvDmaTT; - return NvDmaFB; + return chan->vram.handle; } static int @@ -922,12 +923,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, u64 size = (u64)mem->num_pages << PAGE_SHIFT; int ret; - ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift, + ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift, NV_MEM_ACCESS_RW, &old_node->vma[0]); if (ret) return ret; - ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift, + ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift, NV_MEM_ACCESS_RW, &old_node->vma[1]); if (ret) { nouveau_vm_put(&old_node->vma[0]); @@ -945,6 +946,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_channel *chan = drm->ttm.chan; + struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); struct nouveau_fence *fence; int ret; @@ -952,13 +954,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, * old nouveau_mem node, these will get cleaned up after ttm has * destroyed the ttm_mem_reg */ - if (nv_device(drm->device)->card_type >= NV_50) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { ret = nouveau_bo_move_prep(drm, bo, new_mem); if (ret) return ret; } - mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); + 
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); ret = nouveau_fence_sync(bo->sync_obj, chan); if (ret == 0) { ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); @@ -973,7 +975,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, } } } - mutex_unlock(&chan->cli->mutex); + mutex_unlock(&cli->mutex); return ret; } @@ -1005,9 +1007,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) int ret; do { - struct nouveau_object *object; struct nouveau_channel *chan; - u32 handle = (mthd->engine << 16) | mthd->oclass; if (mthd->engine) chan = drm->cechan; @@ -1016,13 +1016,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm) if (chan == NULL) continue; - ret = nouveau_object_new(nv_object(drm), chan->handle, handle, - mthd->oclass, NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, + mthd->oclass | (mthd->engine << 16), + mthd->oclass, NULL, 0, + &drm->ttm.copy); if (ret == 0) { - ret = mthd->init(chan, handle); + ret = mthd->init(chan, drm->ttm.copy.handle); if (ret) { - nouveau_object_del(nv_object(drm), - chan->handle, handle); + nvif_object_fini(&drm->ttm.copy); continue; } @@ -1135,7 +1136,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, if (new_mem->mem_type != TTM_PL_VRAM) return 0; - if (nv_device(drm->device)->card_type >= NV_10) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, nvbo->tile_mode, nvbo->tile_flags); @@ -1166,7 +1167,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - if (nv_device(drm->device)->card_type < NV_50) { + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); if (ret) return ret; @@ -1203,7 +1204,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); out: - if (nv_device(drm->device)->card_type < NV_50) { + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { if (ret) nouveau_bo_vm_cleanup(bo, NULL, &new_tile); else @@ -1227,7 +1228,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct nouveau_drm *drm = nouveau_bdev(bdev); struct nouveau_mem *node = mem->mm_node; - struct drm_device *dev = drm->dev; int ret; mem->bus.addr = NULL; @@ -1246,19 +1246,19 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) if (drm->agp.stat == ENABLED) { mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = drm->agp.base; - mem->bus.is_iomem = !dev->agp->cant_use_aperture; + mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture; } #endif - if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype) /* untiled */ break; /* fallthrough, tiled memory */ case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1); + mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1); mem->bus.is_iomem = true; - if (nv_device(drm->device)->card_type >= NV_50) { - struct nouveau_bar *bar = nouveau_bar(drm->device); + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + struct nouveau_bar *bar = nvkm_bar(&drm->device); ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, &node->bar_vma); @@ -1278,7 +1278,7 @@ static void nouveau_ttm_io_mem_free(struct 
ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(bdev); - struct nouveau_bar *bar = nouveau_bar(drm->device); + struct nouveau_bar *bar = nvkm_bar(&drm->device); struct nouveau_mem *node = mem->mm_node; if (!node->bar_vma.node) @@ -1292,15 +1292,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_device *device = nv_device(drm->device); - u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT; + struct nvif_device *device = &drm->device; + u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT; int ret; /* as long as the bo isn't in vram, and isn't tiled, we've got * nothing to do here. */ if (bo->mem.mem_type != TTM_PL_VRAM) { - if (nv_device(drm->device)->card_type < NV_50 || + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !nouveau_bo_tile_layout(nvbo)) return 0; @@ -1315,7 +1315,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) } /* make sure bo is in mappable vram */ - if (nv_device(drm->device)->card_type >= NV_50 || + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA || bo->mem.start + bo->mem.num_pages < mappable) return 0; @@ -1333,6 +1333,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) struct nouveau_drm *drm; struct nouveau_device *device; struct drm_device *dev; + struct device *pdev; unsigned i; int r; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -1349,8 +1350,9 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) } drm = nouveau_bdev(ttm->bdev); - device = nv_device(drm->device); + device = nvkm_device(&drm->device); dev = drm->dev; + pdev = nv_device_base(device); #if __OS_HAS_AGP if (drm->agp.stat == ENABLED) { @@ -1370,17 +1372,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) } for (i = 0; i < ttm->num_pages; i++) { - ttm_dma->dma_address[i] = nv_device_map_page(device, - ttm->pages[i]); - if (!ttm_dma->dma_address[i]) { + dma_addr_t addr; + + addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(pdev, addr)) { while (--i) { - nv_device_unmap_page(device, - ttm_dma->dma_address[i]); + dma_unmap_page(pdev, ttm_dma->dma_address[i], + PAGE_SIZE, DMA_BIDIRECTIONAL); ttm_dma->dma_address[i] = 0; } ttm_pool_unpopulate(ttm); return -EFAULT; } + + ttm_dma->dma_address[i] = addr; } return 0; } @@ -1392,6 +1399,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) struct nouveau_drm *drm; struct nouveau_device *device; struct drm_device *dev; + struct device *pdev; unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -1399,8 +1407,9 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) return; drm = nouveau_bdev(ttm->bdev); - device = nv_device(drm->device); + device = nvkm_device(&drm->device); dev = drm->dev; + pdev = nv_device_base(device); #if __OS_HAS_AGP if (drm->agp.stat == ENABLED) { @@ -1418,7 +1427,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; i++) { if (ttm_dma->dma_address[i]) { - nv_device_unmap_page(device, ttm_dma->dma_address[i]); + dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE, + DMA_BIDIRECTIONAL); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index ccb6b452d6d0..3440fc999f2f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -22,16 +22,11 @@ * Authors: Ben Skeggs */ -#include <core/object.h> -#include <core/client.h> -#include 
<core/device.h> -#include <core/class.h> - -#include <subdev/fb.h> -#include <subdev/vm.h> -#include <subdev/instmem.h> +#include <nvif/os.h> +#include <nvif/class.h> -#include <engine/software.h> +/*XXX*/ +#include <core/client.h> #include "nouveau_drm.h" #include "nouveau_dma.h" @@ -47,7 +42,7 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); int nouveau_channel_idle(struct nouveau_channel *chan) { - struct nouveau_cli *cli = chan->cli; + struct nouveau_cli *cli = (void *)nvif_client(chan->object); struct nouveau_fence *fence = NULL; int ret; @@ -58,8 +53,8 @@ nouveau_channel_idle(struct nouveau_channel *chan) } if (ret) - NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n", - chan->handle, cli->base.name); + NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n", + chan->object->handle, nvkm_client(&cli->base)->name); return ret; } @@ -68,36 +63,34 @@ nouveau_channel_del(struct nouveau_channel **pchan) { struct nouveau_channel *chan = *pchan; if (chan) { - struct nouveau_object *client = nv_object(chan->cli); if (chan->fence) { nouveau_channel_idle(chan); nouveau_fence(chan->drm)->context_del(chan); } - nouveau_object_del(client, NVDRM_DEVICE, chan->handle); - nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); + nvif_object_fini(&chan->nvsw); + nvif_object_fini(&chan->gart); + nvif_object_fini(&chan->vram); + nvif_object_ref(NULL, &chan->object); + nvif_object_fini(&chan->push.ctxdma); nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); nouveau_bo_unmap(chan->push.buffer); if (chan->push.buffer && chan->push.buffer->pin_refcnt) nouveau_bo_unpin(chan->push.buffer); nouveau_bo_ref(NULL, &chan->push.buffer); + nvif_device_ref(NULL, &chan->device); kfree(chan); } *pchan = NULL; } static int -nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, - u32 parent, u32 handle, u32 size, - struct nouveau_channel **pchan) +nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, + u32 handle, u32 size, struct nouveau_channel **pchan) { - struct nouveau_device *device = nv_device(drm->device); - struct nouveau_instmem *imem = nouveau_instmem(device); - struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); - struct nouveau_fb *pfb = nouveau_fb(device); - struct nouveau_client *client = &cli->base; - struct nv_dma_class args = {}; + struct nouveau_cli *cli = (void *)nvif_client(&device->base); + struct nouveau_vmmgr *vmm = nvkm_vmmgr(device); + struct nv_dma_v0 args = {}; struct nouveau_channel *chan; - struct nouveau_object *push; u32 target; int ret; @@ -105,9 +98,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, if (!chan) return -ENOMEM; - chan->cli = cli; + nvif_device_ref(device, &chan->device); chan->drm = drm; - chan->handle = handle; /* allocate memory for dma push buffer */ target = TTM_PL_FLAG_TT; @@ -132,51 +124,54 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, * we be able to call out to other (indirect) push buffers */ chan->push.vma.offset = chan->push.buffer->bo.offset; - chan->push.handle = NVDRM_PUSH | (handle & 0xffff); - if (device->card_type >= NV_50) { - ret = nouveau_bo_vma_add(chan->push.buffer, client->vm, + if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm, &chan->push.vma); if (ret) { nouveau_channel_del(pchan); return ret; } - args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; + args.target = NV_DMA_V0_TARGET_VM; + args.access = NV_DMA_V0_ACCESS_VM; args.start = 0; - args.limit = 
client->vm->vmm->limit - 1; + args.limit = cli->vm->vmm->limit - 1; } else if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { - u64 limit = pfb->ram->size - imem->reserved - 1; - if (device->card_type == NV_04) { + if (device->info.family == NV_DEVICE_INFO_V0_TNT) { /* nv04 vram pushbuf hack, retarget to its location in * the framebuffer bar rather than direct vram access.. * nfi why this exists, it came from the -nv ddx. */ - args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR; - args.start = nv_device_resource_start(device, 1); - args.limit = args.start + limit; + args.target = NV_DMA_V0_TARGET_PCI; + args.access = NV_DMA_V0_ACCESS_RDWR; + args.start = nv_device_resource_start(nvkm_device(device), 1); + args.limit = args.start + device->info.ram_user - 1; } else { - args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; + args.target = NV_DMA_V0_TARGET_VRAM; + args.access = NV_DMA_V0_ACCESS_RDWR; args.start = 0; - args.limit = limit; + args.limit = device->info.ram_user - 1; } } else { if (chan->drm->agp.stat == ENABLED) { - args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; + args.target = NV_DMA_V0_TARGET_AGP; + args.access = NV_DMA_V0_ACCESS_RDWR; args.start = chan->drm->agp.base; args.limit = chan->drm->agp.base + chan->drm->agp.size - 1; } else { - args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; + args.target = NV_DMA_V0_TARGET_VM; + args.access = NV_DMA_V0_ACCESS_RDWR; args.start = 0; args.limit = vmm->limit - 1; } } - ret = nouveau_object_new(nv_object(chan->cli), parent, - chan->push.handle, 0x0002, - &args, sizeof(args), &push); + ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH | + (handle & 0xffff), NV_DMA_FROM_MEMORY, + &args, sizeof(args), &chan->push.ctxdma); if (ret) { nouveau_channel_del(pchan); return ret; @@ -186,38 +181,56 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli, } static int -nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli, - u32 parent, u32 handle, u32 engine, - struct nouveau_channel **pchan) +nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, + u32 handle, u32 engine, struct nouveau_channel **pchan) { - static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS, - NVC0_CHANNEL_IND_CLASS, - NV84_CHANNEL_IND_CLASS, - NV50_CHANNEL_IND_CLASS, + static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A, + FERMI_CHANNEL_GPFIFO, + G82_CHANNEL_GPFIFO, + NV50_CHANNEL_GPFIFO, 0 }; const u16 *oclass = oclasses; - struct nve0_channel_ind_class args; + union { + struct nv50_channel_gpfifo_v0 nv50; + struct kepler_channel_gpfifo_a_v0 kepler; + } args, *retn; struct nouveau_channel *chan; + u32 size; int ret; /* allocate dma push buffer */ - ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan); + ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan); *pchan = chan; if (ret) return ret; /* create channel object */ - args.pushbuf = chan->push.handle; - args.ioffset = 0x10000 + chan->push.vma.offset; - args.ilength = 0x02000; - args.engine = engine; - do { - ret = nouveau_object_new(nv_object(cli), parent, handle, - *oclass++, &args, sizeof(args), - &chan->object); - if (ret == 0) + if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) { + args.kepler.version = 0; + args.kepler.engine = engine; + args.kepler.pushbuf = chan->push.ctxdma.handle; + args.kepler.ilength = 0x02000; + args.kepler.ioffset = 0x10000 + chan->push.vma.offset; + size = sizeof(args.kepler); + } else { + args.nv50.version = 0; + args.nv50.pushbuf = chan->push.ctxdma.handle; + args.nv50.ilength = 0x02000; + 
args.nv50.ioffset = 0x10000 + chan->push.vma.offset; + size = sizeof(args.nv50); + } + + ret = nvif_object_new(nvif_object(device), handle, *oclass++, + &args, size, &chan->object); + if (ret == 0) { + retn = chan->object->data; + if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A) + chan->chid = retn->kepler.chid; + else + chan->chid = retn->nv50.chid; return ret; + } } while (*oclass); nouveau_channel_del(pchan); @@ -225,35 +238,38 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli, } static int -nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli, - u32 parent, u32 handle, struct nouveau_channel **pchan) +nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device, + u32 handle, struct nouveau_channel **pchan) { - static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS, - NV17_CHANNEL_DMA_CLASS, - NV10_CHANNEL_DMA_CLASS, - NV03_CHANNEL_DMA_CLASS, + static const u16 oclasses[] = { NV40_CHANNEL_DMA, + NV17_CHANNEL_DMA, + NV10_CHANNEL_DMA, + NV03_CHANNEL_DMA, 0 }; const u16 *oclass = oclasses; - struct nv03_channel_dma_class args; + struct nv03_channel_dma_v0 args, *retn; struct nouveau_channel *chan; int ret; /* allocate dma push buffer */ - ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan); + ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan); *pchan = chan; if (ret) return ret; /* create channel object */ - args.pushbuf = chan->push.handle; + args.version = 0; + args.pushbuf = chan->push.ctxdma.handle; args.offset = chan->push.vma.offset; do { - ret = nouveau_object_new(nv_object(cli), parent, handle, - *oclass++, &args, sizeof(args), - &chan->object); - if (ret == 0) + ret = nvif_object_new(nvif_object(device), handle, *oclass++, + &args, sizeof(args), &chan->object); + if (ret == 0) { + retn = chan->object->data; + chan->chid = retn->chid; return ret; + } } while (ret && *oclass); nouveau_channel_del(pchan); @@ -263,60 +279,64 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli, static int nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) { - struct nouveau_client *client = nv_client(chan->cli); - struct nouveau_device *device = nv_device(chan->drm->device); - struct nouveau_instmem *imem = nouveau_instmem(device); - struct nouveau_vmmgr *vmm = nouveau_vmmgr(device); - struct nouveau_fb *pfb = nouveau_fb(device); + struct nvif_device *device = chan->device; + struct nouveau_cli *cli = (void *)nvif_client(&device->base); + struct nouveau_vmmgr *vmm = nvkm_vmmgr(device); struct nouveau_software_chan *swch; - struct nouveau_object *object; - struct nv_dma_class args = {}; + struct nv_dma_v0 args = {}; int ret, i; + bool save; + + nvif_object_map(chan->object); /* allocate dma objects to cover all allowed vram, and gart */ - if (device->card_type < NV_C0) { - if (device->card_type >= NV_50) { - args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { + if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { + args.target = NV_DMA_V0_TARGET_VM; + args.access = NV_DMA_V0_ACCESS_VM; args.start = 0; - args.limit = client->vm->vmm->limit - 1; + args.limit = cli->vm->vmm->limit - 1; } else { - args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR; + args.target = NV_DMA_V0_TARGET_VRAM; + args.access = NV_DMA_V0_ACCESS_RDWR; args.start = 0; - args.limit = pfb->ram->size - imem->reserved - 1; + args.limit = device->info.ram_user - 1; } - ret = nouveau_object_new(nv_object(client), chan->handle, vram, - 0x003d, &args, sizeof(args), 
&object); + ret = nvif_object_init(chan->object, NULL, vram, + NV_DMA_IN_MEMORY, &args, + sizeof(args), &chan->vram); if (ret) return ret; - if (device->card_type >= NV_50) { - args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM; + if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { + args.target = NV_DMA_V0_TARGET_VM; + args.access = NV_DMA_V0_ACCESS_VM; args.start = 0; - args.limit = client->vm->vmm->limit - 1; + args.limit = cli->vm->vmm->limit - 1; } else if (chan->drm->agp.stat == ENABLED) { - args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR; + args.target = NV_DMA_V0_TARGET_AGP; + args.access = NV_DMA_V0_ACCESS_RDWR; args.start = chan->drm->agp.base; args.limit = chan->drm->agp.base + chan->drm->agp.size - 1; } else { - args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR; + args.target = NV_DMA_V0_TARGET_VM; + args.access = NV_DMA_V0_ACCESS_RDWR; args.start = 0; args.limit = vmm->limit - 1; } - ret = nouveau_object_new(nv_object(client), chan->handle, gart, - 0x003d, &args, sizeof(args), &object); + ret = nvif_object_init(chan->object, NULL, gart, + NV_DMA_IN_MEMORY, &args, + sizeof(args), &chan->gart); if (ret) return ret; - - chan->vram = vram; - chan->gart = gart; } /* initialise dma tracking parameters */ - switch (nv_hclass(chan->object) & 0x00ff) { + switch (chan->object->oclass & 0x00ff) { case 0x006b: case 0x006e: chan->user_put = 0x40; @@ -347,13 +367,13 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) OUT_RING(chan, 0x00000000); /* allocate software object class (used for fences on <= nv05) */ - if (device->card_type < NV_10) { - ret = nouveau_object_new(nv_object(client), chan->handle, - NvSw, 0x006e, NULL, 0, &object); + if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) { + ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e, + NULL, 0, &chan->nvsw); if (ret) return ret; - swch = (void *)object->parent; + swch = (void *)nvkm_object(&chan->nvsw)->parent; swch->flip = nouveau_flip_complete; swch->flip_data = chan; @@ -362,34 +382,39 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) return ret; BEGIN_NV04(chan, NvSubSw, 0x0000, 1); - OUT_RING (chan, NvSw); + OUT_RING (chan, chan->nvsw.handle); FIRE_RING (chan); } /* initialise synchronisation */ - return nouveau_fence(chan->drm)->context_new(chan); + save = cli->base.super; + cli->base.super = true; /* hack until fencenv50 fixed */ + ret = nouveau_fence(chan->drm)->context_new(chan); + cli->base.super = save; + return ret; } int -nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli, - u32 parent, u32 handle, u32 arg0, u32 arg1, +nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, + u32 handle, u32 arg0, u32 arg1, struct nouveau_channel **pchan) { + struct nouveau_cli *cli = (void *)nvif_client(&device->base); int ret; - ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan); + ret = nouveau_channel_ind(drm, device, handle, arg0, pchan); if (ret) { - NV_DEBUG(cli, "ib channel create, %d\n", ret); - ret = nouveau_channel_dma(drm, cli, parent, handle, pchan); + NV_PRINTK(debug, cli, "ib channel create, %d\n", ret); + ret = nouveau_channel_dma(drm, device, handle, pchan); if (ret) { - NV_DEBUG(cli, "dma channel create, %d\n", ret); + NV_PRINTK(debug, cli, "dma channel create, %d\n", ret); return ret; } } ret = nouveau_channel_init(*pchan, arg0, arg1); if (ret) { - NV_ERROR(cli, "channel failed to initialise, %d\n", ret); + NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret); nouveau_channel_del(pchan); return 
ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h index 40f97e2c47b6..20163709d608 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.h +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h @@ -1,20 +1,23 @@ #ifndef __NOUVEAU_CHAN_H__ #define __NOUVEAU_CHAN_H__ -struct nouveau_cli; +#include <nvif/object.h> +struct nvif_device; struct nouveau_channel { - struct nouveau_cli *cli; + struct nvif_device *device; struct nouveau_drm *drm; - u32 handle; - u32 vram; - u32 gart; + int chid; + + struct nvif_object vram; + struct nvif_object gart; + struct nvif_object nvsw; struct { struct nouveau_bo *buffer; struct nouveau_vma vma; - u32 handle; + struct nvif_object ctxdma; } push; /* TODO: this will be reworked in the near future */ @@ -34,12 +37,12 @@ struct nouveau_channel { u32 user_get; u32 user_put; - struct nouveau_object *object; + struct nvif_object *object; }; -int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *, - u32 parent, u32 handle, u32 arg0, u32 arg1, +int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, + u32 handle, u32 arg0, u32 arg1, struct nouveau_channel **); void nouveau_channel_del(struct nouveau_channel **); int nouveau_channel_idle(struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 1fa222e8f007..1ec44c83e919 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -42,9 +42,7 @@ #include "nouveau_encoder.h" #include "nouveau_crtc.h" -#include <subdev/i2c.h> -#include <subdev/gpio.h> -#include <engine/disp.h> +#include <nvif/event.h> MODULE_PARM_DESC(tv_disable, "Disable TV-out detection"); static int nouveau_tv_disable = 0; @@ -63,7 +61,7 @@ find_encoder(struct drm_connector *connector, int type) { struct drm_device *dev = connector->dev; struct nouveau_encoder *nv_encoder; - struct drm_mode_object *obj; + struct drm_encoder *enc; int i, id; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { @@ -71,10 +69,10 @@ find_encoder(struct drm_connector *connector, int type) if (!id) break; - obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); - if (!obj) + enc = drm_encoder_find(dev, id); + if (!enc) continue; - nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + nv_encoder = nouveau_encoder(enc); if (type == DCB_OUTPUT_ANY || (nv_encoder->dcb && nv_encoder->dcb->type == type)) @@ -102,9 +100,9 @@ static void nouveau_connector_destroy(struct drm_connector *connector) { struct nouveau_connector *nv_connector = nouveau_connector(connector); - nouveau_event_ref(NULL, &nv_connector->hpd); + nvif_notify_fini(&nv_connector->hpd); kfree(nv_connector->edid); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); if (nv_connector->aux.transfer) drm_dp_aux_unregister(&nv_connector->aux); @@ -117,9 +115,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_gpio *gpio = nouveau_gpio(drm->device); + struct nouveau_gpio *gpio = nvkm_gpio(&drm->device); struct nouveau_encoder *nv_encoder; - struct drm_mode_object *obj; + struct drm_encoder *encoder; int i, panel = -ENODEV; /* eDP panels need powering on by us (if the VBIOS doesn't default it @@ -139,10 +137,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) if (id == 0) 
break; - obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(dev, id); + if (!encoder) continue; - nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + nv_encoder = nouveau_encoder(encoder); if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { int ret = nouveau_dp_detect(nv_encoder); @@ -206,7 +204,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, return; nv_connector->detected_encoder = nv_encoder; - if (nv_device(drm->device)->card_type >= NV_50) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { connector->interlace_allowed = true; connector->doublescan_allowed = true; } else @@ -216,9 +214,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, connector->interlace_allowed = false; } else { connector->doublescan_allowed = true; - if (nv_device(drm->device)->card_type == NV_20 || - ((nv_device(drm->device)->card_type == NV_10 || - nv_device(drm->device)->card_type == NV_11) && + if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN || + (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && (dev->pdev->device & 0x0ff0) != 0x0100 && (dev->pdev->device & 0x0ff0) != 0x0150)) /* HW is broken */ @@ -802,11 +799,11 @@ get_tmds_link_bandwidth(struct drm_connector *connector) struct dcb_output *dcb = nv_connector->detected_encoder->dcb; if (dcb->location != DCB_LOC_ON_CHIP || - nv_device(drm->device)->chipset >= 0x46) + drm->device.info.chipset >= 0x46) return 165000; - else if (nv_device(drm->device)->chipset >= 0x40) + else if (drm->device.info.chipset >= 0x40) return 155000; - else if (nv_device(drm->device)->chipset >= 0x18) + else if (drm->device.info.chipset >= 0x18) return 135000; else return 112000; @@ -939,18 +936,19 @@ nouveau_connector_funcs_dp = { .force = nouveau_connector_force }; -static void -nouveau_connector_hotplug_work(struct work_struct *work) +static int +nouveau_connector_hotplug(struct nvif_notify *notify) { struct nouveau_connector *nv_connector = - container_of(work, typeof(*nv_connector), work); + container_of(notify, typeof(*nv_connector), hpd); struct drm_connector *connector = &nv_connector->base; struct nouveau_drm *drm = nouveau_drm(connector->dev); + const struct nvif_notify_conn_rep_v0 *rep = notify->data; const char *name = connector->name; - if (nv_connector->status & NVKM_HPD_IRQ) { + if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { } else { - bool plugged = (nv_connector->status != NVKM_HPD_UNPLUG); + bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); NV_DEBUG(drm, "%splugged %s\n", plugged ? 
"" : "un", name); @@ -961,16 +959,7 @@ nouveau_connector_hotplug_work(struct work_struct *work) drm_helper_hpd_irq_event(connector->dev); } - nouveau_event_get(nv_connector->hpd); -} - -static int -nouveau_connector_hotplug(void *data, u32 type, int index) -{ - struct nouveau_connector *nv_connector = data; - nv_connector->status = type; - schedule_work(&nv_connector->work); - return NVKM_EVENT_DROP; + return NVIF_NOTIFY_KEEP; } static ssize_t @@ -1040,7 +1029,6 @@ nouveau_connector_create(struct drm_device *dev, int index) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_display *disp = nouveau_display(dev); struct nouveau_connector *nv_connector = NULL; - struct nouveau_disp *pdisp = nouveau_disp(drm->device); struct drm_connector *connector; int type, ret = 0; bool dummy; @@ -1194,7 +1182,7 @@ nouveau_connector_create(struct drm_device *dev, int index) switch (nv_connector->type) { case DCB_CONNECTOR_VGA: - if (nv_device(drm->device)->card_type >= NV_50) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { drm_object_attach_property(&connector->base, dev->mode_config.scaling_mode_property, nv_connector->scaling_mode); @@ -1226,16 +1214,20 @@ nouveau_connector_create(struct drm_device *dev, int index) break; } - ret = nouveau_event_new(pdisp->hpd, NVKM_HPD, index, - nouveau_connector_hotplug, - nv_connector, &nv_connector->hpd); + ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug, + true, NV04_DISP_NTFY_CONN, + &(struct nvif_notify_conn_req_v0) { + .mask = NVIF_NOTIFY_CONN_V0_ANY, + .conn = index, + }, + sizeof(struct nvif_notify_conn_req_v0), + sizeof(struct nvif_notify_conn_rep_v0), + &nv_connector->hpd); if (ret) connector->polled = DRM_CONNECTOR_POLL_CONNECT; else connector->polled = DRM_CONNECTOR_POLL_HPD; - INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work); - - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index 8861b6c579ad..68029d041dd2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -27,14 +27,12 @@ #ifndef __NOUVEAU_CONNECTOR_H__ #define __NOUVEAU_CONNECTOR_H__ +#include <nvif/notify.h> + #include <drm/drm_edid.h> #include <drm/drm_dp_helper.h> #include "nouveau_crtc.h" -#include <core/event.h> - -#include <subdev/bios.h> - struct nouveau_i2c_port; enum nouveau_underscan_type { @@ -67,9 +65,7 @@ struct nouveau_connector { u8 index; u8 *dcb; - struct nouveau_eventh *hpd; - u32 status; - struct work_struct work; + struct nvif_notify hpd; struct drm_dp_aux aux; diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h index a0534489d23f..f19cb1c5fc5a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_crtc.h +++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h @@ -27,10 +27,13 @@ #ifndef __NOUVEAU_CRTC_H__ #define __NOUVEAU_CRTC_H__ +#include <nvif/notify.h> + struct nouveau_crtc { struct drm_crtc base; int index; + struct nvif_notify vblank; uint32_t dpms_saved_fp_control; uint32_t fp_users; @@ -46,7 +49,7 @@ struct nouveau_crtc { int cpp; bool blanked; uint32_t offset; - uint32_t tile_flags; + uint32_t handle; } fb; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 47ad74255bf1..4a21b2b06ce2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -27,6 +27,8 @@ #include <drm/drmP.h> 
#include <drm/drm_crtc_helper.h> +#include <nvif/class.h> + #include "nouveau_fbcon.h" #include "dispnv04/hw.h" #include "nouveau_crtc.h" @@ -37,35 +39,42 @@ #include "nouveau_fence.h" -#include <engine/disp.h> - -#include <core/class.h> +#include <nvif/event.h> static int -nouveau_display_vblank_handler(void *data, u32 type, int head) +nouveau_display_vblank_handler(struct nvif_notify *notify) { - struct nouveau_drm *drm = data; - drm_handle_vblank(drm->dev, head); - return NVKM_EVENT_KEEP; + struct nouveau_crtc *nv_crtc = + container_of(notify, typeof(*nv_crtc), vblank); + drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index); + return NVIF_NOTIFY_KEEP; } int nouveau_display_vblank_enable(struct drm_device *dev, int head) { - struct nouveau_display *disp = nouveau_display(dev); - if (disp) { - nouveau_event_get(disp->vblank[head]); - return 0; + struct drm_crtc *crtc; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + if (nv_crtc->index == head) { + nvif_notify_get(&nv_crtc->vblank); + return 0; + } } - return -EIO; + return -EINVAL; } void nouveau_display_vblank_disable(struct drm_device *dev, int head) { - struct nouveau_display *disp = nouveau_display(dev); - if (disp) - nouveau_event_put(disp->vblank[head]); + struct drm_crtc *crtc; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + if (nv_crtc->index == head) { + nvif_notify_put(&nv_crtc->vblank); + return; + } + } } static inline int @@ -86,17 +95,22 @@ int nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) { - const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index; + struct { + struct nv04_disp_mthd_v0 base; + struct nv04_disp_scanoutpos_v0 scan; + } args = { + .base.method = NV04_DISP_SCANOUTPOS, + .base.head = nouveau_crtc(crtc)->index, + }; struct nouveau_display *disp = nouveau_display(crtc->dev); - struct nv04_display_scanoutpos args; int ret, retry = 1; do { - ret = nv_exec(disp->core, mthd, &args, sizeof(args)); + ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args)); if (ret != 0) return 0; - if (args.vline) { + if (args.scan.vline) { ret |= DRM_SCANOUTPOS_ACCURATE; ret |= DRM_SCANOUTPOS_VALID; break; @@ -105,10 +119,11 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, if (retry) ndelay(crtc->linedur_ns); } while (retry--); - *hpos = args.hline; - *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline); - if (stime) *stime = ns_to_ktime(args.time[0]); - if (etime) *etime = ns_to_ktime(args.time[1]); + *hpos = args.scan.hline; + *vpos = calc(args.scan.vblanks, args.scan.vblanke, + args.scan.vtotal, args.scan.vline); + if (stime) *stime = ns_to_ktime(args.scan.time[0]); + if (etime) *etime = ns_to_ktime(args.scan.time[1]); if (*vpos < 0) ret |= DRM_SCANOUTPOS_INVBL; @@ -151,16 +166,13 @@ nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error, static void nouveau_display_vblank_fini(struct drm_device *dev) { - struct nouveau_display *disp = nouveau_display(dev); - int i; + struct drm_crtc *crtc; drm_vblank_cleanup(dev); - if (disp->vblank) { - for (i = 0; i < dev->mode_config.num_crtc; i++) - nouveau_event_ref(NULL, &disp->vblank[i]); - kfree(disp->vblank); - disp->vblank = NULL; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + nvif_notify_fini(&nv_crtc->vblank); } } @@ -168,19 +180,20 @@ static 
int nouveau_display_vblank_init(struct drm_device *dev) { struct nouveau_display *disp = nouveau_display(dev); - struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_disp *pdisp = nouveau_disp(drm->device); - int ret, i; - - disp->vblank = kzalloc(dev->mode_config.num_crtc * - sizeof(*disp->vblank), GFP_KERNEL); - if (!disp->vblank) - return -ENOMEM; + struct drm_crtc *crtc; + int ret; - for (i = 0; i < dev->mode_config.num_crtc; i++) { - ret = nouveau_event_new(pdisp->vblank, 1, i, - nouveau_display_vblank_handler, - drm, &disp->vblank[i]); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + ret = nvif_notify_init(&disp->disp, NULL, + nouveau_display_vblank_handler, false, + NV04_DISP_NTFY_VBLANK, + &(struct nvif_notify_head_req_v0) { + .head = nv_crtc->index, + }, + sizeof(struct nvif_notify_head_req_v0), + sizeof(struct nvif_notify_head_rep_v0), + &nv_crtc->vblank); if (ret) { nouveau_display_vblank_fini(dev); return ret; @@ -200,6 +213,10 @@ static void nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) { struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); + struct nouveau_display *disp = nouveau_display(drm_fb->dev); + + if (disp->fb_dtor) + disp->fb_dtor(drm_fb); if (fb->nvbo) drm_gem_object_unreference_unlocked(&fb->nvbo->gem); @@ -229,63 +246,24 @@ nouveau_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo) { - struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_display *disp = nouveau_display(dev); struct drm_framebuffer *fb = &nv_fb->base; int ret; drm_helper_mode_fill_fb_struct(fb, mode_cmd); nv_fb->nvbo = nvbo; - if (nv_device(drm->device)->card_type >= NV_50) { - u32 tile_flags = nouveau_bo_tile_layout(nvbo); - if (tile_flags == 0x7a00 || - tile_flags == 0xfe00) - nv_fb->r_dma = NvEvoFB32; - else - if (tile_flags == 0x7000) - nv_fb->r_dma = NvEvoFB16; - else - nv_fb->r_dma = NvEvoVRAM_LP; - - switch (fb->depth) { - case 8: nv_fb->r_format = 0x1e00; break; - case 15: nv_fb->r_format = 0xe900; break; - case 16: nv_fb->r_format = 0xe800; break; - case 24: - case 32: nv_fb->r_format = 0xcf00; break; - case 30: nv_fb->r_format = 0xd100; break; - default: - NV_ERROR(drm, "unknown depth %d\n", fb->depth); - return -EINVAL; - } - - if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { - NV_ERROR(drm, "framebuffer requires contiguous bo\n"); - return -EINVAL; - } - - if (nv_device(drm->device)->chipset == 0x50) - nv_fb->r_format |= (tile_flags << 8); - - if (!tile_flags) { - if (nv_device(drm->device)->card_type < NV_D0) - nv_fb->r_pitch = 0x00100000 | fb->pitches[0]; - else - nv_fb->r_pitch = 0x01000000 | fb->pitches[0]; - } else { - u32 mode = nvbo->tile_mode; - if (nv_device(drm->device)->card_type >= NV_C0) - mode >>= 4; - nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode; - } - } - ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); - if (ret) { + if (ret) return ret; + + if (disp->fb_ctor) { + ret = disp->fb_ctor(fb); + if (ret) + disp->fb_dtor(fb); } - return 0; + return ret; } static struct drm_framebuffer * @@ -393,7 +371,7 @@ nouveau_display_init(struct drm_device *dev) /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct nouveau_connector *conn = nouveau_connector(connector); - if (conn->hpd) nouveau_event_get(conn->hpd); + nvif_notify_get(&conn->hpd); } return ret; @@ -404,37 +382,32 @@ nouveau_display_fini(struct drm_device *dev) { struct 
nouveau_display *disp = nouveau_display(dev); struct drm_connector *connector; + int head; + + /* Make sure that drm and hw vblank irqs get properly disabled. */ + for (head = 0; head < dev->mode_config.num_crtc; head++) + drm_vblank_off(dev, head); /* disable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct nouveau_connector *conn = nouveau_connector(connector); - if (conn->hpd) nouveau_event_put(conn->hpd); + nvif_notify_put(&conn->hpd); } drm_kms_helper_poll_disable(dev); disp->fini(dev); } -int -nouveau_display_create(struct drm_device *dev) +static void +nouveau_display_create_properties(struct drm_device *dev) { - struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nouveau_dev(dev); - struct nouveau_display *disp; - int ret, gen; - - disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); - if (!disp) - return -ENOMEM; - - drm_mode_config_init(dev); - drm_mode_create_scaling_mode_property(dev); - drm_mode_create_dvi_i_properties(dev); + struct nouveau_display *disp = nouveau_display(dev); + int gen; - if (nv_device(drm->device)->card_type < NV_50) + if (disp->disp.oclass < NV50_DISP) gen = 0; else - if (nv_device(drm->device)->card_type < NV_D0) + if (disp->disp.oclass < GF110_DISP) gen = 1; else gen = 2; @@ -449,26 +422,43 @@ nouveau_display_create(struct drm_device *dev) disp->underscan_vborder_property = drm_property_create_range(dev, 0, "underscan vborder", 0, 128); - if (gen >= 1) { - /* -90..+90 */ - disp->vibrant_hue_property = - drm_property_create_range(dev, 0, "vibrant hue", 0, 180); + if (gen < 1) + return; - /* -100..+100 */ - disp->color_vibrance_property = - drm_property_create_range(dev, 0, "color vibrance", 0, 200); - } + /* -90..+90 */ + disp->vibrant_hue_property = + drm_property_create_range(dev, 0, "vibrant hue", 0, 180); + + /* -100..+100 */ + disp->color_vibrance_property = + drm_property_create_range(dev, 0, "color vibrance", 0, 200); +} + +int +nouveau_display_create(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_display *disp; + int ret; + + disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); + if (!disp) + return -ENOMEM; + + drm_mode_config_init(dev); + drm_mode_create_scaling_mode_property(dev); + drm_mode_create_dvi_i_properties(dev); dev->mode_config.funcs = &nouveau_mode_config_funcs; - dev->mode_config.fb_base = nv_device_resource_start(device, 1); + dev->mode_config.fb_base = nv_device_resource_start(nvkm_device(&drm->device), 1); dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - if (nv_device(drm->device)->card_type < NV_10) { + if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; } else - if (nv_device(drm->device)->card_type < NV_50) { + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { @@ -479,7 +469,7 @@ nouveau_display_create(struct drm_device *dev) dev->mode_config.preferred_depth = 24; dev->mode_config.prefer_shadow = 1; - if (nv_device(drm->device)->chipset < 0x11) + if (drm->device.info.chipset < 0x11) dev->mode_config.async_page_flip = false; else dev->mode_config.async_page_flip = true; @@ -487,29 +477,30 @@ nouveau_display_create(struct drm_device *dev) drm_kms_helper_poll_init(dev); drm_kms_helper_poll_disable(dev); - if (drm->vbios.dcb.entries) { + if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { static const u16 oclass[] = { 
- GM107_DISP_CLASS, - NVF0_DISP_CLASS, - NVE0_DISP_CLASS, - NVD0_DISP_CLASS, - NVA3_DISP_CLASS, - NV94_DISP_CLASS, - NVA0_DISP_CLASS, - NV84_DISP_CLASS, - NV50_DISP_CLASS, - NV04_DISP_CLASS, + GM107_DISP, + GK110_DISP, + GK104_DISP, + GF110_DISP, + GT214_DISP, + GT206_DISP, + GT200_DISP, + G82_DISP, + NV50_DISP, + NV04_DISP, }; int i; for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { - ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, - NVDRM_DISPLAY, oclass[i], - NULL, 0, &disp->core); + ret = nvif_object_init(nvif_object(&drm->device), NULL, + NVDRM_DISPLAY, oclass[i], + NULL, 0, &disp->disp); } if (ret == 0) { - if (nv_mclass(disp->core) < NV50_DISP_CLASS) + nouveau_display_create_properties(dev); + if (disp->disp.oclass < NV50_DISP) ret = nv04_display_create(dev); else ret = nv50_display_create(dev); @@ -542,7 +533,6 @@ void nouveau_display_destroy(struct drm_device *dev) { struct nouveau_display *disp = nouveau_display(dev); - struct nouveau_drm *drm = nouveau_drm(dev); nouveau_backlight_exit(dev); nouveau_display_vblank_fini(dev); @@ -553,21 +543,19 @@ nouveau_display_destroy(struct drm_device *dev) if (disp->dtor) disp->dtor(dev); - nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY); + nvif_object_fini(&disp->disp); nouveau_drm(dev)->display = NULL; kfree(disp); } int -nouveau_display_suspend(struct drm_device *dev) +nouveau_display_suspend(struct drm_device *dev, bool runtime) { - struct nouveau_drm *drm = nouveau_drm(dev); struct drm_crtc *crtc; nouveau_display_fini(dev); - NV_INFO(drm, "unpinning framebuffer(s)...\n"); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -589,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev) } void -nouveau_display_repin(struct drm_device *dev) +nouveau_display_resume(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); struct drm_crtc *crtc; - int ret; + int ret, head; + /* re-pin fb/cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_framebuffer *nouveau_fb; @@ -602,7 +591,9 @@ nouveau_display_repin(struct drm_device *dev) if (!nouveau_fb || !nouveau_fb->nvbo) continue; - nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); + if (ret) + NV_ERROR(drm, "Could not pin framebuffer\n"); } list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -614,12 +605,7 @@ nouveau_display_repin(struct drm_device *dev) if (ret) NV_ERROR(drm, "Could not pin/map cursor.\n"); } -} -void -nouveau_display_resume(struct drm_device *dev) -{ - struct drm_crtc *crtc; nouveau_display_init(dev); /* Force CLUT to get re-loaded during modeset */ @@ -629,6 +615,17 @@ nouveau_display_resume(struct drm_device *dev) nv_crtc->lut.depth = 0; } + /* Make sure that drm and hw vblank irqs get resumed if needed. */ + for (head = 0; head < dev->mode_config.num_crtc; head++) + drm_vblank_on(dev, head); + + /* This should ensure we don't hit a locking problem when someone + * wakes us up via a connector. We should never go into suspend + * while the display is on anyways. 
+ */ + if (runtime) + return; + drm_helper_resume_force_mode(dev); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { @@ -669,7 +666,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan, if (ret) goto fail; - if (nv_device(drm->device)->card_type < NV_C0) + if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); else BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1); @@ -698,12 +695,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo; struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; struct nouveau_page_flip_state *s; - struct nouveau_channel *chan = drm->channel; + struct nouveau_channel *chan; + struct nouveau_cli *cli; struct nouveau_fence *fence; int ret; - if (!drm->channel) + chan = drm->channel; + if (!chan) return -ENODEV; + cli = (void *)nvif_client(&chan->device->base); s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) @@ -715,7 +715,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, goto fail_free; } - mutex_lock(&chan->cli->mutex); + mutex_lock(&cli->mutex); /* synchronise rendering channel with the kernel's channel */ spin_lock(&new_bo->bo.bdev->fence_lock); @@ -740,7 +740,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, drm_vblank_get(dev, nouveau_crtc(crtc)->index); /* Emit a page flip */ - if (nv_device(drm->device)->card_type >= NV_50) { + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { ret = nv50_display_flip_next(crtc, fb, chan, swap_interval); if (ret) goto fail_unreserve; @@ -769,7 +769,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); if (ret) goto fail_unreserve; - mutex_unlock(&chan->cli->mutex); + mutex_unlock(&cli->mutex); /* Update the crtc struct and cleanup */ crtc->primary->fb = fb; @@ -785,7 +785,7 @@ fail_unreserve: drm_vblank_put(dev, nouveau_crtc(crtc)->index); ttm_bo_unreserve(&old_bo->bo); fail_unpin: - mutex_unlock(&chan->cli->mutex); + mutex_unlock(&cli->mutex); if (old_bo != new_bo) nouveau_bo_unpin(new_bo); fail_free: @@ -815,7 +815,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); if (s->event) { /* Vblank timestamps/counts are only correct on >= NV-50 */ - if (nv_device(drm->device)->card_type >= NV_50) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) crtcid = s->crtc; drm_send_vblank_event(dev, crtcid, s->event); @@ -841,7 +841,7 @@ nouveau_flip_complete(void *data) struct nouveau_page_flip_state state; if (!nouveau_finish_page_flip(chan, &state)) { - if (nv_device(drm->device)->card_type < NV_50) { + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { nv_set_crtc_base(drm->dev, state.crtc, state.offset + state.y * state.pitch + state.x * state.bpp / 8); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index a71cf77e55b2..be3d5947c6be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -9,9 +9,11 @@ struct nouveau_framebuffer { struct drm_framebuffer base; struct nouveau_bo *nvbo; struct nouveau_vma vma; - u32 r_dma; + u32 r_handle; u32 r_format; u32 r_pitch; + struct nvif_object h_base[4]; + struct nvif_object h_core; }; static inline struct nouveau_framebuffer * @@ -36,8 +38,10 @@ struct nouveau_display { int (*init)(struct 
drm_device *); void (*fini)(struct drm_device *); - struct nouveau_object *core; - struct nouveau_eventh **vblank; + int (*fb_ctor)(struct drm_framebuffer *); + void (*fb_dtor)(struct drm_framebuffer *); + + struct nvif_object disp; struct drm_property *dithering_mode; struct drm_property *dithering_depth; @@ -59,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev); void nouveau_display_destroy(struct drm_device *dev); int nouveau_display_init(struct drm_device *dev); void nouveau_display_fini(struct drm_device *dev); -int nouveau_display_suspend(struct drm_device *dev); -void nouveau_display_repin(struct drm_device *dev); -void nouveau_display_resume(struct drm_device *dev); +int nouveau_display_suspend(struct drm_device *dev, bool runtime); +void nouveau_display_resume(struct drm_device *dev, bool runtime); int nouveau_display_vblank_enable(struct drm_device *, int); void nouveau_display_vblank_disable(struct drm_device *, int); int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index c177272152e2..8508603cc8c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -24,8 +24,6 @@ * */ -#include <core/client.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" @@ -54,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout) { uint64_t val; - val = nv_ro32(chan->object, chan->user_get); + val = nvif_rd32(chan, chan->user_get); if (chan->user_get_hi) - val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32; + val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32; /* reset counter as long as GET is still advancing, this is * to avoid misdetecting a GPU lockup if the GPU happens to @@ -84,12 +82,13 @@ void nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, int delta, int length) { + struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); struct nouveau_bo *pb = chan->push.buffer; struct nouveau_vma *vma; int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; u64 offset; - vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm); + vma = nouveau_bo_vma_find(bo, cli->vm); BUG_ON(!vma); offset = vma->offset + delta; @@ -104,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, /* Flush writes. */ nouveau_bo_rd32(pb, 0); - nv_wo32(chan->object, 0x8c, chan->dma.ib_put); + nvif_wr32(chan, 0x8c, chan->dma.ib_put); chan->dma.ib_free--; } @@ -114,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count) uint32_t cnt = 0, prev_get = 0; while (chan->dma.ib_free < count) { - uint32_t get = nv_ro32(chan->object, 0x88); + uint32_t get = nvif_rd32(chan, 0x88); if (get != prev_get) { prev_get = get; cnt = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index dc0e0c5cadb4..8da0a272c45a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -58,31 +58,14 @@ enum { FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */ }; -/* Object handles. */ +/* Object handles - for stuff that's doesn't use handle == oclass. 
*/ enum { - NvM2MF = 0x80000001, NvDmaFB = 0x80000002, NvDmaTT = 0x80000003, NvNotify0 = 0x80000006, - Nv2D = 0x80000007, - NvCtxSurf2D = 0x80000008, - NvRop = 0x80000009, - NvImagePatt = 0x8000000a, - NvClipRect = 0x8000000b, - NvGdiRect = 0x8000000c, - NvImageBlit = 0x8000000d, - NvSw = 0x8000000e, NvSema = 0x8000000f, NvEvoSema0 = 0x80000010, NvEvoSema1 = 0x80000011, - NvNotify1 = 0x80000012, - - /* G80+ display objects */ - NvEvoVRAM = 0x01000000, - NvEvoFB16 = 0x01000001, - NvEvoFB32 = 0x01000002, - NvEvoVRAM_LP = 0x01000003, - NvEvoSync = 0xcafe0000 }; #define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 @@ -157,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data) #define WRITE_PUT(val) do { \ mb(); \ nouveau_bo_rd32(chan->push.buffer, 0); \ - nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ + nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \ } while (0) static inline void diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 5675ffc175ae..c5137cccce7d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -30,11 +30,6 @@ #include "nouveau_encoder.h" #include "nouveau_crtc.h" -#include <core/class.h> - -#include <subdev/gpio.h> -#include <subdev/i2c.h> - static void nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch, u8 *dpcd) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index c9428c943afb..3ed32dd90303 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -27,21 +27,14 @@ #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> + #include "drmP.h" #include "drm_crtc_helper.h" + #include <core/device.h> -#include <core/client.h> #include <core/gpuobj.h> -#include <core/class.h> #include <core/option.h> -#include <engine/device.h> -#include <engine/disp.h> -#include <engine/fifo.h> -#include <engine/software.h> - -#include <subdev/vm.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_ttm.h" @@ -57,6 +50,7 @@ #include "nouveau_fbcon.h" #include "nouveau_fence.h" #include "nouveau_debugfs.h" +#include "nouveau_usif.h" MODULE_PARM_DESC(config, "option string to pass to driver core"); static char *nouveau_config; @@ -109,40 +103,37 @@ static int nouveau_cli_create(u64 name, const char *sname, int size, void **pcli) { - struct nouveau_cli *cli; - int ret; - - *pcli = NULL; - ret = nouveau_client_create_(sname, name, nouveau_config, - nouveau_debug, size, pcli); - cli = *pcli; - if (ret) { - if (cli) - nouveau_client_destroy(&cli->base); - *pcli = NULL; + struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL); + if (cli) { + int ret = nvif_client_init(NULL, NULL, sname, name, + nouveau_config, nouveau_debug, + &cli->base); + if (ret == 0) { + mutex_init(&cli->mutex); + usif_client_init(cli); + } return ret; } - - mutex_init(&cli->mutex); - return 0; + return -ENOMEM; } static void nouveau_cli_destroy(struct nouveau_cli *cli) { - struct nouveau_object *client = nv_object(cli); - nouveau_vm_ref(NULL, &cli->base.vm, NULL); - nouveau_client_fini(&cli->base, false); - atomic_set(&client->refcount, 1); - nouveau_object_ref(NULL, &client); + nouveau_vm_ref(NULL, &nvkm_client(&cli->base)->vm, NULL); + nvif_client_fini(&cli->base); + usif_client_fini(cli); } static void nouveau_accel_fini(struct nouveau_drm *drm) { - nouveau_gpuobj_ref(NULL, &drm->notify); 
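/*
 * Condensed sketch of the client lifetime after the nouveau_drm.c hunks
 * above: a nouveau_cli is now a plain allocation wrapping an nvif_client,
 * brought up with nvif_client_init() and torn down again with
 * nvif_client_fini()/usif_client_fini(). The helper name and the kfree()
 * pairing are illustrative only; error paths and VM teardown are trimmed.
 */
static struct nouveau_cli *
nouveau_cli_create_sketch(u64 name, const char *sname, int size)
{
        struct nouveau_cli *cli = kzalloc(size, GFP_KERNEL);

        if (!cli)
                return NULL;
        if (nvif_client_init(NULL, NULL, sname, name, nouveau_config,
                             nouveau_debug, &cli->base)) {
                kfree(cli);
                return NULL;
        }
        mutex_init(&cli->mutex);
        usif_client_init(cli);
        return cli;
}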
nouveau_channel_del(&drm->channel); + nvif_object_fini(&drm->ntfy); + nouveau_gpuobj_ref(NULL, &drm->notify); + nvif_object_fini(&drm->nvsw); nouveau_channel_del(&drm->cechan); + nvif_object_fini(&drm->ttm.copy); if (drm->fence) nouveau_fence(drm)->dtor(drm); } @@ -150,46 +141,71 @@ nouveau_accel_fini(struct nouveau_drm *drm) static void nouveau_accel_init(struct nouveau_drm *drm) { - struct nouveau_device *device = nv_device(drm->device); - struct nouveau_object *object; + struct nvif_device *device = &drm->device; u32 arg0, arg1; - int ret; + u32 sclass[16]; + int ret, i; - if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/) + if (nouveau_noaccel) return; /* initialise synchronisation routines */ - if (device->card_type < NV_10) ret = nv04_fence_create(drm); - else if (device->card_type < NV_11 || - device->chipset < 0x17) ret = nv10_fence_create(drm); - else if (device->card_type < NV_50) ret = nv17_fence_create(drm); - else if (device->chipset < 0x84) ret = nv50_fence_create(drm); - else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); - else ret = nvc0_fence_create(drm); + /*XXX: this is crap, but the fence/channel stuff is a little + * backwards in some places. this will be fixed. + */ + ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass)); + if (ret < 0) + return; + + for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) { + switch (sclass[i]) { + case NV03_CHANNEL_DMA: + ret = nv04_fence_create(drm); + break; + case NV10_CHANNEL_DMA: + ret = nv10_fence_create(drm); + break; + case NV17_CHANNEL_DMA: + case NV40_CHANNEL_DMA: + ret = nv17_fence_create(drm); + break; + case NV50_CHANNEL_GPFIFO: + ret = nv50_fence_create(drm); + break; + case G82_CHANNEL_GPFIFO: + ret = nv84_fence_create(drm); + break; + case FERMI_CHANNEL_GPFIFO: + case KEPLER_CHANNEL_GPFIFO_A: + ret = nvc0_fence_create(drm); + break; + default: + break; + } + } + if (ret) { NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret); nouveau_accel_fini(drm); return; } - if (device->card_type >= NV_E0) { - ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, - NVDRM_CHAN + 1, - NVE0_CHANNEL_IND_ENGINE_CE0 | - NVE0_CHANNEL_IND_ENGINE_CE1, 0, - &drm->cechan); + if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { + ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1, + KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0| + KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1, + 0, &drm->cechan); if (ret) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); - arg0 = NVE0_CHANNEL_IND_ENGINE_GR; + arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR; arg1 = 1; } else - if (device->chipset >= 0xa3 && - device->chipset != 0xaa && - device->chipset != 0xac) { - ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, - NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, - &drm->cechan); + if (device->info.chipset >= 0xa3 && + device->info.chipset != 0xaa && + device->info.chipset != 0xac) { + ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1, + NvDmaFB, NvDmaTT, &drm->cechan); if (ret) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); @@ -200,30 +216,30 @@ nouveau_accel_init(struct nouveau_drm *drm) arg1 = NvDmaTT; } - ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN, - arg0, arg1, &drm->channel); + ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1, + &drm->channel); if (ret) { NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); nouveau_accel_fini(drm); return; } - ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW, - 
nouveau_abi16_swclass(drm), NULL, 0, &object); + ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW, + nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw); if (ret == 0) { - struct nouveau_software_chan *swch = (void *)object->parent; + struct nouveau_software_chan *swch; ret = RING_SPACE(drm->channel, 2); if (ret == 0) { - if (device->card_type < NV_C0) { + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { BEGIN_NV04(drm->channel, NvSubSw, 0, 1); OUT_RING (drm->channel, NVDRM_NVSW); } else - if (device->card_type < NV_E0) { + if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) { BEGIN_NVC0(drm->channel, FermiSw, 0, 1); OUT_RING (drm->channel, 0x001f0000); } } - swch = (void *)object->parent; + swch = (void *)nvkm_object(&drm->nvsw)->parent; swch->flip = nouveau_flip_complete; swch->flip_data = drm->channel; } @@ -234,24 +250,24 @@ nouveau_accel_init(struct nouveau_drm *drm) return; } - if (device->card_type < NV_C0) { - ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0, - &drm->notify); + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { + ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32, + 0, 0, &drm->notify); if (ret) { NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); nouveau_accel_fini(drm); return; } - ret = nouveau_object_new(nv_object(drm), - drm->channel->handle, NvNotify0, - 0x003d, &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, + ret = nvif_object_init(drm->channel->object, NULL, NvNotify0, + NV_DMA_IN_MEMORY, + &(struct nv_dma_v0) { + .target = NV_DMA_V0_TARGET_VRAM, + .access = NV_DMA_V0_ACCESS_RDWR, .start = drm->notify->addr, .limit = drm->notify->addr + 31 - }, sizeof(struct nv_dma_class), - &object); + }, sizeof(struct nv_dma_v0), + &drm->ntfy); if (ret) { nouveau_accel_fini(drm); return; @@ -294,7 +310,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev, #ifdef CONFIG_X86 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - remove_conflicting_framebuffers(aper, "nouveaufb", boot); + if (nouveau_modeset != 2) + remove_conflicting_framebuffers(aper, "nouveaufb", boot); kfree(aper); ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI, @@ -348,7 +365,6 @@ static int nouveau_drm_load(struct drm_device *dev, unsigned long flags) { struct pci_dev *pdev = dev->pdev; - struct nouveau_device *device; struct nouveau_drm *drm; int ret; @@ -359,7 +375,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) dev->dev_private = drm; drm->dev = dev; - nouveau_client(drm)->debug = nouveau_dbgopt(nouveau_debug, "DRM"); + nvkm_client(&drm->client.base)->debug = + nouveau_dbgopt(nouveau_debug, "DRM"); INIT_LIST_HEAD(&drm->clients); spin_lock_init(&drm->tile.lock); @@ -370,33 +387,34 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) * (possibly) execute vbios init tables (see nouveau_agp.h) */ if (pdev && drm_pci_device_is_agp(dev) && dev->agp) { + const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY | + NV_DEVICE_V0_DISABLE_MMIO; /* dummy device object, doesn't init anything, but allows * agp code access to registers */ - ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, - NVDRM_DEVICE, 0x0080, - &(struct nv_device_class) { + ret = nvif_device_init(&drm->client.base.base, NULL, + NVDRM_DEVICE, NV_DEVICE, + &(struct nv_device_v0) { .device = ~0, - .disable = - ~(NV_DEVICE_DISABLE_MMIO | - NV_DEVICE_DISABLE_IDENTIFY), + .disable = ~enables, .debug0 = ~0, - }, sizeof(struct nv_device_class), - &drm->device); + }, sizeof(struct nv_device_v0), + &drm->device); if (ret) goto 
fail_device; nouveau_agp_reset(drm); - nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE); + nvif_device_fini(&drm->device); } - ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE, - 0x0080, &(struct nv_device_class) { + ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE, + NV_DEVICE, + &(struct nv_device_v0) { .device = ~0, .disable = 0, .debug0 = 0, - }, sizeof(struct nv_device_class), - &drm->device); + }, sizeof(struct nv_device_v0), + &drm->device); if (ret) goto fail_device; @@ -406,18 +424,19 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) * nosnoop capability. hopefully won't cause issues until a * better fix is found - assuming there is one... */ - device = nv_device(drm->device); - if (nv_device(drm->device)->chipset == 0xc1) - nv_mask(device, 0x00088080, 0x00000800, 0x00000000); + if (drm->device.info.chipset == 0xc1) + nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000); nouveau_vga_init(drm); nouveau_agp_init(drm); - if (device->card_type >= NV_50) { - ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), - 0x1000, &drm->client.base.vm); + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40), + 0x1000, &drm->client.vm); if (ret) goto fail_device; + + nvkm_client(&drm->client.base)->vm = drm->client.vm; } ret = nouveau_ttm_init(drm); @@ -463,6 +482,7 @@ fail_ttm: nouveau_agp_fini(drm); nouveau_vga_fini(drm); fail_device: + nvif_device_fini(&drm->device); nouveau_cli_destroy(&drm->client); return ret; } @@ -488,26 +508,37 @@ nouveau_drm_unload(struct drm_device *dev) nouveau_agp_fini(drm); nouveau_vga_fini(drm); + nvif_device_fini(&drm->device); if (drm->hdmi_device) pci_dev_put(drm->hdmi_device); nouveau_cli_destroy(&drm->client); return 0; } -static void -nouveau_drm_remove(struct pci_dev *pdev) +void +nouveau_drm_device_remove(struct drm_device *dev) { - struct drm_device *dev = pci_get_drvdata(pdev); struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_client *client; struct nouveau_object *device; dev->irq_enabled = false; - device = drm->client.base.device; + client = nvkm_client(&drm->client.base); + device = client->device; drm_put_dev(dev); nouveau_object_ref(NULL, &device); nouveau_object_debug(); } +EXPORT_SYMBOL(nouveau_drm_device_remove); + +static void +nouveau_drm_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + nouveau_drm_device_remove(dev); +} static int nouveau_do_suspend(struct drm_device *dev, bool runtime) @@ -516,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) struct nouveau_cli *cli; int ret; - if (dev->mode_config.num_crtc && !runtime) { + if (dev->mode_config.num_crtc) { + NV_INFO(drm, "suspending console...\n"); + nouveau_fbcon_set_suspend(dev, 1); NV_INFO(drm, "suspending display...\n"); - ret = nouveau_display_suspend(dev); + ret = nouveau_display_suspend(dev, runtime); if (ret) return ret; } @@ -548,13 +581,13 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) } list_for_each_entry(cli, &drm->clients, head) { - ret = nouveau_client_fini(&cli->base, true); + ret = nvif_client_suspend(&cli->base); if (ret) goto fail_client; } NV_INFO(drm, "suspending kernel object tree...\n"); - ret = nouveau_client_fini(&drm->client.base, true); + ret = nvif_client_suspend(&drm->client.base); if (ret) goto fail_client; @@ -563,7 +596,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) fail_client: 
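/*
 * Sketch of the suspend ordering set up by the nouveau_do_suspend() hunks
 * above (fence/channel idling, logging and error unwinding trimmed; the
 * helper name is illustrative): console and display go down first, then
 * each userspace client and finally the kernel's own client are suspended
 * through the new nvif entry points.
 */
static int
nouveau_do_suspend_sketch(struct drm_device *dev, bool runtime)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli;
        int ret;

        if (dev->mode_config.num_crtc) {
                nouveau_fbcon_set_suspend(dev, 1);              /* park the console */
                ret = nouveau_display_suspend(dev, runtime);    /* note the new runtime flag */
                if (ret)
                        return ret;
        }

        list_for_each_entry(cli, &drm->clients, head) {
                ret = nvif_client_suspend(&cli->base);          /* per-open() clients */
                if (ret)
                        return ret;
        }
        return nvif_client_suspend(&drm->client.base);          /* kernel object tree */
}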
list_for_each_entry_continue_reverse(cli, &drm->clients, head) { - nouveau_client_init(&cli->base); + nvif_client_resume(&cli->base); } if (drm->fence && nouveau_fence(drm)->resume) @@ -572,7 +605,7 @@ fail_client: fail_display: if (dev->mode_config.num_crtc) { NV_INFO(drm, "resuming display...\n"); - nouveau_display_resume(dev); + nouveau_display_resume(dev, runtime); } return ret; } @@ -587,21 +620,19 @@ int nouveau_pmops_suspend(struct device *dev) drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) return 0; - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 1); - ret = nouveau_do_suspend(drm_dev, false); if (ret) return ret; pci_save_state(pdev); pci_disable_device(pdev); + pci_ignore_hotplug(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int -nouveau_do_resume(struct drm_device *dev) +nouveau_do_resume(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; @@ -611,7 +642,7 @@ nouveau_do_resume(struct drm_device *dev) nouveau_agp_reset(drm); NV_INFO(drm, "resuming kernel object tree...\n"); - nouveau_client_init(&drm->client.base); + nvif_client_resume(&drm->client.base); nouveau_agp_init(drm); NV_INFO(drm, "resuming client object trees...\n"); @@ -619,14 +650,16 @@ nouveau_do_resume(struct drm_device *dev) nouveau_fence(drm)->resume(drm); list_for_each_entry(cli, &drm->clients, head) { - nouveau_client_init(&cli->base); + nvif_client_resume(&cli->base); } nouveau_run_vbios_init(dev); if (dev->mode_config.num_crtc) { NV_INFO(drm, "resuming display...\n"); - nouveau_display_repin(dev); + nouveau_display_resume(dev, runtime); + NV_INFO(drm, "resuming console...\n"); + nouveau_fbcon_set_suspend(dev, 0); } return 0; @@ -649,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev) return ret; pci_set_master(pdev); - ret = nouveau_do_resume(drm_dev); - if (ret) - return ret; - - if (drm_dev->mode_config.num_crtc) { - nouveau_display_resume(drm_dev); - nouveau_fbcon_set_suspend(drm_dev, 0); - } - - return 0; + return nouveau_do_resume(drm_dev, false); } static int nouveau_pmops_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; - - if (drm_dev->mode_config.num_crtc) - nouveau_fbcon_set_suspend(drm_dev, 1); - - ret = nouveau_do_suspend(drm_dev, false); - return ret; + return nouveau_do_suspend(drm_dev, false); } static int nouveau_pmops_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int ret; - - ret = nouveau_do_resume(drm_dev); - if (ret) - return ret; - - if (drm_dev->mode_config.num_crtc) { - nouveau_display_resume(drm_dev); - nouveau_fbcon_set_suspend(drm_dev, 0); - } - - return 0; + return nouveau_do_resume(drm_dev, false); } @@ -715,13 +722,17 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) if (ret) goto out_suspend; - if (nv_device(drm->device)->card_type >= NV_50) { - ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40), - 0x1000, &cli->base.vm); + cli->base.super = false; + + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40), + 0x1000, &cli->vm); if (ret) { nouveau_cli_destroy(cli); goto out_suspend; } + + nvkm_client(&cli->base)->vm = cli->vm; } fpriv->driver_priv = cli; @@ -779,24 +790,31 @@ nouveau_ioctls[] = { DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW), }; -long 
nouveau_drm_ioctl(struct file *filp, - unsigned int cmd, unsigned long arg) +long +nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct drm_file *file_priv = filp->private_data; - struct drm_device *dev; + struct drm_file *filp = file->private_data; + struct drm_device *dev = filp->minor->dev; long ret; - dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0 && ret != -EACCES) return ret; - ret = drm_ioctl(filp, cmd, arg); + switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { + case DRM_NOUVEAU_NVIF: + ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd)); + break; + default: + ret = drm_ioctl(file, cmd, arg); + break; + } pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; } + static const struct file_operations nouveau_driver_fops = { .owner = THIS_MODULE, @@ -921,7 +939,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct nouveau_device *device = nouveau_dev(drm_dev); + struct nvif_device *device = &nouveau_drm(drm_dev)->device; int ret; if (nouveau_runtime_pm == 0) @@ -934,10 +952,10 @@ static int nouveau_pmops_runtime_resume(struct device *dev) return ret; pci_set_master(pdev); - ret = nouveau_do_resume(drm_dev); + ret = nouveau_do_resume(drm_dev, true); drm_kms_helper_poll_enable(drm_dev); /* do magic */ - nv_mask(device, 0x88488, (1 << 25), (1 << 25)); + nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; nv_debug_level(NORMAL); @@ -1005,24 +1023,41 @@ nouveau_drm_pci_driver = { .driver.pm = &nouveau_pm_ops, }; -int nouveau_drm_platform_probe(struct platform_device *pdev) +struct drm_device * +nouveau_platform_device_create_(struct platform_device *pdev, int size, + void **pobject) { - struct nouveau_device *device; - int ret; + struct drm_device *drm; + int err; - ret = nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM, + err = nouveau_device_create_(pdev, NOUVEAU_BUS_PLATFORM, nouveau_platform_name(pdev), dev_name(&pdev->dev), nouveau_config, - nouveau_debug, &device); - - ret = drm_platform_init(&driver, pdev); - if (ret) { - nouveau_object_ref(NULL, (struct nouveau_object **)&device); - return ret; + nouveau_debug, size, pobject); + if (err) + return ERR_PTR(err); + + drm = drm_dev_alloc(&driver, &pdev->dev); + if (!drm) { + err = -ENOMEM; + goto err_free; } - return ret; + err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev)); + if (err < 0) + goto err_free; + + drm->platformdev = pdev; + platform_set_drvdata(pdev, drm); + + return drm; + +err_free: + nouveau_object_ref(NULL, (struct nouveau_object **)pobject); + + return ERR_PTR(err); } +EXPORT_SYMBOL(nouveau_platform_device_create_); static int __init nouveau_drm_init(void) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 7efbafaf7c1d..b02b02452c85 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -9,8 +9,8 @@ #define DRIVER_DATE "20120801" #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 0 /* * 1.1.1: @@ -21,15 +21,17 @@ * to control registers on the MPs to enable performance counters, * and to control the warp error enable mask (OpenGL requires out of * bounds access to local memory to be silently ignored / return 0). 
+ * 1.1.2: + * - fixes multiple bugs in flip completion events and timestamping + * 1.2.0: + * - object api exposed to userspace + * - fermi,kepler,maxwell zbc */ -#include <core/client.h> -#include <core/event.h> - -#include <subdev/vm.h> +#include <nvif/client.h> +#include <nvif/device.h> #include <drmP.h> -#include <drm/nouveau_drm.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> @@ -38,7 +40,10 @@ #include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_page_alloc.h> +#include "uapi/drm/nouveau_drm.h" + struct nouveau_channel; +struct platform_device; #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) @@ -50,6 +55,17 @@ struct nouveau_drm_tile { bool used; }; +enum nouveau_drm_object_route { + NVDRM_OBJECT_NVIF = 0, + NVDRM_OBJECT_USIF, + NVDRM_OBJECT_ABI16, +}; + +enum nouveau_drm_notify_route { + NVDRM_NOTIFY_NVIF = 0, + NVDRM_NOTIFY_USIF +}; + enum nouveau_drm_handle { NVDRM_CLIENT = 0xffffffff, NVDRM_DEVICE = 0xdddddddd, @@ -61,10 +77,13 @@ enum nouveau_drm_handle { }; struct nouveau_cli { - struct nouveau_client base; + struct nvif_client base; + struct nouveau_vm *vm; /*XXX*/ struct list_head head; struct mutex mutex; void *abi16; + struct list_head objects; + struct list_head notifys; }; static inline struct nouveau_cli * @@ -73,13 +92,16 @@ nouveau_cli(struct drm_file *fpriv) return fpriv ? fpriv->driver_priv : NULL; } +#include <nvif/object.h> +#include <nvif/device.h> + extern int nouveau_runtime_pm; struct nouveau_drm { struct nouveau_cli client; struct drm_device *dev; - struct nouveau_object *device; + struct nvif_device device; struct list_head clients; struct { @@ -102,6 +124,7 @@ struct nouveau_drm { struct ttm_buffer_object *, struct ttm_mem_reg *, struct ttm_mem_reg *); struct nouveau_channel *chan; + struct nvif_object copy; int mtrr; } ttm; @@ -119,6 +142,8 @@ struct nouveau_drm { struct nouveau_channel *channel; struct nouveau_gpuobj *notify; struct nouveau_fbdev *fbcon; + struct nvif_object nvsw; + struct nvif_object ntfy; /* nv10-nv40 tiling regions */ struct { @@ -148,20 +173,25 @@ nouveau_drm(struct drm_device *dev) return dev->dev_private; } -static inline struct nouveau_device * -nouveau_dev(struct drm_device *dev) -{ - return nv_device(nouveau_drm(dev)->device); -} - int nouveau_pmops_suspend(struct device *); int nouveau_pmops_resume(struct device *); -#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) -#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) -#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args) -#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args) -#define NV_DEBUG(cli, fmt, args...) nv_debug((cli), fmt, ##args) +#define nouveau_platform_device_create(p, u) \ + nouveau_platform_device_create_(p, sizeof(**u), (void **)u) +struct drm_device * +nouveau_platform_device_create_(struct platform_device *pdev, + int size, void **pobject); +void nouveau_drm_device_remove(struct drm_device *dev); + +#define NV_PRINTK(l,c,f,a...) do { \ + struct nouveau_cli *_cli = (c); \ + nv_##l(_cli->base.base.priv, f, ##a); \ +} while(0) +#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a) +#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a) +#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a) +#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a) +#define NV_DEBUG(drm,f,a...) 
NV_PRINTK(debug, &(drm)->client, f, ##a) extern int nouveau_modeset; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 191665ee7f52..49fe6075cc7c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -51,11 +51,6 @@ #include "nouveau_crtc.h" -#include <core/client.h> -#include <core/device.h> - -#include <subdev/fb.h> - MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); static int nouveau_nofbaccel = 0; module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); @@ -65,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int ret; if (info->state != FBINFO_STATE_RUNNING) @@ -74,10 +69,10 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) ret = -ENODEV; if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && mutex_trylock(&drm->client.mutex)) { - if (device->card_type < NV_50) + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) ret = nv04_fbcon_fillrect(info, rect); else - if (device->card_type < NV_C0) + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) ret = nv50_fbcon_fillrect(info, rect); else ret = nvc0_fbcon_fillrect(info, rect); @@ -97,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) { struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int ret; if (info->state != FBINFO_STATE_RUNNING) @@ -106,10 +101,10 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) ret = -ENODEV; if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && mutex_trylock(&drm->client.mutex)) { - if (device->card_type < NV_50) + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) ret = nv04_fbcon_copyarea(info, image); else - if (device->card_type < NV_C0) + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) ret = nv50_fbcon_copyarea(info, image); else ret = nvc0_fbcon_copyarea(info, image); @@ -129,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; int ret; if (info->state != FBINFO_STATE_RUNNING) @@ -138,10 +133,10 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ret = -ENODEV; if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && mutex_trylock(&drm->client.mutex)) { - if (device->card_type < NV_50) + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) ret = nv04_fbcon_imageblit(info, image); else - if (device->card_type < NV_C0) + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) ret = nv50_fbcon_imageblit(info, image); else ret = nvc0_fbcon_imageblit(info, image); @@ -212,6 +207,65 @@ static struct fb_ops nouveau_fbcon_sw_ops = { .fb_debug_leave = drm_fb_helper_debug_leave, }; +void +nouveau_fbcon_accel_save_disable(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + if (drm->fbcon) { + drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; + drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; + } +} + +void 
+nouveau_fbcon_accel_restore(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + if (drm->fbcon) { + drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; + } +} + +static void +nouveau_fbcon_accel_fini(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fbdev *fbcon = drm->fbcon; + if (fbcon && drm->channel) { + console_lock(); + fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; + console_unlock(); + nouveau_channel_idle(drm->channel); + nvif_object_fini(&fbcon->twod); + nvif_object_fini(&fbcon->blit); + nvif_object_fini(&fbcon->gdi); + nvif_object_fini(&fbcon->patt); + nvif_object_fini(&fbcon->rop); + nvif_object_fini(&fbcon->clip); + nvif_object_fini(&fbcon->surf2d); + } +} + +static void +nouveau_fbcon_accel_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fbdev *fbcon = drm->fbcon; + struct fb_info *info = fbcon->helper.fbdev; + int ret; + + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) + ret = nv04_fbcon_accel_init(info); + else + if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) + ret = nv50_fbcon_accel_init(info); + else + ret = nvc0_fbcon_accel_init(info); + + if (ret == 0) + info->fbops = &nouveau_fbcon_ops; +} + static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { @@ -257,7 +311,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper; struct drm_device *dev = fbcon->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct fb_info *info; struct drm_framebuffer *fb; struct nouveau_framebuffer *nouveau_fb; @@ -299,8 +353,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, } chan = nouveau_nofbaccel ? 
NULL : drm->channel; - if (chan && device->card_type >= NV_50) { - ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm, + if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fbcon->nouveau_fb.vma); if (ret) { NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); @@ -357,20 +411,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, mutex_unlock(&dev->struct_mutex); - if (chan) { - ret = -ENODEV; - if (device->card_type < NV_50) - ret = nv04_fbcon_accel_init(info); - else - if (device->card_type < NV_C0) - ret = nv50_fbcon_accel_init(info); - else - ret = nvc0_fbcon_accel_init(info); - - if (ret == 0) - info->fbops = &nouveau_fbcon_ops; - } - + if (chan) + nouveau_fbcon_accel_init(dev); nouveau_fbcon_zfill(dev, fbcon); /* To allow resizeing without swapping buffers */ @@ -438,18 +480,27 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info) info->flags |= FBINFO_HWACCEL_DISABLED; } -static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { +static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .gamma_set = nouveau_fbcon_gamma_set, .gamma_get = nouveau_fbcon_gamma_get, .fb_probe = nouveau_fbcon_create, }; +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work); + console_lock(); + nouveau_fbcon_accel_restore(fbcon->dev); + nouveau_fbcon_zfill(fbcon->dev, fbcon); + fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING); + console_unlock(); +} int nouveau_fbcon_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_fb *pfb = nouveau_fb(drm->device); struct nouveau_fbdev *fbcon; int preferred_bpp; int ret; @@ -462,9 +513,11 @@ nouveau_fbcon_init(struct drm_device *dev) if (!fbcon) return -ENOMEM; + INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work); fbcon->dev = dev; drm->fbcon = fbcon; - fbcon->helper.funcs = &nouveau_fbcon_helper_funcs; + + drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); ret = drm_fb_helper_init(dev, &fbcon->helper, dev->mode_config.num_crtc, 4); @@ -475,10 +528,10 @@ nouveau_fbcon_init(struct drm_device *dev) drm_fb_helper_single_add_all_connectors(&fbcon->helper); - if (pfb->ram->size <= 32 * 1024 * 1024) + if (drm->device.info.ram_size <= 32 * 1024 * 1024) preferred_bpp = 8; else - if (pfb->ram->size <= 64 * 1024 * 1024) + if (drm->device.info.ram_size <= 64 * 1024 * 1024) preferred_bpp = 16; else preferred_bpp = 32; @@ -498,43 +551,25 @@ nouveau_fbcon_fini(struct drm_device *dev) if (!drm->fbcon) return; + nouveau_fbcon_accel_fini(dev); nouveau_fbcon_destroy(dev, drm->fbcon); kfree(drm->fbcon); drm->fbcon = NULL; } void -nouveau_fbcon_save_disable_accel(struct drm_device *dev) -{ - struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { - drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; - drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; - } -} - -void -nouveau_fbcon_restore_accel(struct drm_device *dev) -{ - struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { - drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; - } -} - -void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct nouveau_drm *drm = nouveau_drm(dev); if (drm->fbcon) { + if (state == FBINFO_STATE_RUNNING) { + schedule_work(&drm->fbcon->work); + return; + } + flush_work(&drm->fbcon->work); console_lock(); - if (state == 1) - nouveau_fbcon_save_disable_accel(dev); 
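/*
 * Sketch of the reworked console suspend path from the fbcon hunks above
 * (helper name is illustrative): resume is deferred to the new
 * nouveau_fbcon_set_suspend_work() item, which restores acceleration,
 * re-clears the framebuffer and unsuspends fbcon under console_lock();
 * suspend stays synchronous.
 */
static void
nouveau_fbcon_suspend_sketch(struct drm_device *dev, int state)
{
        struct nouveau_drm *drm = nouveau_drm(dev);

        if (!drm->fbcon)
                return;
        if (state == FBINFO_STATE_RUNNING) {
                schedule_work(&drm->fbcon->work);       /* resume: defer to the work item */
                return;
        }
        flush_work(&drm->fbcon->work);                  /* don't race a pending resume */
        console_lock();
        fb_set_suspend(drm->fbcon->helper.fbdev, state);
        nouveau_fbcon_accel_save_disable(dev);
        console_unlock();
}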
fb_set_suspend(drm->fbcon->helper.fbdev, state); - if (state == 0) { - nouveau_fbcon_restore_accel(dev); - nouveau_fbcon_zfill(dev, drm->fbcon); - } + nouveau_fbcon_accel_save_disable(dev); console_unlock(); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index fcff797d2084..0b465c7d3907 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -36,7 +36,15 @@ struct nouveau_fbdev { struct nouveau_framebuffer nouveau_fb; struct list_head fbdev_list; struct drm_device *dev; + struct work_struct work; unsigned int saved_flags; + struct nvif_object surf2d; + struct nvif_object clip; + struct nvif_object rop; + struct nvif_object patt; + struct nvif_object gdi; + struct nvif_object blit; + struct nvif_object twod; }; void nouveau_fbcon_restore(void); @@ -61,8 +69,8 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info); int nouveau_fbcon_init(struct drm_device *dev); void nouveau_fbcon_fini(struct drm_device *dev); void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); -void nouveau_fbcon_save_disable_accel(struct drm_device *dev); -void nouveau_fbcon_restore_accel(struct drm_device *dev); +void nouveau_fbcon_accel_save_disable(struct drm_device *dev); +void nouveau_fbcon_accel_restore(struct drm_device *dev); void nouveau_fbcon_output_poll_changed(struct drm_device *dev); #endif /* __NV50_FBCON_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index ab5ea3b0d666..0a93114158cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -29,12 +29,13 @@ #include <linux/ktime.h> #include <linux/hrtimer.h> +#include <nvif/notify.h> +#include <nvif/event.h> + #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fence.h" -#include <engine/fifo.h> - struct fence_work { struct work_struct base; struct list_head head; @@ -165,12 +166,18 @@ nouveau_fence_done(struct nouveau_fence *fence) return !fence->channel; } +struct nouveau_fence_wait { + struct nouveau_fence_priv *priv; + struct nvif_notify notify; +}; + static int -nouveau_fence_wait_uevent_handler(void *data, u32 type, int index) +nouveau_fence_wait_uevent_handler(struct nvif_notify *notify) { - struct nouveau_fence_priv *priv = data; - wake_up_all(&priv->waiting); - return NVKM_EVENT_KEEP; + struct nouveau_fence_wait *wait = + container_of(notify, typeof(*wait), notify); + wake_up_all(&wait->priv->waiting); + return NVIF_NOTIFY_KEEP; } static int @@ -178,18 +185,22 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr) { struct nouveau_channel *chan = fence->channel; - struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device); struct nouveau_fence_priv *priv = chan->drm->fence; - struct nouveau_eventh *handler; + struct nouveau_fence_wait wait = { .priv = priv }; int ret = 0; - ret = nouveau_event_new(pfifo->uevent, 1, 0, - nouveau_fence_wait_uevent_handler, - priv, &handler); + ret = nvif_notify_init(chan->object, NULL, + nouveau_fence_wait_uevent_handler, false, + G82_CHANNEL_DMA_V0_NTFY_UEVENT, + &(struct nvif_notify_uevent_req) { + }, + sizeof(struct nvif_notify_uevent_req), + sizeof(struct nvif_notify_uevent_rep), + &wait.notify); if (ret) return ret; - nouveau_event_get(handler); + nvif_notify_get(&wait.notify); if (fence->timeout) { unsigned long timeout = fence->timeout - jiffies; @@ -221,7 +232,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr) } } - nouveau_event_ref(NULL, &handler); + 
nvif_notify_fini(&wait.notify); if (unlikely(ret < 0)) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index c90c0dc0afe8..292a677bfed4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -24,8 +24,6 @@ * */ -#include <subdev/fb.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fence.h" @@ -58,14 +56,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) struct nouveau_vma *vma; int ret; - if (!cli->base.vm) + if (!cli->vm) return 0; - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); if (ret) return ret; - vma = nouveau_bo_vma_find(nvbo, cli->base.vm); + vma = nouveau_bo_vma_find(nvbo, cli->vm); if (!vma) { vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (!vma) { @@ -73,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) goto out; } - ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma); + ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); if (ret) { kfree(vma); goto out; @@ -129,14 +127,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) struct nouveau_vma *vma; int ret; - if (!cli->base.vm) + if (!cli->vm) return; - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); + ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); if (ret) return; - vma = nouveau_bo_vma_find(nvbo, cli->base.vm); + vma = nouveau_bo_vma_find(nvbo, cli->vm); if (vma) { if (--vma->refcount == 0) nouveau_gem_object_unmap(nvbo, vma); @@ -173,7 +171,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, */ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART; - if (nv_device(drm->device)->card_type >= NV_50) + if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) nvbo->valid_domains &= domain; /* Initialize the embedded gem-object. 
We return a single gem-reference @@ -202,8 +200,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; rep->offset = nvbo->bo.offset; - if (cli->base.vm) { - vma = nouveau_bo_vma_find(nvbo, cli->base.vm); + if (cli->vm) { + vma = nouveau_bo_vma_find(nvbo, cli->vm); if (!vma) return -EINVAL; @@ -223,13 +221,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli = nouveau_cli(file_priv); - struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb *pfb = nvkm_fb(&drm->device); struct drm_nouveau_gem_new *req = data; struct nouveau_bo *nvbo = NULL; int ret = 0; if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { - NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags); + NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags); return -EINVAL; } @@ -350,7 +348,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, ww_acquire_init(&op->ticket, &reservation_ww_class); retry: if (++trycnt > 100000) { - NV_ERROR(cli, "%s failed and gave up.\n", __func__); + NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__); return -EINVAL; } @@ -361,7 +359,7 @@ retry: gem = drm_gem_object_lookup(dev, file_priv, b->handle); if (!gem) { - NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle); + NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle); ww_acquire_done(&op->ticket); validate_fini(op, NULL); return -ENOENT; @@ -374,7 +372,7 @@ retry: } if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { - NV_ERROR(cli, "multiple instances of buffer %d on " + NV_PRINTK(error, cli, "multiple instances of buffer %d on " "validation list\n", b->handle); drm_gem_object_unreference_unlocked(gem); ww_acquire_done(&op->ticket); @@ -396,7 +394,7 @@ retry: ww_acquire_fini(&op->ticket); drm_gem_object_unreference_unlocked(gem); if (ret != -ERESTARTSYS) - NV_ERROR(cli, "fail reserve\n"); + NV_PRINTK(error, cli, "fail reserve\n"); return ret; } } @@ -414,7 +412,7 @@ retry: if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) list_add_tail(&nvbo->entry, &op->gart_list); else { - NV_ERROR(cli, "invalid valid domains: 0x%08x\n", + NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n", b->valid_domains); list_add_tail(&nvbo->entry, &op->both_list); ww_acquire_done(&op->ticket); @@ -465,24 +463,24 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, b->write_domains, b->valid_domains); if (unlikely(ret)) { - NV_ERROR(cli, "fail set_domain\n"); + NV_PRINTK(error, cli, "fail set_domain\n"); return ret; } ret = nouveau_bo_validate(nvbo, true, false); if (unlikely(ret)) { if (ret != -ERESTARTSYS) - NV_ERROR(cli, "fail ttm_validate\n"); + NV_PRINTK(error, cli, "fail ttm_validate\n"); return ret; } ret = validate_sync(chan, nvbo); if (unlikely(ret)) { - NV_ERROR(cli, "fail post-validate sync\n"); + NV_PRINTK(error, cli, "fail post-validate sync\n"); return ret; } - if (nv_device(drm->device)->card_type < NV_50) { + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { if (nvbo->bo.offset == b->presumed.offset && ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || @@ -527,14 +525,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); if (unlikely(ret)) { if (ret != -ERESTARTSYS) - NV_ERROR(cli, "validate_init\n"); + NV_PRINTK(error, cli, "validate_init\n"); return ret; } ret = validate_list(chan, cli, 
&op->vram_list, pbbo, user_buffers); if (unlikely(ret < 0)) { if (ret != -ERESTARTSYS) - NV_ERROR(cli, "validate vram_list\n"); + NV_PRINTK(error, cli, "validate vram_list\n"); validate_fini(op, NULL); return ret; } @@ -543,7 +541,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers); if (unlikely(ret < 0)) { if (ret != -ERESTARTSYS) - NV_ERROR(cli, "validate gart_list\n"); + NV_PRINTK(error, cli, "validate gart_list\n"); validate_fini(op, NULL); return ret; } @@ -552,7 +550,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers); if (unlikely(ret < 0)) { if (ret != -ERESTARTSYS) - NV_ERROR(cli, "validate both_list\n"); + NV_PRINTK(error, cli, "validate both_list\n"); validate_fini(op, NULL); return ret; } @@ -613,7 +611,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, uint32_t data; if (unlikely(r->bo_index > req->nr_buffers)) { - NV_ERROR(cli, "reloc bo index invalid\n"); + NV_PRINTK(error, cli, "reloc bo index invalid\n"); ret = -EINVAL; break; } @@ -623,7 +621,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, continue; if (unlikely(r->reloc_bo_index > req->nr_buffers)) { - NV_ERROR(cli, "reloc container bo index invalid\n"); + NV_PRINTK(error, cli, "reloc container bo index invalid\n"); ret = -EINVAL; break; } @@ -631,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, if (unlikely(r->reloc_bo_offset + 4 > nvbo->bo.mem.num_pages << PAGE_SHIFT)) { - NV_ERROR(cli, "reloc outside of bo\n"); + NV_PRINTK(error, cli, "reloc outside of bo\n"); ret = -EINVAL; break; } @@ -640,7 +638,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); if (ret) { - NV_ERROR(cli, "failed kmap for reloc\n"); + NV_PRINTK(error, cli, "failed kmap for reloc\n"); break; } nvbo->validate_mapped = true; @@ -665,7 +663,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, ret = ttm_bo_wait(&nvbo->bo, false, false, false); spin_unlock(&nvbo->bo.bdev->fence_lock); if (ret) { - NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret); + NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret); break; } @@ -696,7 +694,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, return -ENOMEM; list_for_each_entry(temp, &abi16->channels, head) { - if (temp->chan->handle == (NVDRM_CHAN | req->channel)) { + if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) { chan = temp->chan; break; } @@ -711,19 +709,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, goto out_next; if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { - NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n", + NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n", req->nr_push, NOUVEAU_GEM_MAX_PUSH); return nouveau_abi16_put(abi16, -EINVAL); } if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { - NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n", + NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n", req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); return nouveau_abi16_put(abi16, -EINVAL); } if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { - NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n", + NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n", req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); return nouveau_abi16_put(abi16, -EINVAL); } @@ -741,7 +739,7 @@ 
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, /* Ensure all push buffers are on validate list */ for (i = 0; i < req->nr_push; i++) { if (push[i].bo_index >= req->nr_buffers) { - NV_ERROR(cli, "push %d buffer not in list\n", i); + NV_PRINTK(error, cli, "push %d buffer not in list\n", i); ret = -EINVAL; goto out_prevalid; } @@ -752,7 +750,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, req->nr_buffers, &op, &do_reloc); if (ret) { if (ret != -ERESTARTSYS) - NV_ERROR(cli, "validate: %d\n", ret); + NV_PRINTK(error, cli, "validate: %d\n", ret); goto out_prevalid; } @@ -760,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, if (do_reloc) { ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); if (ret) { - NV_ERROR(cli, "reloc apply: %d\n", ret); + NV_PRINTK(error, cli, "reloc apply: %d\n", ret); goto out; } } @@ -768,7 +766,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, if (chan->dma.ib_max) { ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); if (ret) { - NV_ERROR(cli, "nv50cal_space: %d\n", ret); + NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret); goto out; } @@ -780,10 +778,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, push[i].length); } } else - if (nv_device(drm->device)->chipset >= 0x25) { + if (drm->device.info.chipset >= 0x25) { ret = RING_SPACE(chan, req->nr_push * 2); if (ret) { - NV_ERROR(cli, "cal_space: %d\n", ret); + NV_PRINTK(error, cli, "cal_space: %d\n", ret); goto out; } @@ -797,7 +795,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, } else { ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); if (ret) { - NV_ERROR(cli, "jmp_space: %d\n", ret); + NV_PRINTK(error, cli, "jmp_space: %d\n", ret); goto out; } @@ -835,7 +833,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, ret = nouveau_fence_new(chan, false, &fence); if (ret) { - NV_ERROR(cli, "error fencing pushbuf: %d\n", ret); + NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret); WIND_RING(chan); goto out; } @@ -853,7 +851,7 @@ out_next: req->suffix0 = 0x00000000; req->suffix1 = 0x00000000; } else - if (nv_device(drm->device)->chipset >= 0x25) { + if (drm->device.info.chipset >= 0x25) { req->suffix0 = 0x00020000; req->suffix1 = 0x00000000; } else { diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 19fd767bab10..afb36d66e78d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -34,17 +34,13 @@ #include "nouveau_drm.h" #include "nouveau_hwmon.h" -#include <subdev/gpio.h> -#include <subdev/timer.h> -#include <subdev/therm.h> - #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) static ssize_t nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); int temp = therm->temp_get(therm); if (temp < 0) @@ -70,7 +66,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000); @@ -82,7 +78,7 @@ 
nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -103,7 +99,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000); @@ -115,7 +111,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -135,7 +131,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000); @@ -146,7 +142,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -166,7 +162,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000); @@ -177,7 +173,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -198,7 +194,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000); @@ -210,7 +206,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -231,7 +227,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = 
nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000); @@ -244,7 +240,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -264,7 +260,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000); @@ -276,7 +272,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -297,7 +293,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000); @@ -310,7 +306,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; if (kstrtol(buf, 10, &value) == -EINVAL) @@ -350,7 +346,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm)); } @@ -363,7 +359,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); int ret; ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE); @@ -379,7 +375,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; int ret; @@ -402,7 +398,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf) { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); int ret; ret = therm->fan_get(therm); @@ -418,7 +414,7 @@ 
nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); int ret = -ENODEV; long value; @@ -442,7 +438,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); int ret; ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY); @@ -458,7 +454,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; int ret; @@ -482,7 +478,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); int ret; ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY); @@ -498,7 +494,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a, { struct drm_device *dev = dev_get_drvdata(d); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); long value; int ret; @@ -565,7 +561,7 @@ nouveau_hwmon_init(struct drm_device *dev) { #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_therm *therm = nouveau_therm(drm->device); + struct nouveau_therm *therm = nvkm_therm(&drm->device); struct nouveau_hwmon *hwmon; struct device *hwmon_dev; int ret = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c new file mode 100644 index 000000000000..47ca88623753 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c @@ -0,0 +1,136 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +/******************************************************************************* + * NVIF client driver - NVKM directly linked + ******************************************************************************/ + +#include <core/client.h> +#include <core/notify.h> +#include <core/ioctl.h> + +#include <nvif/client.h> +#include <nvif/driver.h> +#include <nvif/notify.h> +#include <nvif/event.h> +#include <nvif/ioctl.h> + +#include "nouveau_drm.h" +#include "nouveau_usif.h" + +static void +nvkm_client_unmap(void *priv, void *ptr, u32 size) +{ + iounmap(ptr); +} + +static void * +nvkm_client_map(void *priv, u64 handle, u32 size) +{ + return ioremap(handle, size); +} + +static int +nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack) +{ + return nvkm_ioctl(priv, super, data, size, hack); +} + +static int +nvkm_client_resume(void *priv) +{ + return nouveau_client_init(priv); +} + +static int +nvkm_client_suspend(void *priv) +{ + return nouveau_client_fini(priv, true); +} + +static void +nvkm_client_fini(void *priv) +{ + struct nouveau_object *client = priv; + nouveau_client_fini(nv_client(client), false); + atomic_set(&client->refcount, 1); + nouveau_object_ref(NULL, &client); +} + +static int +nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size) +{ + const union { + struct nvif_notify_req_v0 v0; + } *args = header; + u8 route; + + if (length == sizeof(args->v0) && args->v0.version == 0) { + route = args->v0.route; + } else { + WARN_ON(1); + return NVKM_NOTIFY_DROP; + } + + switch (route) { + case NVDRM_NOTIFY_NVIF: + return nvif_notify(header, length, data, size); + case NVDRM_NOTIFY_USIF: + return usif_notify(header, length, data, size); + default: + WARN_ON(1); + break; + } + + return NVKM_NOTIFY_DROP; +} + +static int +nvkm_client_init(const char *name, u64 device, const char *cfg, + const char *dbg, void **ppriv) +{ + struct nouveau_client *client; + int ret; + + ret = nouveau_client_create(name, device, cfg, dbg, &client); + *ppriv = client; + if (ret) + return ret; + + client->ntfy = nvkm_client_ntfy; + return 0; +} + +const struct nvif_driver +nvif_driver_nvkm = { + .name = "nvkm", + .init = nvkm_client_init, + .fini = nvkm_client_fini, + .suspend = nvkm_client_suspend, + .resume = nvkm_client_resume, + .ioctl = nvkm_client_ioctl, + .map = nvkm_client_map, + .unmap = nvkm_client_unmap, + .keep = false, +}; diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c new file mode 100644 index 000000000000..246a824c16ca --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
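The new nouveau_nvif.c routes every client operation through the nvif_driver function table — init/fini, suspend/resume, ioctl and map/unmap — with NVKM linked in directly rather than reached over a transport. The consumer side of that table is not part of this hunk, so the fragment below is only a sketch of the dispatch pattern; struct example_client and its drv/obj fields are invented for illustration and do not reflect the real struct nvif_client layout.

	struct example_client {
		const struct nvif_driver *drv;	/* e.g. &nvif_driver_nvkm */
		void *obj;			/* private handle filled in by ->init() */
	};

	/* every request, including object creation, funnels through ->ioctl() */
	static int
	example_client_ioctl(struct example_client *client, void *data, u32 size)
	{
		return client->drv->ioctl(client->obj, false, data, size, NULL);
	}
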
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/reset.h> +#include <linux/regulator/consumer.h> +#include <soc/tegra/pmc.h> + +#include "nouveau_drm.h" +#include "nouveau_platform.h" + +static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu) +{ + int err; + + err = regulator_enable(gpu->vdd); + if (err) + goto err_power; + + err = clk_prepare_enable(gpu->clk); + if (err) + goto err_clk; + err = clk_prepare_enable(gpu->clk_pwr); + if (err) + goto err_clk_pwr; + clk_set_rate(gpu->clk_pwr, 204000000); + udelay(10); + + reset_control_assert(gpu->rst); + udelay(10); + + err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D); + if (err) + goto err_clamp; + udelay(10); + + reset_control_deassert(gpu->rst); + udelay(10); + + return 0; + +err_clamp: + clk_disable_unprepare(gpu->clk_pwr); +err_clk_pwr: + clk_disable_unprepare(gpu->clk); +err_clk: + regulator_disable(gpu->vdd); +err_power: + return err; +} + +static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu) +{ + int err; + + reset_control_assert(gpu->rst); + udelay(10); + + clk_disable_unprepare(gpu->clk_pwr); + clk_disable_unprepare(gpu->clk); + udelay(10); + + err = regulator_disable(gpu->vdd); + if (err) + return err; + + return 0; +} + +static int nouveau_platform_probe(struct platform_device *pdev) +{ + struct nouveau_platform_gpu *gpu; + struct nouveau_platform_device *device; + struct drm_device *drm; + int err; + + gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL); + if (!gpu) + return -ENOMEM; + + gpu->vdd = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR(gpu->vdd)) + return PTR_ERR(gpu->vdd); + + gpu->rst = devm_reset_control_get(&pdev->dev, "gpu"); + if (IS_ERR(gpu->rst)) + return PTR_ERR(gpu->rst); + + gpu->clk = devm_clk_get(&pdev->dev, "gpu"); + if (IS_ERR(gpu->clk)) + return PTR_ERR(gpu->clk); + + gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); + if (IS_ERR(gpu->clk_pwr)) + return PTR_ERR(gpu->clk_pwr); + + err = nouveau_platform_power_up(gpu); + if (err) + return err; + + drm = nouveau_platform_device_create(pdev, &device); + if (IS_ERR(drm)) { + err = PTR_ERR(drm); + goto power_down; + } + + device->gpu = gpu; + + err = drm_dev_register(drm, 0); + if (err < 0) + goto err_unref; + + return 0; + +err_unref: + drm_dev_unref(drm); + + return 0; + +power_down: + nouveau_platform_power_down(gpu); + + return err; +} + +static int nouveau_platform_remove(struct platform_device *pdev) +{ + struct drm_device *drm_dev = platform_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(drm_dev); + struct nouveau_device *device = nvkm_device(&drm->device); + struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu; + + nouveau_drm_device_remove(drm_dev); + + return nouveau_platform_power_down(gpu); +} + +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id nouveau_platform_match[] = { + { .compatible = "nvidia,gk20a" }, + { } +}; + +MODULE_DEVICE_TABLE(of, nouveau_platform_match); 
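nouveau_platform_remove() has to get from the drm_device back to the struct nouveau_platform_gpu that probe set up; it does so by treating the nouveau_device as a member embedded inside a larger platform wrapper and applying container_of() (the nv_device_to_platform() helper defined in nouveau_platform.h below). A minimal sketch of that accessor pattern, with wrapper/to_wrapper as made-up names:

	#include <linux/kernel.h>	/* container_of() */

	struct wrapper {
		struct nouveau_device device;	/* embedded by value, not a pointer */
		struct nouveau_platform_gpu *gpu;
	};

	static inline struct wrapper *
	to_wrapper(struct nouveau_device *dev)
	{
		/* only valid because 'device' is embedded inside 'struct wrapper' */
		return container_of(dev, struct wrapper, device);
	}
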
+#endif + +struct platform_driver nouveau_platform_driver = { + .driver = { + .name = "nouveau", + .of_match_table = of_match_ptr(nouveau_platform_match), + }, + .probe = nouveau_platform_probe, + .remove = nouveau_platform_remove, +}; + +module_platform_driver(nouveau_platform_driver); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h new file mode 100644 index 000000000000..91f66504900e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_platform.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
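The module_platform_driver(nouveau_platform_driver) call above replaces the usual init/exit boilerplate; as a rough sketch (function names shown only for illustration of what the macro generates), it expands to the equivalent of:

	static int __init nouveau_platform_driver_init(void)
	{
		return platform_driver_register(&nouveau_platform_driver);
	}
	module_init(nouveau_platform_driver_init);

	static void __exit nouveau_platform_driver_exit(void)
	{
		platform_driver_unregister(&nouveau_platform_driver);
	}
	module_exit(nouveau_platform_driver_exit);
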
+ */ + +#ifndef __NOUVEAU_PLATFORM_H__ +#define __NOUVEAU_PLATFORM_H__ + +#include "core/device.h" + +struct reset_control; +struct clk; +struct regulator; + +struct nouveau_platform_gpu { + struct reset_control *rst; + struct clk *clk; + struct clk *clk_pwr; + + struct regulator *vdd; +}; + +struct nouveau_platform_device { + struct nouveau_device device; + + struct nouveau_platform_gpu *gpu; +}; + +#define nv_device_to_platform(d) \ + container_of(d, struct nouveau_platform_device, device) + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index a4d22e5eb176..01707e7deaf5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -1,8 +1,6 @@ #include <linux/pagemap.h> #include <linux/slab.h> -#include <subdev/fb.h> - #include "nouveau_drm.h" #include "nouveau_ttm.h" @@ -104,7 +102,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, return NULL; nvbe->dev = drm->dev; - if (nv_device(drm->device)->card_type < NV_50) + if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) nvbe->ttm.ttm.func = &nv04_sgdma_backend; else nvbe->ttm.ttm.func = &nv50_sgdma_backend; diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c index 75dda2b07176..3c6962d15b26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c @@ -22,10 +22,15 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ +#include <nvif/os.h> +#include <nvif/class.h> +#include <nvif/ioctl.h> + #include "nouveau_sysfs.h" -#include <core/object.h> -#include <core/class.h> +MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future"); +static int nouveau_pstate; +module_param_named(pstate, nouveau_pstate, int, 0400); static inline struct drm_device * drm_device(struct device *d) @@ -43,38 +48,42 @@ static ssize_t nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b) { struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); - struct nv_control_pstate_info info; + struct nvif_control_pstate_info_v0 info = {}; size_t cnt = PAGE_SIZE; char *buf = b; int ret, i; - ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info)); + ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_INFO, + &info, sizeof(info)); if (ret) return ret; for (i = 0; i < info.count + 1; i++) { const s32 state = i < info.count ? i : - NV_CONTROL_PSTATE_ATTR_STATE_CURRENT; - struct nv_control_pstate_attr attr = { + NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT; + struct nvif_control_pstate_attr_v0 attr = { .state = state, .index = 0, }; - ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR, - &attr, sizeof(attr)); + ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR, + &attr, sizeof(attr)); if (ret) return ret; if (i < info.count) snappendf(buf, cnt, "%02x:", attr.state); else - snappendf(buf, cnt, "--:"); + snappendf(buf, cnt, "%s:", info.pwrsrc == 0 ? "DC" : + info.pwrsrc == 1 ? 
"AC" : + "--"); attr.index = 0; do { attr.state = state; - ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR, - &attr, sizeof(attr)); + ret = nvif_mthd(&sysfs->ctrl, + NVIF_CONTROL_PSTATE_ATTR, + &attr, sizeof(attr)); if (ret) return ret; @@ -84,9 +93,20 @@ nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b) snappendf(buf, cnt, " %s", attr.unit); } while (attr.index); - if ((state >= 0 && info.pstate == state) || - (state < 0 && info.ustate < 0)) - snappendf(buf, cnt, " *"); + if (state >= 0) { + if (info.ustate_ac == state) + snappendf(buf, cnt, " AC"); + if (info.ustate_dc == state) + snappendf(buf, cnt, " DC"); + if (info.pstate == state) + snappendf(buf, cnt, " *"); + } else { + if (info.ustate_ac < -1) + snappendf(buf, cnt, " AC"); + if (info.ustate_dc < -1) + snappendf(buf, cnt, " DC"); + } + snappendf(buf, cnt, "\n"); } @@ -98,26 +118,36 @@ nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a, const char *buf, size_t count) { struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d)); - struct nv_control_pstate_user args; + struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL }; long value, ret; char *tmp; if ((tmp = strchr(buf, '\n'))) *tmp = '\0'; + if (!strncasecmp(buf, "dc:", 3)) { + args.pwrsrc = 0; + buf += 3; + } else + if (!strncasecmp(buf, "ac:", 3)) { + args.pwrsrc = 1; + buf += 3; + } + if (!strcasecmp(buf, "none")) - args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN; + args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN; else if (!strcasecmp(buf, "auto")) - args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON; + args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON; else { ret = kstrtol(buf, 16, &value); if (ret) return ret; - args.state = value; + args.ustate = value; } - ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args)); + ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_USER, + &args, sizeof(args)); if (ret < 0) return ret; @@ -132,11 +162,11 @@ nouveau_sysfs_fini(struct drm_device *dev) { struct nouveau_sysfs *sysfs = nouveau_sysfs(dev); struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; - if (sysfs->ctrl) { - device_remove_file(nv_device_base(device), &dev_attr_pstate); - nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL); + if (sysfs && sysfs->ctrl.priv) { + device_remove_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate); + nvif_object_fini(&sysfs->ctrl); } drm->sysfs = NULL; @@ -147,18 +177,22 @@ int nouveau_sysfs_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_device *device = nv_device(drm->device); + struct nvif_device *device = &drm->device; struct nouveau_sysfs *sysfs; int ret; + if (!nouveau_pstate) + return 0; + sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL); if (!sysfs) return -ENOMEM; - ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL, - NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl); + ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL, + NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0, + &sysfs->ctrl); if (ret == 0) - device_create_file(nv_device_base(device), &dev_attr_pstate); + device_create_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h index 74b47f1e01ed..f973378160f8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sysfs.h +++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h @@ 
-4,7 +4,7 @@ #include "nouveau_drm.h" struct nouveau_sysfs { - struct nouveau_object *ctrl; + struct nvif_object ctrl; }; static inline struct nouveau_sysfs * diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index ab0228f640a5..53874b76b031 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -24,10 +24,6 @@ * USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include <subdev/fb.h> -#include <subdev/vm.h> -#include <subdev/instmem.h> - #include "nouveau_drm.h" #include "nouveau_ttm.h" #include "nouveau_gem.h" @@ -36,7 +32,7 @@ static int nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) { struct nouveau_drm *drm = nouveau_bdev(man->bdev); - struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb *pfb = nvkm_fb(&drm->device); man->priv = pfb; return 0; } @@ -67,7 +63,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(man->bdev); - struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb *pfb = nvkm_fb(&drm->device); nouveau_mem_node_cleanup(mem->mm_node); pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node); } @@ -76,10 +72,11 @@ static int nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(man->bdev); - struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb *pfb = nvkm_fb(&drm->device); struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_mem *node; u32 size_nc = 0; @@ -162,6 +159,7 @@ static int nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -174,14 +172,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, node->page_shift = 12; - switch (nv_device(drm->device)->card_type) { - case NV_50: - if (nv_device(drm->device)->chipset != 0x50) + switch (drm->device.info.family) { + case NV_DEVICE_INFO_V0_TESLA: + if (drm->device.info.chipset != 0x50) node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; break; - case NV_C0: - case NV_D0: - case NV_E0: + case NV_DEVICE_INFO_V0_FERMI: + case NV_DEVICE_INFO_V0_KEPLER: node->memtype = (nvbo->tile_flags & 0xff00) >> 8; break; default: @@ -206,12 +203,13 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = { nouveau_gart_manager_debug }; +/*XXX*/ #include <core/subdev/vm/nv04.h> static int nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) { struct nouveau_drm *drm = nouveau_bdev(man->bdev); - struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device); + struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device); struct nv04_vmmgr_priv *priv = (void *)vmm; struct nouveau_vm *vm = NULL; nouveau_vm_ref(priv->vm, &vm, NULL); @@ -242,6 +240,7 @@ static int nv04_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct nouveau_mem *node; @@ -354,12 +353,11 @@ int nouveau_ttm_init(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; - struct nouveau_device *device = nv_device(drm->device); u32 bits; int ret; - bits = nouveau_vmmgr(drm->device)->dma_bits; - if (nv_device_is_pci(device)) { + bits = nvkm_vmmgr(&drm->device)->dma_bits; + if 
(nv_device_is_pci(nvkm_device(&drm->device))) { if (drm->agp.stat == ENABLED || !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits))) bits = 32; @@ -391,8 +389,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) } /* VRAM init */ - drm->gem.vram_available = nouveau_fb(drm->device)->ram->size; - drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved; + drm->gem.vram_available = drm->device.info.ram_user; ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, drm->gem.vram_available >> PAGE_SHIFT); @@ -401,12 +398,12 @@ nouveau_ttm_init(struct nouveau_drm *drm) return ret; } - drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1), - nv_device_resource_len(device, 1)); + drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1), + nv_device_resource_len(nvkm_device(&drm->device), 1)); /* GART init */ if (drm->agp.stat != ENABLED) { - drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit; + drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit; } else { drm->gem.gart_available = drm->agp.size; } diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c new file mode 100644 index 000000000000..cb1182d7e80e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -0,0 +1,384 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include "nouveau_drm.h" +#include "nouveau_usif.h" + +#include <nvif/notify.h> +#include <nvif/unpack.h> +#include <nvif/client.h> +#include <nvif/event.h> +#include <nvif/ioctl.h> + +struct usif_notify_p { + struct drm_pending_event base; + struct { + struct drm_event base; + u8 data[]; + } e; +}; + +struct usif_notify { + struct list_head head; + atomic_t enabled; + u32 handle; + u16 reply; + u8 route; + u64 token; + struct usif_notify_p *p; +}; + +static inline struct usif_notify * +usif_notify_find(struct drm_file *filp, u32 handle) +{ + struct nouveau_cli *cli = nouveau_cli(filp); + struct usif_notify *ntfy; + list_for_each_entry(ntfy, &cli->notifys, head) { + if (ntfy->handle == handle) + return ntfy; + } + return NULL; +} + +static inline void +usif_notify_dtor(struct usif_notify *ntfy) +{ + list_del(&ntfy->head); + kfree(ntfy); +} + +int +usif_notify(const void *header, u32 length, const void *data, u32 size) +{ + struct usif_notify *ntfy = NULL; + const union { + struct nvif_notify_rep_v0 v0; + } *rep = header; + struct drm_device *dev; + struct drm_file *filp; + unsigned long flags; + + if (length == sizeof(rep->v0) && rep->v0.version == 0) { + if (WARN_ON(!(ntfy = (void *)(unsigned long)rep->v0.token))) + return NVIF_NOTIFY_DROP; + BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF); + } else + if (WARN_ON(1)) + return NVIF_NOTIFY_DROP; + + if (WARN_ON(!ntfy->p || ntfy->reply != (length + size))) + return NVIF_NOTIFY_DROP; + filp = ntfy->p->base.file_priv; + dev = filp->minor->dev; + + memcpy(&ntfy->p->e.data[0], header, length); + memcpy(&ntfy->p->e.data[length], data, size); + switch (rep->v0.version) { + case 0: { + struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data; + rep->route = ntfy->route; + rep->token = ntfy->token; + } + break; + default: + BUG_ON(1); + break; + } + + spin_lock_irqsave(&dev->event_lock, flags); + if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) { + list_add_tail(&ntfy->p->base.link, &filp->event_list); + filp->event_space -= ntfy->p->e.base.length; + } + wake_up_interruptible(&filp->event_wait); + spin_unlock_irqrestore(&dev->event_lock, flags); + atomic_set(&ntfy->enabled, 0); + return NVIF_NOTIFY_DROP; +} + +static int +usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +{ + struct nouveau_cli *cli = nouveau_cli(f); + struct nvif_client *client = &cli->base; + union { + struct nvif_ioctl_ntfy_new_v0 v0; + } *args = data; + union { + struct nvif_notify_req_v0 v0; + } *req; + struct usif_notify *ntfy; + int ret; + + if (nvif_unpack(args->v0, 0, 0, true)) { + if (usif_notify_find(f, args->v0.index)) + return -EEXIST; + } else + return ret; + req = data; + + if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL))) + return -ENOMEM; + atomic_set(&ntfy->enabled, 0); + + if (nvif_unpack(req->v0, 0, 0, true)) { + ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply; + ntfy->route = req->v0.route; + ntfy->token = req->v0.token; + req->v0.route = NVDRM_NOTIFY_USIF; + req->v0.token = (unsigned long)(void *)ntfy; + ret = nvif_client_ioctl(client, argv, argc); + req->v0.token = ntfy->token; + req->v0.route = ntfy->route; + ntfy->handle = args->v0.index; + } + + if (ret == 0) + list_add(&ntfy->head, &cli->notifys); + if (ret) + kfree(ntfy); + return ret; +} + +static int +usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +{ + struct nouveau_cli *cli = nouveau_cli(f); + struct nvif_client *client = &cli->base; + union { + struct 
nvif_ioctl_ntfy_del_v0 v0; + } *args = data; + struct usif_notify *ntfy; + int ret; + + if (nvif_unpack(args->v0, 0, 0, true)) { + if (!(ntfy = usif_notify_find(f, args->v0.index))) + return -ENOENT; + } else + return ret; + + ret = nvif_client_ioctl(client, argv, argc); + if (ret == 0) + usif_notify_dtor(ntfy); + return ret; +} + +static int +usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +{ + struct nouveau_cli *cli = nouveau_cli(f); + struct nvif_client *client = &cli->base; + union { + struct nvif_ioctl_ntfy_del_v0 v0; + } *args = data; + struct usif_notify *ntfy; + int ret; + + if (nvif_unpack(args->v0, 0, 0, true)) { + if (!(ntfy = usif_notify_find(f, args->v0.index))) + return -ENOENT; + } else + return ret; + + if (atomic_xchg(&ntfy->enabled, 1)) + return 0; + + ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL); + if (ret = -ENOMEM, !ntfy->p) + goto done; + ntfy->p->base.event = &ntfy->p->e.base; + ntfy->p->base.file_priv = f; + ntfy->p->base.pid = current->pid; + ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree; + ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF; + ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply; + + ret = nvif_client_ioctl(client, argv, argc); +done: + if (ret) { + atomic_set(&ntfy->enabled, 0); + kfree(ntfy->p); + } + return ret; +} + +static int +usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +{ + struct nouveau_cli *cli = nouveau_cli(f); + struct nvif_client *client = &cli->base; + union { + struct nvif_ioctl_ntfy_put_v0 v0; + } *args = data; + struct usif_notify *ntfy; + int ret; + + if (nvif_unpack(args->v0, 0, 0, true)) { + if (!(ntfy = usif_notify_find(f, args->v0.index))) + return -ENOENT; + } else + return ret; + + ret = nvif_client_ioctl(client, argv, argc); + if (ret == 0 && atomic_xchg(&ntfy->enabled, 0)) + kfree(ntfy->p); + return ret; +} + +struct usif_object { + struct list_head head; + struct list_head ntfy; + u8 route; + u64 token; +}; + +static void +usif_object_dtor(struct usif_object *object) +{ + list_del(&object->head); + kfree(object); +} + +static int +usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +{ + struct nouveau_cli *cli = nouveau_cli(f); + struct nvif_client *client = &cli->base; + union { + struct nvif_ioctl_new_v0 v0; + } *args = data; + struct usif_object *object; + int ret; + + if (!(object = kmalloc(sizeof(*object), GFP_KERNEL))) + return -ENOMEM; + list_add(&object->head, &cli->objects); + + if (nvif_unpack(args->v0, 0, 0, true)) { + object->route = args->v0.route; + object->token = args->v0.token; + args->v0.route = NVDRM_OBJECT_USIF; + args->v0.token = (unsigned long)(void *)object; + ret = nvif_client_ioctl(client, argv, argc); + args->v0.token = object->token; + args->v0.route = object->route; + } + + if (ret) + usif_object_dtor(object); + return ret; +} + +int +usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) +{ + struct nouveau_cli *cli = nouveau_cli(filp); + struct nvif_client *client = &cli->base; + void *data = kmalloc(argc, GFP_KERNEL); + u32 size = argc; + union { + struct nvif_ioctl_v0 v0; + } *argv = data; + struct usif_object *object; + u8 owner; + int ret; + + if (ret = -ENOMEM, !argv) + goto done; + if (ret = -EFAULT, copy_from_user(argv, user, size)) + goto done; + + if (nvif_unpack(argv->v0, 0, 0, true)) { + /* block access to objects not created via this interface */ + owner = argv->v0.owner; + argv->v0.owner = NVDRM_OBJECT_USIF; + } else + goto done; + + 
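/*
 * usif_ioctl() dispatch, under cli->mutex: object and notifier creation
 * are intercepted so the caller's route/token can be stashed in a
 * usif_object / usif_notify wrapper and replaced with kernel-owned
 * values, the remaining notifier calls (del/get/put) go through their
 * usif_notify bookkeeping, and any other request type is forwarded
 * straight to nvif_client_ioctl().  On the way back out, replies for
 * USIF-routed objects get their original route/token restored (and the
 * wrapper freed on NVIF_IOCTL_V0_DEL), while everything else is stamped
 * NVIF_IOCTL_V0_ROUTE_HIDDEN with a zero token so kernel-internal
 * tokens never leak back to userspace.
 */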
mutex_lock(&cli->mutex); + switch (argv->v0.type) { + case NVIF_IOCTL_V0_NEW: + /* ... except if we're creating children */ + argv->v0.owner = NVIF_IOCTL_V0_OWNER_ANY; + ret = usif_object_new(filp, data, size, argv, argc); + break; + case NVIF_IOCTL_V0_NTFY_NEW: + ret = usif_notify_new(filp, data, size, argv, argc); + break; + case NVIF_IOCTL_V0_NTFY_DEL: + ret = usif_notify_del(filp, data, size, argv, argc); + break; + case NVIF_IOCTL_V0_NTFY_GET: + ret = usif_notify_get(filp, data, size, argv, argc); + break; + case NVIF_IOCTL_V0_NTFY_PUT: + ret = usif_notify_put(filp, data, size, argv, argc); + break; + default: + ret = nvif_client_ioctl(client, argv, argc); + break; + } + if (argv->v0.route == NVDRM_OBJECT_USIF) { + object = (void *)(unsigned long)argv->v0.token; + argv->v0.route = object->route; + argv->v0.token = object->token; + if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) { + list_del(&object->head); + kfree(object); + } + } else { + argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN; + argv->v0.token = 0; + } + argv->v0.owner = owner; + mutex_unlock(&cli->mutex); + + if (copy_to_user(user, argv, argc)) + ret = -EFAULT; +done: + kfree(argv); + return ret; +} + +void +usif_client_fini(struct nouveau_cli *cli) +{ + struct usif_object *object, *otemp; + struct usif_notify *notify, *ntemp; + + list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) { + usif_notify_dtor(notify); + } + + list_for_each_entry_safe(object, otemp, &cli->objects, head) { + usif_object_dtor(object); + } +} + +void +usif_client_init(struct nouveau_cli *cli) +{ + INIT_LIST_HEAD(&cli->objects); + INIT_LIST_HEAD(&cli->notifys); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h new file mode 100644 index 000000000000..c037e3ae8c70 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_usif.h @@ -0,0 +1,9 @@ +#ifndef __NOUVEAU_USIF_H__ +#define __NOUVEAU_USIF_H__ + +void usif_client_init(struct nouveau_cli *); +void usif_client_fini(struct nouveau_cli *); +int usif_ioctl(struct drm_file *, void __user *, u32); +int usif_notify(const void *, u32, const void *, u32); + +#endif diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 4f4c3fec6916..c7592ec8ecb8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -12,14 +12,16 @@ static unsigned int nouveau_vga_set_decode(void *priv, bool state) { - struct nouveau_device *device = nouveau_dev(priv); + struct nvif_device *device = &nouveau_drm(priv)->device; - if (device->card_type == NV_40 && device->chipset >= 0x4c) - nv_wr32(device, 0x088060, state); - else if (device->chipset >= 0x40) - nv_wr32(device, 0x088054, state); + if (device->info.family == NV_DEVICE_INFO_V0_CURIE && + device->info.chipset >= 0x4c) + nvif_wr32(device, 0x088060, state); else - nv_wr32(device, 0x001854, state); + if (device->info.chipset >= 0x40) + nvif_wr32(device, 0x088054, state); + else + nvif_wr32(device, 0x001854, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -106,7 +108,16 @@ void nouveau_vga_fini(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; + bool runtime = false; + + if (nouveau_runtime_pm == 1) + runtime = true; + if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) + runtime = true; + vga_switcheroo_unregister_client(dev->pdev); + if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) + vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); vga_client_register(dev->pdev, NULL, NULL, 
NULL); } diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 8fe32bbed99a..4ef602c5469d 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -22,8 +22,6 @@ * DEALINGS IN THE SOFTWARE. */ -#include <core/object.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fbcon.h" @@ -141,8 +139,7 @@ nv04_fbcon_accel_init(struct fb_info *info) struct drm_device *dev = nfbdev->dev; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_channel *chan = drm->channel; - struct nouveau_device *device = nv_device(drm->device); - struct nouveau_object *object; + struct nvif_device *device = &drm->device; int surface_fmt, pattern_fmt, rect_fmt; int ret; @@ -174,35 +171,35 @@ nv04_fbcon_accel_init(struct fb_info *info) return -EINVAL; } - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D, - device->card_type >= NV_10 ? 0x0062 : 0x0042, - NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, 0x0062, + device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ? + 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d); if (ret) return ret; - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect, - 0x0019, NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0, + &nfbdev->clip); if (ret) return ret; - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop, - 0x0043, NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0, + &nfbdev->rop); if (ret) return ret; - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt, - 0x0044, NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0, + &nfbdev->patt); if (ret) return ret; - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect, - 0x004a, NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0, + &nfbdev->gdi); if (ret) return ret; - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit, - device->chipset >= 0x11 ? 0x009f : 0x005f, - NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, 0x005f, + device->info.chipset >= 0x11 ? 
0x009f : 0x005f, + NULL, 0, &nfbdev->blit); if (ret) return ret; @@ -212,10 +209,10 @@ nv04_fbcon_accel_init(struct fb_info *info) } BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); - OUT_RING(chan, NvCtxSurf2D); + OUT_RING(chan, nfbdev->surf2d.handle); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2); - OUT_RING(chan, NvDmaFB); - OUT_RING(chan, NvDmaFB); + OUT_RING(chan, chan->vram.handle); + OUT_RING(chan, chan->vram.handle); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4); OUT_RING(chan, surface_fmt); OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16)); @@ -223,12 +220,12 @@ nv04_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); - OUT_RING(chan, NvRop); + OUT_RING(chan, nfbdev->rop.handle); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1); OUT_RING(chan, 0x55); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); - OUT_RING(chan, NvImagePatt); + OUT_RING(chan, nfbdev->patt.handle); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8); OUT_RING(chan, pattern_fmt); #ifdef __BIG_ENDIAN @@ -244,18 +241,18 @@ nv04_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, ~0); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1); - OUT_RING(chan, NvClipRect); + OUT_RING(chan, nfbdev->clip.handle); BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2); OUT_RING(chan, 0); OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual); BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1); - OUT_RING(chan, NvImageBlit); + OUT_RING(chan, nfbdev->blit.handle); BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1); - OUT_RING(chan, NvCtxSurf2D); + OUT_RING(chan, nfbdev->surf2d.handle); BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1); OUT_RING(chan, 3); - if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) { + if (device->info.chipset >= 0x11 /*XXX: oclass == 0x009f*/) { BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3); OUT_RING(chan, 0); OUT_RING(chan, 1); @@ -263,12 +260,12 @@ nv04_fbcon_accel_init(struct fb_info *info) } BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1); - OUT_RING(chan, NvGdiRect); + OUT_RING(chan, nfbdev->gdi.handle); BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1); - OUT_RING(chan, NvCtxSurf2D); + OUT_RING(chan, nfbdev->surf2d.handle); BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2); - OUT_RING(chan, NvImagePatt); - OUT_RING(chan, NvRop); + OUT_RING(chan, nfbdev->patt.handle); + OUT_RING(chan, nfbdev->rop.handle); BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1); OUT_RING(chan, 1); BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1); diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c index 94eadd1dd10a..239c2c5a9615 100644 --- a/drivers/gpu/drm/nouveau/nv04_fence.c +++ b/drivers/gpu/drm/nouveau/nv04_fence.c @@ -22,8 +22,6 @@ * Authors: Ben Skeggs */ -#include <engine/fifo.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fence.h" @@ -59,7 +57,7 @@ nv04_fence_sync(struct nouveau_fence *fence, static u32 nv04_fence_read(struct nouveau_channel *chan) { - struct nouveau_fifo_chan *fifo = (void *)chan->object; + struct nouveau_fifo_chan *fifo = nvkm_fifo_chan(chan);; return atomic_read(&fifo->refcnt); } diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c index 06f434f03fba..4faaf0acf5d7 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.c +++ b/drivers/gpu/drm/nouveau/nv10_fence.c @@ -22,9 +22,6 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ -#include <core/object.h> -#include <core/class.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nv10_fence.h" @@ -53,14 
+50,18 @@ nv10_fence_sync(struct nouveau_fence *fence, u32 nv10_fence_read(struct nouveau_channel *chan) { - return nv_ro32(chan->object, 0x0048); + return nvif_rd32(chan, 0x0048); } void nv10_fence_context_del(struct nouveau_channel *chan) { struct nv10_fence_chan *fctx = chan->fence; + int i; nouveau_fence_context_del(&fctx->base); + for (i = 0; i < ARRAY_SIZE(fctx->head); i++) + nvif_object_fini(&fctx->head[i]); + nvif_object_fini(&fctx->sema); chan->fence = NULL; kfree(fctx); } diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h index e5d9204826c2..a87259f3983a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fence.h +++ b/drivers/gpu/drm/nouveau/nv10_fence.h @@ -1,12 +1,13 @@ #ifndef __NV10_FENCE_H_ #define __NV10_FENCE_H_ -#include <core/os.h> #include "nouveau_fence.h" #include "nouveau_bo.h" struct nv10_fence_chan { struct nouveau_fence_chan base; + struct nvif_object sema; + struct nvif_object head[4]; }; struct nv10_fence_priv { diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 22aa9963ea6f..ca907479f92f 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -22,8 +22,8 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ -#include <core/object.h> -#include <core/class.h> +#include <nvif/os.h> +#include <nvif/class.h> #include "nouveau_drm.h" #include "nouveau_dma.h" @@ -33,11 +33,13 @@ int nv17_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *prev, struct nouveau_channel *chan) { + struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base); struct nv10_fence_priv *priv = chan->drm->fence; + struct nv10_fence_chan *fctx = chan->fence; u32 value; int ret; - if (!mutex_trylock(&prev->cli->mutex)) + if (!mutex_trylock(&cli->mutex)) return -EBUSY; spin_lock(&priv->lock); @@ -48,7 +50,7 @@ nv17_fence_sync(struct nouveau_fence *fence, ret = RING_SPACE(prev, 5); if (!ret) { BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); - OUT_RING (prev, NvSema); + OUT_RING (prev, fctx->sema.handle); OUT_RING (prev, 0); OUT_RING (prev, value + 0); OUT_RING (prev, value + 1); @@ -57,14 +59,14 @@ nv17_fence_sync(struct nouveau_fence *fence, if (!ret && !(ret = RING_SPACE(chan, 5))) { BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4); - OUT_RING (chan, NvSema); + OUT_RING (chan, fctx->sema.handle); OUT_RING (chan, 0); OUT_RING (chan, value + 1); OUT_RING (chan, value + 2); FIRE_RING (chan); } - mutex_unlock(&prev->cli->mutex); + mutex_unlock(&cli->mutex); return 0; } @@ -74,7 +76,6 @@ nv17_fence_context_new(struct nouveau_channel *chan) struct nv10_fence_priv *priv = chan->drm->fence; struct nv10_fence_chan *fctx; struct ttm_mem_reg *mem = &priv->bo->bo.mem; - struct nouveau_object *object; u32 start = mem->start * PAGE_SIZE; u32 limit = start + mem->size - 1; int ret = 0; @@ -88,15 +89,14 @@ nv17_fence_context_new(struct nouveau_channel *chan) fctx->base.read = nv10_fence_read; fctx->base.sync = nv17_fence_sync; - ret = nouveau_object_new(nv_object(chan->cli), chan->handle, - NvSema, 0x0002, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, + ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY, + &(struct nv_dma_v0) { + .target = NV_DMA_V0_TARGET_VRAM, + .access = NV_DMA_V0_ACCESS_RDWR, .start = start, .limit = limit, - }, sizeof(struct nv_dma_class), - &object); + }, sizeof(struct nv_dma_v0), + &fctx->sema); if (ret) nv10_fence_context_del(chan); return ret; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c 
b/drivers/gpu/drm/nouveau/nv50_display.c index 4c534b7b04da..03949eaa629f 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -28,6 +28,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_dp_helper.h> +#include <nvif/class.h> + #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_gem.h" @@ -37,15 +39,6 @@ #include "nouveau_fence.h" #include "nv50_display.h" -#include <core/client.h> -#include <core/gpuobj.h> -#include <core/class.h> - -#include <subdev/timer.h> -#include <subdev/bar.h> -#include <subdev/fb.h> -#include <subdev/i2c.h> - #define EVO_DMA_NR 9 #define EVO_MASTER (0x00) @@ -60,45 +53,34 @@ #define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00) #define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10) -#define EVO_CORE_HANDLE (0xd1500000) -#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) -#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff)) -#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \ - (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8)) - /****************************************************************************** * EVO channel *****************************************************************************/ struct nv50_chan { - struct nouveau_object *user; - u32 handle; + struct nvif_object user; }; static int -nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head, +nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head, void *data, u32 size, struct nv50_chan *chan) { - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - const u32 oclass = EVO_CHAN_OCLASS(bclass, core); - const u32 handle = EVO_CHAN_HANDLE(bclass, head); - int ret; - - ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle, - oclass, data, size, &chan->user); - if (ret) - return ret; - - chan->handle = handle; - return 0; + while (oclass[0]) { + int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head, + oclass[0], data, size, + &chan->user); + if (oclass++, ret == 0) { + nvif_object_map(&chan->user); + return ret; + } + } + return -ENOSYS; } static void -nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan) +nv50_chan_destroy(struct nv50_chan *chan) { - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - if (chan->handle) - nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle); + nvif_object_fini(&chan->user); } /****************************************************************************** @@ -110,16 +92,70 @@ struct nv50_pioc { }; static void -nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc) +nv50_pioc_destroy(struct nv50_pioc *pioc) { - nv50_chan_destroy(core, &pioc->base); + nv50_chan_destroy(&pioc->base); } static int -nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head, +nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head, void *data, u32 size, struct nv50_pioc *pioc) { - return nv50_chan_create(core, bclass, head, data, size, &pioc->base); + return nv50_chan_create(disp, oclass, head, data, size, &pioc->base); +} + +/****************************************************************************** + * Cursor Immediate + *****************************************************************************/ + +struct nv50_curs { + struct nv50_pioc base; +}; + +static int +nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs) +{ + struct nv50_disp_cursor_v0 args = { + .head = head, + }; + static const u32 oclass[] = { + GK104_DISP_CURSOR, + GF110_DISP_CURSOR, + 
GT214_DISP_CURSOR, + G82_DISP_CURSOR, + NV50_DISP_CURSOR, + 0 + }; + + return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), + &curs->base); +} + +/****************************************************************************** + * Overlay Immediate + *****************************************************************************/ + +struct nv50_oimm { + struct nv50_pioc base; +}; + +static int +nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm) +{ + struct nv50_disp_cursor_v0 args = { + .head = head, + }; + static const u32 oclass[] = { + GK104_DISP_OVERLAY, + GF110_DISP_OVERLAY, + GT214_DISP_OVERLAY, + G82_DISP_OVERLAY, + NV50_DISP_OVERLAY, + 0 + }; + + return nv50_pioc_create(disp, oclass, head, &args, sizeof(args), + &oimm->base); } /****************************************************************************** @@ -131,6 +167,9 @@ struct nv50_dmac { dma_addr_t handle; u32 *ptr; + struct nvif_object sync; + struct nvif_object vram; + /* Protects against concurrent pushbuf access to this channel, lock is * grabbed by evo_wait (if the pushbuf reservation is successful) and * dropped again by evo_kick. */ @@ -138,207 +177,113 @@ struct nv50_dmac { }; static void -nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac) +nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp) { + nvif_object_fini(&dmac->vram); + nvif_object_fini(&dmac->sync); + + nv50_chan_destroy(&dmac->base); + if (dmac->ptr) { - struct pci_dev *pdev = nv_device(core)->pdev; + struct pci_dev *pdev = nvkm_device(nvif_device(disp))->pdev; pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle); } - - nv50_chan_destroy(core, &dmac->base); -} - -static int -nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent) -{ - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; - - ret = nouveau_object_new(client, parent, NvEvoFB16, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; - - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a | - NV50_DMA_CONF0_PART_256, - }, sizeof(struct nv_dma_class), &object); - return ret; -} - -static int -nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) -{ - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; - - ret = nouveau_object_new(client, parent, 
NvEvoFB16, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; - - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe, - }, sizeof(struct nv_dma_class), &object); - return ret; -} - -static int -nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent) -{ - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NVD0_DMA_CONF0_ENABLE | - NVD0_DMA_CONF0_PAGE_LP, - }, sizeof(struct nv_dma_class), &object); - if (ret) - return ret; - - ret = nouveau_object_new(client, parent, NvEvoFB32, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, - .start = 0, - .limit = pfb->ram->size - 1, - .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe | - NVD0_DMA_CONF0_PAGE_LP, - }, sizeof(struct nv_dma_class), &object); - return ret; } static int -nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head, +nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head, void *data, u32 size, u64 syncbuf, struct nv50_dmac *dmac) { - struct nouveau_fb *pfb = nouveau_fb(core); - struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS); - struct nouveau_object *object; - u32 pushbuf = *(u32 *)data; + struct nvif_device *device = nvif_device(disp); + struct nv50_disp_core_channel_dma_v0 *args = data; + struct nvif_object pushbuf; int ret; mutex_init(&dmac->lock); - dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE, - &dmac->handle); + dmac->ptr = pci_alloc_consistent(nvkm_device(device)->pdev, + PAGE_SIZE, &dmac->handle); if (!dmac->ptr) return -ENOMEM; - ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf, - NV_DMA_FROM_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_PCI_US | - NV_DMA_ACCESS_RD, + ret = nvif_object_init(nvif_object(device), NULL, + args->pushbuf, NV_DMA_FROM_MEMORY, + &(struct nv_dma_v0) { + .target = NV_DMA_V0_TARGET_PCI_US, + .access = NV_DMA_V0_ACCESS_RD, .start = dmac->handle + 0x0000, .limit = dmac->handle + 0x0fff, - }, sizeof(struct nv_dma_class), &object); + }, sizeof(struct nv_dma_v0), &pushbuf); if (ret) return ret; - ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base); + ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base); + nvif_object_fini(&pushbuf); if (ret) return ret; - ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync, - NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, + ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000, + NV_DMA_IN_MEMORY, + &(struct nv_dma_v0) { + .target = NV_DMA_V0_TARGET_VRAM, + .access = NV_DMA_V0_ACCESS_RDWR, .start = syncbuf + 0x0000, .limit = syncbuf + 0x0fff, - }, sizeof(struct nv_dma_class), &object); + }, sizeof(struct nv_dma_v0), + &dmac->sync); if (ret) return ret; - ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM, - 
NV_DMA_IN_MEMORY_CLASS, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, + ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001, + NV_DMA_IN_MEMORY, + &(struct nv_dma_v0) { + .target = NV_DMA_V0_TARGET_VRAM, + .access = NV_DMA_V0_ACCESS_RDWR, .start = 0, - .limit = pfb->ram->size - 1, - }, sizeof(struct nv_dma_class), &object); + .limit = device->info.ram_user - 1, + }, sizeof(struct nv_dma_v0), + &dmac->vram); if (ret) return ret; - if (nv_device(core)->card_type < NV_C0) - ret = nv50_dmac_create_fbdma(core, dmac->base.handle); - else - if (nv_device(core)->card_type < NV_D0) - ret = nvc0_dmac_create_fbdma(core, dmac->base.handle); - else - ret = nvd0_dmac_create_fbdma(core, dmac->base.handle); return ret; } +/****************************************************************************** + * Core + *****************************************************************************/ + struct nv50_mast { struct nv50_dmac base; }; -struct nv50_curs { - struct nv50_pioc base; -}; +static int +nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core) +{ + struct nv50_disp_core_channel_dma_v0 args = { + .pushbuf = 0xb0007d00, + }; + static const u32 oclass[] = { + GM107_DISP_CORE_CHANNEL_DMA, + GK110_DISP_CORE_CHANNEL_DMA, + GK104_DISP_CORE_CHANNEL_DMA, + GF110_DISP_CORE_CHANNEL_DMA, + GT214_DISP_CORE_CHANNEL_DMA, + GT206_DISP_CORE_CHANNEL_DMA, + GT200_DISP_CORE_CHANNEL_DMA, + G82_DISP_CORE_CHANNEL_DMA, + NV50_DISP_CORE_CHANNEL_DMA, + 0 + }; + + return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf, + &core->base); +} + +/****************************************************************************** + * Base + *****************************************************************************/ struct nv50_sync { struct nv50_dmac base; @@ -346,13 +291,58 @@ struct nv50_sync { u32 data; }; +static int +nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf, + struct nv50_sync *base) +{ + struct nv50_disp_base_channel_dma_v0 args = { + .pushbuf = 0xb0007c00 | head, + .head = head, + }; + static const u32 oclass[] = { + GK110_DISP_BASE_CHANNEL_DMA, + GK104_DISP_BASE_CHANNEL_DMA, + GF110_DISP_BASE_CHANNEL_DMA, + GT214_DISP_BASE_CHANNEL_DMA, + GT200_DISP_BASE_CHANNEL_DMA, + G82_DISP_BASE_CHANNEL_DMA, + NV50_DISP_BASE_CHANNEL_DMA, + 0 + }; + + return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), + syncbuf, &base->base); +} + +/****************************************************************************** + * Overlay + *****************************************************************************/ + struct nv50_ovly { struct nv50_dmac base; }; -struct nv50_oimm { - struct nv50_pioc base; -}; +static int +nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf, + struct nv50_ovly *ovly) +{ + struct nv50_disp_overlay_channel_dma_v0 args = { + .pushbuf = 0xb0007e00 | head, + .head = head, + }; + static const u32 oclass[] = { + GK104_DISP_OVERLAY_CONTROL_DMA, + GF110_DISP_OVERLAY_CONTROL_DMA, + GT214_DISP_OVERLAY_CHANNEL_DMA, + GT200_DISP_OVERLAY_CHANNEL_DMA, + G82_DISP_OVERLAY_CHANNEL_DMA, + NV50_DISP_OVERLAY_CHANNEL_DMA, + 0 + }; + + return nv50_dmac_create(disp, oclass, head, &args, sizeof(args), + syncbuf, &ovly->base); +} struct nv50_head { struct nouveau_crtc base; @@ -369,13 +359,19 @@ struct nv50_head { #define nv50_ovly(c) (&nv50_head(c)->ovly) #define nv50_oimm(c) (&nv50_head(c)->oimm) #define nv50_chan(c) (&(c)->base.base) -#define nv50_vers(c) nv_mclass(nv50_chan(c)->user) +#define nv50_vers(c) 
nv50_chan(c)->user.oclass + +struct nv50_fbdma { + struct list_head head; + struct nvif_object core; + struct nvif_object base[4]; +}; struct nv50_disp { - struct nouveau_object *core; + struct nvif_object *disp; struct nv50_mast mast; - u32 modeset; + struct list_head fbdma; struct nouveau_bo *sync; }; @@ -401,16 +397,16 @@ static u32 * evo_wait(void *evoc, int nr) { struct nv50_dmac *dmac = evoc; - u32 put = nv_ro32(dmac->base.user, 0x0000) / 4; + u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4; mutex_lock(&dmac->lock); if (put + nr >= (PAGE_SIZE / 4) - 8) { dmac->ptr[put] = 0x20000000; - nv_wo32(dmac->base.user, 0x0000, 0x00000000); - if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) { + nvif_wr32(&dmac->base.user, 0x0000, 0x00000000); + if (!nvkm_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) { mutex_unlock(&dmac->lock); - NV_ERROR(dmac->base.user, "channel stalled\n"); + nv_error(nvkm_object(&dmac->base.user), "channel stalled\n"); return NULL; } @@ -424,7 +420,7 @@ static void evo_kick(u32 *push, void *evoc) { struct nv50_dmac *dmac = evoc; - nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2); + nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2); mutex_unlock(&dmac->lock); } @@ -443,7 +439,7 @@ evo_sync_wait(void *data) static int evo_sync(struct drm_device *dev) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; struct nv50_disp *disp = nv50_disp(dev); struct nv50_mast *mast = nv50_mast(dev); u32 *push = evo_wait(mast, 8); @@ -455,7 +451,7 @@ evo_sync(struct drm_device *dev) evo_data(push, 0x00000000); evo_data(push, 0x00000000); evo_kick(push, mast); - if (nv_wait_cb(device, evo_sync_wait, disp->sync)) + if (nv_wait_cb(nvkm_device(device), evo_sync_wait, disp->sync)) return 0; } @@ -490,7 +486,7 @@ nv50_display_flip_wait(void *data) void nv50_display_flip_stop(struct drm_crtc *crtc) { - struct nouveau_device *device = nouveau_dev(crtc->dev); + struct nvif_device *device = &nouveau_drm(crtc->dev)->device; struct nv50_display_flip flip = { .disp = nv50_disp(crtc->dev), .chan = nv50_sync(crtc), @@ -510,7 +506,7 @@ nv50_display_flip_stop(struct drm_crtc *crtc) evo_kick(push, flip.chan); } - nv_wait_cb(device, nv50_display_flip_wait, &flip); + nv_wait_cb(nvkm_device(device), nv50_display_flip_wait, &flip); } int @@ -534,7 +530,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, if (unlikely(push == NULL)) return -EBUSY; - if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { + if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) { ret = RING_SPACE(chan, 8); if (ret) return ret; @@ -548,14 +544,14 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, OUT_RING (chan, sync->addr); OUT_RING (chan, sync->data); } else - if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { + if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) { u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; ret = RING_SPACE(chan, 12); if (ret) return ret; BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); - OUT_RING (chan, chan->vram); + OUT_RING (chan, chan->vram.handle); BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); OUT_RING (chan, upper_32_bits(addr ^ 0x10)); OUT_RING (chan, lower_32_bits(addr ^ 0x10)); @@ -606,16 +602,16 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, evo_data(push, sync->addr); evo_data(push, sync->data++); evo_data(push, sync->data); - evo_data(push, NvEvoSync); + evo_data(push, 
sync->base.sync.handle); evo_mthd(push, 0x00a0, 2); evo_data(push, 0x00000000); evo_data(push, 0x00000000); evo_mthd(push, 0x00c0, 1); - evo_data(push, nv_fb->r_dma); + evo_data(push, nv_fb->r_handle); evo_mthd(push, 0x0110, 2); evo_data(push, 0x00000000); evo_data(push, 0x00000000); - if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) { + if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) { evo_mthd(push, 0x0800, 5); evo_data(push, nv_fb->nvbo->bo.offset >> 8); evo_data(push, 0); @@ -667,11 +663,11 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update) push = evo_wait(mast, 4); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1); evo_data(push, mode); } else - if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1); evo_data(push, mode); } else { @@ -762,7 +758,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update) push = evo_wait(mast, 8); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { /*XXX: SCALE_CTRL_ACTIVE??? */ evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2); evo_data(push, (oY << 16) | oX); @@ -807,7 +803,7 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) push = evo_wait(mast, 16); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1); evo_data(push, (hue << 20) | (vib << 8)); } else { @@ -835,7 +831,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, push = evo_wait(mast, 16); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1); evo_data(push, nvfb->nvbo->bo.offset >> 8); evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3); @@ -844,9 +840,9 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, evo_data(push, nvfb->r_format); evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1); evo_data(push, (y << 16) | x); - if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) { + if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, nvfb->r_dma); + evo_data(push, nvfb->r_handle); } } else { evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1); @@ -855,7 +851,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, evo_data(push, (fb->height << 16) | fb->width); evo_data(push, nvfb->r_pitch); evo_data(push, nvfb->r_format); - evo_data(push, nvfb->r_dma); + evo_data(push, nvfb->r_handle); evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1); evo_data(push, (y << 16) | x); } @@ -867,7 +863,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb, evo_kick(push, mast); } - nv_crtc->fb.tile_flags = nvfb->r_dma; + nv_crtc->fb.handle = nvfb->r_handle; return 0; } @@ -877,23 +873,23 @@ nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc) struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); u32 *push = evo_wait(mast, 16); if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); evo_data(push, 0x85000000); evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); } else - if 
(nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2); evo_data(push, 0x85000000); evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM); + evo_data(push, mast->base.vram.handle); } else { evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2); evo_data(push, 0x85000000); evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1); - evo_data(push, NvEvoVRAM); + evo_data(push, mast->base.vram.handle); } evo_kick(push, mast); } @@ -905,11 +901,11 @@ nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc) struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); u32 *push = evo_wait(mast, 16); if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); evo_data(push, 0x05000000); } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1); evo_data(push, 0x05000000); evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1); @@ -960,13 +956,13 @@ nv50_crtc_prepare(struct drm_crtc *crtc) push = evo_wait(mast, 6); if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); evo_data(push, 0x40000000); } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); evo_data(push, 0x00000000); evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1); @@ -997,31 +993,31 @@ nv50_crtc_commit(struct drm_crtc *crtc) push = evo_wait(mast, 32); if (push) { - if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) { + if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM_LP); + evo_data(push, nv_crtc->fb.handle); evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); evo_data(push, 0xc0000000); evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); } else - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1); - evo_data(push, nv_crtc->fb.tile_flags); + evo_data(push, nv_crtc->fb.handle); evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2); evo_data(push, 0xc0000000); evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1); - evo_data(push, NvEvoVRAM); + evo_data(push, mast->base.vram.handle); } else { evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1); - evo_data(push, nv_crtc->fb.tile_flags); + evo_data(push, nv_crtc->fb.handle); evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4); evo_data(push, 0x83000000); evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); evo_data(push, 0x00000000); evo_data(push, 0x00000000); evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1); - evo_data(push, NvEvoVRAM); + evo_data(push, mast->base.vram.handle); evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1); evo_data(push, 0xffffff00); } @@ -1099,7 +1095,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode, push = evo_wait(mast, 64); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { 
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); evo_data(push, 0x00800000 | mode->clock); evo_data(push, (ilace == 2) ? 2 : 0); @@ -1192,7 +1188,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) u16 g = nv_crtc->lut.g[i] >> 2; u16 b = nv_crtc->lut.b[i] >> 2; - if (nv_mclass(disp->core) < NVD0_DISP_CLASS) { + if (disp->disp->oclass < GF110_DISP) { writew(r + 0x0000, lut + (i * 0x08) + 0); writew(g + 0x0000, lut + (i * 0x08) + 2); writew(b + 0x0000, lut + (i * 0x08) + 4); @@ -1259,8 +1255,8 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct nv50_curs *curs = nv50_curs(crtc); struct nv50_chan *chan = nv50_chan(curs); - nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff)); - nv_wo32(chan->user, 0x0080, 0x00000000); + nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff)); + nvif_wr32(&chan->user, 0x0080, 0x00000000); return 0; } @@ -1287,11 +1283,16 @@ nv50_crtc_destroy(struct drm_crtc *crtc) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nv50_disp *disp = nv50_disp(crtc->dev); struct nv50_head *head = nv50_head(crtc); + struct nv50_fbdma *fbdma; + + list_for_each_entry(fbdma, &disp->fbdma, head) { + nvif_object_fini(&fbdma->base[nv_crtc->index]); + } - nv50_dmac_destroy(disp->core, &head->ovly.base); - nv50_pioc_destroy(disp->core, &head->oimm.base); - nv50_dmac_destroy(disp->core, &head->sync.base); - nv50_pioc_destroy(disp->core, &head->curs.base); + nv50_dmac_destroy(&head->ovly.base, disp->disp); + nv50_pioc_destroy(&head->oimm.base); + nv50_dmac_destroy(&head->sync.base, disp->disp); + nv50_pioc_destroy(&head->curs.base); /*XXX: this shouldn't be necessary, but the core doesn't call * disconnect() during the cleanup paths @@ -1346,7 +1347,7 @@ nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) } static int -nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) +nv50_crtc_create(struct drm_device *dev, int index) { struct nv50_disp *disp = nv50_disp(dev); struct nv50_head *head; @@ -1395,11 +1396,7 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) nv50_crtc_lut_load(crtc); /* allocate cursor resources */ - ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index, - &(struct nv50_display_curs_class) { - .head = index, - }, sizeof(struct nv50_display_curs_class), - &head->curs.base); + ret = nv50_curs_create(disp->disp, index, &head->curs); if (ret) goto out; @@ -1420,12 +1417,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) goto out; /* allocate page flip / sync resources */ - ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index, - &(struct nv50_display_sync_class) { - .pushbuf = EVO_PUSH_HANDLE(SYNC, index), - .head = index, - }, sizeof(struct nv50_display_sync_class), - disp->sync->bo.offset, &head->sync.base); + ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset, + &head->sync); if (ret) goto out; @@ -1433,20 +1426,12 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index) head->sync.data = 0x00000000; /* allocate overlay resources */ - ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, - &(struct nv50_display_oimm_class) { - .head = index, - }, sizeof(struct nv50_display_oimm_class), - &head->oimm.base); + ret = nv50_oimm_create(disp->disp, index, &head->oimm); if (ret) goto out; - ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index, - &(struct nv50_display_ovly_class) { - .pushbuf = 
EVO_PUSH_HANDLE(OVLY, index), - .head = index, - }, sizeof(struct nv50_display_ovly_class), - disp->sync->bo.offset, &head->ovly.base); + ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset, + &head->ovly); if (ret) goto out; @@ -1464,16 +1449,23 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_disp *disp = nv50_disp(encoder->dev); - int or = nv_encoder->or; - u32 dpms_ctrl; - - dpms_ctrl = 0x00000000; - if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF) - dpms_ctrl |= 0x00000001; - if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF) - dpms_ctrl |= 0x00000004; + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_dac_pwr_v0 pwr; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_DAC_PWR, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + .pwr.state = 1, + .pwr.data = 1, + .pwr.vsync = (mode != DRM_MODE_DPMS_SUSPEND && + mode != DRM_MODE_DPMS_OFF), + .pwr.hsync = (mode != DRM_MODE_DPMS_STANDBY && + mode != DRM_MODE_DPMS_OFF), + }; - nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl); + nvif_mthd(disp->disp, 0, &args, sizeof(args)); } static bool @@ -1514,7 +1506,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, push = evo_wait(mast, 8); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { u32 syncs = 0x00000000; if (mode->flags & DRM_MODE_FLAG_NHSYNC) @@ -1563,7 +1555,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder) push = evo_wait(mast, 4); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0400 + (or * 0x080), 1); evo_data(push, 0x00000000); } else { @@ -1580,14 +1572,25 @@ nv50_dac_disconnect(struct drm_encoder *encoder) static enum drm_connector_status nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_disp *disp = nv50_disp(encoder->dev); - int ret, or = nouveau_encoder(encoder)->or; - u32 load = nouveau_drm(encoder->dev)->vbios.dactestval; - if (load == 0) - load = 340; + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_dac_load_v0 load; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_DAC_LOAD, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + }; + int ret; + + args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval; + if (args.load.data == 0) + args.load.data = 340; - ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load)); - if (ret || !load) + ret = nvif_mthd(disp->disp, 0, &args, sizeof(args)); + if (ret || !args.load.load) return connector_status_disconnected; return connector_status_connected; @@ -1619,7 +1622,7 @@ static int nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; int type = DRM_MODE_ENCODER_DAC; @@ -1650,16 +1653,25 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_connector *nv_connector; struct nv50_disp *disp = nv50_disp(encoder->dev); + struct { + struct 
nv50_disp_mthd_v1 base; + struct nv50_disp_sor_hda_eld_v0 eld; + u8 data[sizeof(nv_connector->base.eld)]; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + }; nv_connector = nouveau_encoder_connector_get(nv_encoder); if (!drm_detect_monitor_audio(nv_connector->edid)) return; drm_edid_to_eld(&nv_connector->base, nv_connector->edid); + memcpy(args.data, nv_connector->base.eld, sizeof(args.data)); - nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, - nv_connector->base.eld, - nv_connector->base.eld[2] * 4); + nvif_mthd(disp->disp, 0, &args, sizeof(args)); } static void @@ -1667,8 +1679,17 @@ nv50_audio_disconnect(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_disp *disp = nv50_disp(encoder->dev); + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_sor_hda_eld_v0 eld; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + }; - nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0); + nvif_mthd(disp->disp, 0, &args, sizeof(args)); } /****************************************************************************** @@ -1679,10 +1700,20 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); - struct nouveau_connector *nv_connector; struct nv50_disp *disp = nv50_disp(encoder->dev); - const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; - u32 rekey = 56; /* binary driver, and tegra constant */ + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_sor_hdmi_pwr_v0 pwr; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) | + (0x0100 << nv_crtc->index), + .pwr.state = 1, + .pwr.rekey = 56, /* binary driver, and tegra, constant */ + }; + struct nouveau_connector *nv_connector; u32 max_ac_packet; nv_connector = nouveau_encoder_connector_get(nv_encoder); @@ -1690,14 +1721,11 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode) return; max_ac_packet = mode->htotal - mode->hdisplay; - max_ac_packet -= rekey; + max_ac_packet -= args.pwr.rekey; max_ac_packet -= 18; /* constant from tegra */ - max_ac_packet /= 32; - - nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, - NV84_DISP_SOR_HDMI_PWR_STATE_ON | - (max_ac_packet << 16) | rekey); + args.pwr.max_ac_packet = max_ac_packet / 32; + nvif_mthd(disp->disp, 0, &args, sizeof(args)); nv50_audio_mode_set(encoder, mode); } @@ -1706,11 +1734,20 @@ nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_disp *disp = nv50_disp(encoder->dev); - const u32 moff = (nv_crtc->index << 3) | nv_encoder->or; + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_sor_hdmi_pwr_v0 pwr; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) | + (0x0100 << nv_crtc->index), + }; nv50_audio_disconnect(encoder); - nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000); + nvif_mthd(disp->disp, 0, &args, sizeof(args)); } 
/****************************************************************************** @@ -1720,10 +1757,29 @@ static void nv50_sor_dpms(struct drm_encoder *encoder, int mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nv50_disp *disp = nv50_disp(encoder->dev); + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_sor_pwr_v0 pwr; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_SOR_PWR, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + .pwr.state = mode == DRM_MODE_DPMS_ON, + }; + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_sor_dp_pwr_v0 pwr; + } link = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + .pwr.state = mode == DRM_MODE_DPMS_ON, + }; struct drm_device *dev = encoder->dev; - struct nv50_disp *disp = nv50_disp(dev); struct drm_encoder *partner; - u32 mthd; nv_encoder->last_dpms = mode; @@ -1741,18 +1797,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) } } - mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3; - mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2; - mthd |= nv_encoder->or; - if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { - nv_call(disp->core, NV50_DISP_SOR_PWR | mthd, 1); - mthd |= NV94_DISP_SOR_DP_PWR; + args.pwr.state = 1; + nvif_mthd(disp->disp, 0, &args, sizeof(args)); + nvif_mthd(disp->disp, 0, &link, sizeof(link)); } else { - mthd |= NV50_DISP_SOR_PWR; + nvif_mthd(disp->disp, 0, &args, sizeof(args)); } - - nv_call(disp->core, mthd, (mode == DRM_MODE_DPMS_ON)); } static bool @@ -1781,7 +1832,7 @@ nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data) struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev); u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push; if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1); evo_data(push, (nv_encoder->ctrl = temp)); } else { @@ -1817,15 +1868,24 @@ static void nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, struct drm_display_mode *mode) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_sor_lvds_script_v0 lvds; + } lvds = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + }; struct nv50_disp *disp = nv50_disp(encoder->dev); struct nv50_mast *mast = nv50_mast(encoder->dev); struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nouveau_connector *nv_connector; struct nvbios *bios = &drm->vbios; - u32 lvds = 0, mask, ctrl; + u32 mask, ctrl; u8 owner = 1 << nv_crtc->index; u8 proto = 0xf; u8 depth = 0x0; @@ -1851,31 +1911,31 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, if (bios->fp_no_ddc) { if (bios->fp.dual_link) - lvds |= 0x0100; + lvds.lvds.script |= 0x0100; if (bios->fp.if_is_24bit) - lvds |= 0x0200; + lvds.lvds.script |= 0x0200; } else { if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { if (((u8 *)nv_connector->edid)[121] == 2) - lvds |= 0x0100; + 
lvds.lvds.script |= 0x0100; } else if (mode->clock >= bios->fp.duallink_transition_clk) { - lvds |= 0x0100; + lvds.lvds.script |= 0x0100; } - if (lvds & 0x0100) { + if (lvds.lvds.script & 0x0100) { if (bios->fp.strapless_is_24bit & 2) - lvds |= 0x0200; + lvds.lvds.script |= 0x0200; } else { if (bios->fp.strapless_is_24bit & 1) - lvds |= 0x0200; + lvds.lvds.script |= 0x0200; } if (nv_connector->base.display_info.bpc == 8) - lvds |= 0x0200; + lvds.lvds.script |= 0x0200; } - nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds); + nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds)); break; case DCB_OUTPUT_DP: if (nv_connector->base.display_info.bpc == 6) { @@ -1902,7 +1962,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON); - if (nv50_vers(mast) >= NVD0_DISP_CLASS) { + if (nv50_vers(mast) >= GF110_DISP) { u32 *push = evo_wait(mast, 3); if (push) { u32 magic = 0x31ec6000 | (nv_crtc->index << 25); @@ -1961,7 +2021,7 @@ static int nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; int type; @@ -2002,9 +2062,19 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_disp *disp = nv50_disp(encoder->dev); - u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or; - u32 ctrl = (mode == DRM_MODE_DPMS_ON); - nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl); + struct { + struct nv50_disp_mthd_v1 base; + struct nv50_disp_pior_pwr_v0 pwr; + } args = { + .base.version = 1, + .base.method = NV50_DISP_MTHD_V1_PIOR_PWR, + .base.hasht = nv_encoder->dcb->hasht, + .base.hashm = nv_encoder->dcb->hashm, + .pwr.state = mode == DRM_MODE_DPMS_ON, + .pwr.type = nv_encoder->dcb->type, + }; + + nvif_mthd(disp->disp, 0, &args, sizeof(args)); } static bool @@ -2067,7 +2137,7 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, push = evo_wait(mast, 8); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { u32 ctrl = (depth << 16) | (proto << 8) | owner; if (mode->flags & DRM_MODE_FLAG_NHSYNC) ctrl |= 0x00001000; @@ -2096,7 +2166,7 @@ nv50_pior_disconnect(struct drm_encoder *encoder) push = evo_wait(mast, 4); if (push) { - if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) { + if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) { evo_mthd(push, 0x0700 + (or * 0x040), 1); evo_data(push, 0x00000000); } @@ -2132,7 +2202,7 @@ static int nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nouveau_i2c *i2c = nouveau_i2c(drm->device); + struct nouveau_i2c *i2c = nvkm_i2c(&drm->device); struct nouveau_i2c_port *ddc = NULL; struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; @@ -2169,8 +2239,151 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) } /****************************************************************************** + * Framebuffer + *****************************************************************************/ + +static void +nv50_fbdma_fini(struct nv50_fbdma *fbdma) +{ + int i; + for (i = 0; i < ARRAY_SIZE(fbdma->base); i++) + nvif_object_fini(&fbdma->base[i]); + 
nvif_object_fini(&fbdma->core); + list_del(&fbdma->head); + kfree(fbdma); +} + +static int +nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nv50_disp *disp = nv50_disp(dev); + struct nv50_mast *mast = nv50_mast(dev); + struct __attribute__ ((packed)) { + struct nv_dma_v0 base; + union { + struct nv50_dma_v0 nv50; + struct gf100_dma_v0 gf100; + struct gf110_dma_v0 gf110; + }; + } args = {}; + struct nv50_fbdma *fbdma; + struct drm_crtc *crtc; + u32 size = sizeof(args.base); + int ret; + + list_for_each_entry(fbdma, &disp->fbdma, head) { + if (fbdma->core.handle == name) + return 0; + } + + fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL); + if (!fbdma) + return -ENOMEM; + list_add(&fbdma->head, &disp->fbdma); + + args.base.target = NV_DMA_V0_TARGET_VRAM; + args.base.access = NV_DMA_V0_ACCESS_RDWR; + args.base.start = offset; + args.base.limit = offset + length - 1; + + if (drm->device.info.chipset < 0x80) { + args.nv50.part = NV50_DMA_V0_PART_256; + size += sizeof(args.nv50); + } else + if (drm->device.info.chipset < 0xc0) { + args.nv50.part = NV50_DMA_V0_PART_256; + args.nv50.kind = kind; + size += sizeof(args.nv50); + } else + if (drm->device.info.chipset < 0xd0) { + args.gf100.kind = kind; + size += sizeof(args.gf100); + } else { + args.gf110.page = GF110_DMA_V0_PAGE_LP; + args.gf110.kind = kind; + size += sizeof(args.gf110); + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct nv50_head *head = nv50_head(crtc); + int ret = nvif_object_init(&head->sync.base.base.user, NULL, + name, NV_DMA_IN_MEMORY, &args, size, + &fbdma->base[head->base.index]); + if (ret) { + nv50_fbdma_fini(fbdma); + return ret; + } + } + + ret = nvif_object_init(&mast->base.base.user, NULL, name, + NV_DMA_IN_MEMORY, &args, size, + &fbdma->core); + if (ret) { + nv50_fbdma_fini(fbdma); + return ret; + } + + return 0; +} + +static void +nv50_fb_dtor(struct drm_framebuffer *fb) +{ +} + +static int +nv50_fb_ctor(struct drm_framebuffer *fb) +{ + struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); + struct nouveau_drm *drm = nouveau_drm(fb->dev); + struct nouveau_bo *nvbo = nv_fb->nvbo; + struct nv50_disp *disp = nv50_disp(fb->dev); + u8 kind = nouveau_bo_tile_layout(nvbo) >> 8; + u8 tile = nvbo->tile_mode; + + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { + NV_ERROR(drm, "framebuffer requires contiguous bo\n"); + return -EINVAL; + } + + if (drm->device.info.chipset >= 0xc0) + tile >>= 4; /* yep.. */ + + switch (fb->depth) { + case 8: nv_fb->r_format = 0x1e00; break; + case 15: nv_fb->r_format = 0xe900; break; + case 16: nv_fb->r_format = 0xe800; break; + case 24: + case 32: nv_fb->r_format = 0xcf00; break; + case 30: nv_fb->r_format = 0xd100; break; + default: + NV_ERROR(drm, "unknown depth %d\n", fb->depth); + return -EINVAL; + } + + if (disp->disp->oclass < G82_DISP) { + nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) : + (fb->pitches[0] | 0x00100000); + nv_fb->r_format |= kind << 16; + } else + if (disp->disp->oclass < GF110_DISP) { + nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) : + (fb->pitches[0] | 0x00100000); + } else { + nv_fb->r_pitch = kind ? 
(((fb->pitches[0] / 4) << 4) | tile) : + (fb->pitches[0] | 0x01000000); + } + nv_fb->r_handle = 0xffff0000 | kind; + + return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0, + drm->device.info.ram_user, kind); +} + +/****************************************************************************** * Init *****************************************************************************/ + void nv50_display_fini(struct drm_device *dev) { @@ -2193,7 +2406,7 @@ nv50_display_init(struct drm_device *dev) } evo_mthd(push, 0x0088, 1); - evo_data(push, NvEvoSync); + evo_data(push, nv50_mast(dev)->base.sync.handle); evo_kick(push, nv50_mast(dev)); return 0; } @@ -2202,8 +2415,13 @@ void nv50_display_destroy(struct drm_device *dev) { struct nv50_disp *disp = nv50_disp(dev); + struct nv50_fbdma *fbdma, *fbtmp; + + list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) { + nv50_fbdma_fini(fbdma); + } - nv50_dmac_destroy(disp->core, &disp->mast.base); + nv50_dmac_destroy(&disp->mast.base, disp->disp); nouveau_bo_unmap(disp->sync); if (disp->sync) @@ -2217,7 +2435,7 @@ nv50_display_destroy(struct drm_device *dev) int nv50_display_create(struct drm_device *dev) { - struct nouveau_device *device = nouveau_dev(dev); + struct nvif_device *device = &nouveau_drm(dev)->device; struct nouveau_drm *drm = nouveau_drm(dev); struct dcb_table *dcb = &drm->vbios.dcb; struct drm_connector *connector, *tmp; @@ -2228,12 +2446,15 @@ nv50_display_create(struct drm_device *dev) disp = kzalloc(sizeof(*disp), GFP_KERNEL); if (!disp) return -ENOMEM; + INIT_LIST_HEAD(&disp->fbdma); nouveau_display(dev)->priv = disp; nouveau_display(dev)->dtor = nv50_display_destroy; nouveau_display(dev)->init = nv50_display_init; nouveau_display(dev)->fini = nv50_display_fini; - disp->core = nouveau_display(dev)->core; + nouveau_display(dev)->fb_ctor = nv50_fb_ctor; + nouveau_display(dev)->fb_dtor = nv50_fb_dtor; + disp->disp = &nouveau_display(dev)->disp; /* small shared memory area we use for notifiers and semaphores */ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, @@ -2253,22 +2474,19 @@ nv50_display_create(struct drm_device *dev) goto out; /* allocate master evo channel */ - ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0, - &(struct nv50_display_mast_class) { - .pushbuf = EVO_PUSH_HANDLE(MAST, 0), - }, sizeof(struct nv50_display_mast_class), - disp->sync->bo.offset, &disp->mast.base); + ret = nv50_core_create(disp->disp, disp->sync->bo.offset, + &disp->mast); if (ret) goto out; /* create crtc objects to represent the hw heads */ - if (nv_mclass(disp->core) >= NVD0_DISP_CLASS) - crtcs = nv_rd32(device, 0x022448); + if (disp->disp->oclass >= GF110_DISP) + crtcs = nvif_rd32(device, 0x022448); else crtcs = 2; for (i = 0; i < crtcs; i++) { - ret = nv50_crtc_create(dev, disp->core, i); + ret = nv50_crtc_create(dev, i); if (ret) goto out; } diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 52068a0910dc..394c89abcc97 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -154,7 +154,6 @@ nv50_fbcon_accel_init(struct fb_info *info) struct drm_device *dev = nfbdev->dev; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_channel *chan = drm->channel; - struct nouveau_object *object; int ret, format; switch (info->var.bits_per_pixel) { @@ -184,8 +183,8 @@ nv50_fbcon_accel_init(struct fb_info *info) return -EINVAL; } - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, - 0x502d, NULL, 0, &object); + ret = 
nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0, + &nfbdev->twod); if (ret) return ret; @@ -196,11 +195,11 @@ nv50_fbcon_accel_init(struct fb_info *info) } BEGIN_NV04(chan, NvSub2D, 0x0000, 1); - OUT_RING(chan, Nv2D); + OUT_RING(chan, nfbdev->twod.handle); BEGIN_NV04(chan, NvSub2D, 0x0184, 3); - OUT_RING(chan, NvDmaFB); - OUT_RING(chan, NvDmaFB); - OUT_RING(chan, NvDmaFB); + OUT_RING(chan, chan->vram.handle); + OUT_RING(chan, chan->vram.handle); + OUT_RING(chan, chan->vram.handle); BEGIN_NV04(chan, NvSub2D, 0x0290, 1); OUT_RING(chan, 0); BEGIN_NV04(chan, NvSub2D, 0x0888, 1); diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 0ee363840035..195cf51a7c31 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -22,8 +22,8 @@ * Authors: Ben Skeggs <bskeggs@redhat.com> */ -#include <core/object.h> -#include <core/class.h> +#include <nvif/os.h> +#include <nvif/class.h> #include "nouveau_drm.h" #include "nouveau_dma.h" @@ -38,7 +38,6 @@ nv50_fence_context_new(struct nouveau_channel *chan) struct nv10_fence_priv *priv = chan->drm->fence; struct nv10_fence_chan *fctx; struct ttm_mem_reg *mem = &priv->bo->bo.mem; - struct nouveau_object *object; u32 start = mem->start * PAGE_SIZE; u32 limit = start + mem->size - 1; int ret, i; @@ -52,15 +51,14 @@ nv50_fence_context_new(struct nouveau_channel *chan) fctx->base.read = nv10_fence_read; fctx->base.sync = nv17_fence_sync; - ret = nouveau_object_new(nv_object(chan->cli), chan->handle, - NvSema, 0x003d, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, + ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY, + &(struct nv_dma_v0) { + .target = NV_DMA_V0_TARGET_VRAM, + .access = NV_DMA_V0_ACCESS_RDWR, .start = start, .limit = limit, - }, sizeof(struct nv_dma_class), - &object); + }, sizeof(struct nv_dma_v0), + &fctx->sema); /* dma objects for display sync channel semaphore blocks */ for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { @@ -68,15 +66,14 @@ nv50_fence_context_new(struct nouveau_channel *chan) u32 start = bo->bo.mem.start * PAGE_SIZE; u32 limit = start + bo->bo.mem.size - 1; - ret = nouveau_object_new(nv_object(chan->cli), chan->handle, - NvEvoSema0 + i, 0x003d, - &(struct nv_dma_class) { - .flags = NV_DMA_TARGET_VRAM | - NV_DMA_ACCESS_RDWR, + ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i, + NV_DMA_IN_MEMORY, &(struct nv_dma_v0) { + .target = NV_DMA_V0_TARGET_VRAM, + .access = NV_DMA_V0_ACCESS_RDWR, .start = start, .limit = limit, - }, sizeof(struct nv_dma_class), - &object); + }, sizeof(struct nv_dma_v0), + &fctx->head[i]); } if (ret) diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 9fd475c89820..933a779c93ab 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -22,12 +22,6 @@ * Authors: Ben Skeggs */ -#include <core/object.h> -#include <core/client.h> -#include <core/class.h> - -#include <engine/fifo.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fence.h" @@ -47,7 +41,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence) int ret = RING_SPACE(chan, 8); if (ret == 0) { BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); - OUT_RING (chan, chan->vram); + OUT_RING (chan, chan->vram.handle); BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5); OUT_RING (chan, upper_32_bits(virtual)); OUT_RING (chan, lower_32_bits(virtual)); @@ -65,7 +59,7 @@ 
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence) int ret = RING_SPACE(chan, 7); if (ret == 0) { BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); - OUT_RING (chan, chan->vram); + OUT_RING (chan, chan->vram.handle); BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); OUT_RING (chan, upper_32_bits(virtual)); OUT_RING (chan, lower_32_bits(virtual)); @@ -81,8 +75,7 @@ nv84_fence_emit(struct nouveau_fence *fence) { struct nouveau_channel *chan = fence->channel; struct nv84_fence_chan *fctx = chan->fence; - struct nouveau_fifo_chan *fifo = (void *)chan->object; - u64 addr = fifo->chid * 16; + u64 addr = chan->chid * 16; if (fence->sysmem) addr += fctx->vma_gart.offset; @@ -97,8 +90,7 @@ nv84_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *prev, struct nouveau_channel *chan) { struct nv84_fence_chan *fctx = chan->fence; - struct nouveau_fifo_chan *fifo = (void *)prev->object; - u64 addr = fifo->chid * 16; + u64 addr = prev->chid * 16; if (fence->sysmem) addr += fctx->vma_gart.offset; @@ -111,9 +103,8 @@ nv84_fence_sync(struct nouveau_fence *fence, static u32 nv84_fence_read(struct nouveau_channel *chan) { - struct nouveau_fifo_chan *fifo = (void *)chan->object; struct nv84_fence_priv *priv = chan->drm->fence; - return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4); + return nouveau_bo_rd32(priv->bo, chan->chid * 16/4); } static void @@ -139,8 +130,7 @@ nv84_fence_context_del(struct nouveau_channel *chan) int nv84_fence_context_new(struct nouveau_channel *chan) { - struct nouveau_fifo_chan *fifo = (void *)chan->object; - struct nouveau_client *client = nouveau_client(fifo); + struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base); struct nv84_fence_priv *priv = chan->drm->fence; struct nv84_fence_chan *fctx; int ret, i; @@ -156,19 +146,19 @@ nv84_fence_context_new(struct nouveau_channel *chan) fctx->base.emit32 = nv84_fence_emit32; fctx->base.sync32 = nv84_fence_sync32; - ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma); + ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); if (ret == 0) { - ret = nouveau_bo_vma_add(priv->bo_gart, client->vm, + ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, &fctx->vma_gart); } /* map display semaphore buffers into channel's vm */ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); - ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]); + ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]); } - nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000); + nouveau_bo_wr32(priv->bo, chan->chid * 16/4, 0x00000000); if (ret) nv84_fence_context_del(chan); @@ -178,7 +168,7 @@ nv84_fence_context_new(struct nouveau_channel *chan) static bool nv84_fence_suspend(struct nouveau_drm *drm) { - struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); + struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device); struct nv84_fence_priv *priv = drm->fence; int i; @@ -194,7 +184,7 @@ nv84_fence_suspend(struct nouveau_drm *drm) static void nv84_fence_resume(struct nouveau_drm *drm) { - struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); + struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device); struct nv84_fence_priv *priv = drm->fence; int i; @@ -225,7 +215,7 @@ nv84_fence_destroy(struct nouveau_drm *drm) int nv84_fence_create(struct nouveau_drm *drm) { - struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); + struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device); struct nv84_fence_priv *priv; int ret; diff --git 
a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 9dcd30f3e1e0..61246677e8dc 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -154,11 +154,10 @@ nvc0_fbcon_accel_init(struct fb_info *info) struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_channel *chan = drm->channel; - struct nouveau_object *object; int ret, format; - ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D, - 0x902d, NULL, 0, &object); + ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0, + &nfbdev->twod); if (ret) return ret; @@ -197,7 +196,7 @@ nvc0_fbcon_accel_init(struct fb_info *info) } BEGIN_NVC0(chan, NvSub2D, 0x0000, 1); - OUT_RING (chan, 0x0000902d); + OUT_RING (chan, nfbdev->twod.handle); BEGIN_NVC0(chan, NvSub2D, 0x0290, 1); OUT_RING (chan, 0); BEGIN_NVC0(chan, NvSub2D, 0x0888, 1); diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c index 9566267fbc42..becf19abda2d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fence.c +++ b/drivers/gpu/drm/nouveau/nvc0_fence.c @@ -22,12 +22,6 @@ * Authors: Ben Skeggs */ -#include <core/object.h> -#include <core/client.h> -#include <core/class.h> - -#include <engine/fifo.h> - #include "nouveau_drm.h" #include "nouveau_dma.h" #include "nouveau_fence.h" diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h new file mode 100644 index 000000000000..573491f84792 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/class.h @@ -0,0 +1,558 @@ +#ifndef __NVIF_CLASS_H__ +#define __NVIF_CLASS_H__ + +/******************************************************************************* + * class identifiers + ******************************************************************************/ + +/* the below match nvidia-assigned (either in hw, or sw) class numbers */ +#define NV_DEVICE 0x00000080 + +#define NV_DMA_FROM_MEMORY 0x00000002 +#define NV_DMA_TO_MEMORY 0x00000003 +#define NV_DMA_IN_MEMORY 0x0000003d + +#define NV04_DISP 0x00000046 + +#define NV03_CHANNEL_DMA 0x0000006b +#define NV10_CHANNEL_DMA 0x0000006e +#define NV17_CHANNEL_DMA 0x0000176e +#define NV40_CHANNEL_DMA 0x0000406e +#define NV50_CHANNEL_DMA 0x0000506e +#define G82_CHANNEL_DMA 0x0000826e + +#define NV50_CHANNEL_GPFIFO 0x0000506f +#define G82_CHANNEL_GPFIFO 0x0000826f +#define FERMI_CHANNEL_GPFIFO 0x0000906f +#define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f + +#define NV50_DISP 0x00005070 +#define G82_DISP 0x00008270 +#define GT200_DISP 0x00008370 +#define GT214_DISP 0x00008570 +#define GT206_DISP 0x00008870 +#define GF110_DISP 0x00009070 +#define GK104_DISP 0x00009170 +#define GK110_DISP 0x00009270 +#define GM107_DISP 0x00009470 + +#define NV50_DISP_CURSOR 0x0000507a +#define G82_DISP_CURSOR 0x0000827a +#define GT214_DISP_CURSOR 0x0000857a +#define GF110_DISP_CURSOR 0x0000907a +#define GK104_DISP_CURSOR 0x0000917a + +#define NV50_DISP_OVERLAY 0x0000507b +#define G82_DISP_OVERLAY 0x0000827b +#define GT214_DISP_OVERLAY 0x0000857b +#define GF110_DISP_OVERLAY 0x0000907b +#define GK104_DISP_OVERLAY 0x0000917b + +#define NV50_DISP_BASE_CHANNEL_DMA 0x0000507c +#define G82_DISP_BASE_CHANNEL_DMA 0x0000827c +#define GT200_DISP_BASE_CHANNEL_DMA 0x0000837c +#define GT214_DISP_BASE_CHANNEL_DMA 0x0000857c +#define GF110_DISP_BASE_CHANNEL_DMA 0x0000907c +#define GK104_DISP_BASE_CHANNEL_DMA 0x0000917c +#define GK110_DISP_BASE_CHANNEL_DMA 0x0000927c + +#define NV50_DISP_CORE_CHANNEL_DMA 0x0000507d +#define 
G82_DISP_CORE_CHANNEL_DMA 0x0000827d +#define GT200_DISP_CORE_CHANNEL_DMA 0x0000837d +#define GT214_DISP_CORE_CHANNEL_DMA 0x0000857d +#define GT206_DISP_CORE_CHANNEL_DMA 0x0000887d +#define GF110_DISP_CORE_CHANNEL_DMA 0x0000907d +#define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d +#define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d +#define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d + +#define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e +#define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e +#define GT200_DISP_OVERLAY_CHANNEL_DMA 0x0000837e +#define GT214_DISP_OVERLAY_CHANNEL_DMA 0x0000857e +#define GF110_DISP_OVERLAY_CONTROL_DMA 0x0000907e +#define GK104_DISP_OVERLAY_CONTROL_DMA 0x0000917e + +#define FERMI_A 0x00009097 +#define FERMI_B 0x00009197 +#define FERMI_C 0x00009297 + +#define KEPLER_A 0x0000a097 +#define KEPLER_B 0x0000a197 +#define KEPLER_C 0x0000a297 + +#define MAXWELL_A 0x0000b097 + +#define FERMI_COMPUTE_A 0x000090c0 +#define FERMI_COMPUTE_B 0x000091c0 + +#define KEPLER_COMPUTE_A 0x0000a0c0 +#define KEPLER_COMPUTE_B 0x0000a1c0 + +#define MAXWELL_COMPUTE_A 0x0000b0c0 + + +/******************************************************************************* + * client + ******************************************************************************/ + +#define NV_CLIENT_DEVLIST 0x00 + +struct nv_client_devlist_v0 { + __u8 version; + __u8 count; + __u8 pad02[6]; + __u64 device[]; +}; + + +/******************************************************************************* + * device + ******************************************************************************/ + +struct nv_device_v0 { + __u8 version; + __u8 pad01[7]; + __u64 device; /* device identifier, ~0 for client default */ +#define NV_DEVICE_V0_DISABLE_IDENTIFY 0x0000000000000001ULL +#define NV_DEVICE_V0_DISABLE_MMIO 0x0000000000000002ULL +#define NV_DEVICE_V0_DISABLE_VBIOS 0x0000000000000004ULL +#define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL +#define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL +#define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL +#define NV_DEVICE_V0_DISABLE_GRAPH 0x0000000100000000ULL +#define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL +#define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL +#define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL +#define NV_DEVICE_V0_DISABLE_CRYPT 0x0000001000000000ULL +#define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL +#define NV_DEVICE_V0_DISABLE_PPP 0x0000004000000000ULL +#define NV_DEVICE_V0_DISABLE_COPY0 0x0000008000000000ULL +#define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL +#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL +#define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL + __u64 disable; /* disable particular subsystems */ + __u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */ +}; + +#define NV_DEVICE_V0_INFO 0x00 + +struct nv_device_info_v0 { + __u8 version; +#define NV_DEVICE_INFO_V0_IGP 0x00 +#define NV_DEVICE_INFO_V0_PCI 0x01 +#define NV_DEVICE_INFO_V0_AGP 0x02 +#define NV_DEVICE_INFO_V0_PCIE 0x03 +#define NV_DEVICE_INFO_V0_SOC 0x04 + __u8 platform; + __u16 chipset; /* from NV_PMC_BOOT_0 */ + __u8 revision; /* from NV_PMC_BOOT_0 */ +#define NV_DEVICE_INFO_V0_TNT 0x01 +#define NV_DEVICE_INFO_V0_CELSIUS 0x02 +#define NV_DEVICE_INFO_V0_KELVIN 0x03 +#define NV_DEVICE_INFO_V0_RANKINE 0x04 +#define NV_DEVICE_INFO_V0_CURIE 0x05 +#define NV_DEVICE_INFO_V0_TESLA 0x06 +#define NV_DEVICE_INFO_V0_FERMI 0x07 +#define NV_DEVICE_INFO_V0_KEPLER 0x08 +#define NV_DEVICE_INFO_V0_MAXWELL 0x09 + __u8 family; + __u8 pad06[2]; + __u64 ram_size; + __u64 
ram_user; +}; + + +/******************************************************************************* + * context dma + ******************************************************************************/ + +struct nv_dma_v0 { + __u8 version; +#define NV_DMA_V0_TARGET_VM 0x00 +#define NV_DMA_V0_TARGET_VRAM 0x01 +#define NV_DMA_V0_TARGET_PCI 0x02 +#define NV_DMA_V0_TARGET_PCI_US 0x03 +#define NV_DMA_V0_TARGET_AGP 0x04 + __u8 target; +#define NV_DMA_V0_ACCESS_VM 0x00 +#define NV_DMA_V0_ACCESS_RD 0x01 +#define NV_DMA_V0_ACCESS_WR 0x02 +#define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR) + __u8 access; + __u8 pad03[5]; + __u64 start; + __u64 limit; + /* ... chipset-specific class data */ +}; + +struct nv50_dma_v0 { + __u8 version; +#define NV50_DMA_V0_PRIV_VM 0x00 +#define NV50_DMA_V0_PRIV_US 0x01 +#define NV50_DMA_V0_PRIV__S 0x02 + __u8 priv; +#define NV50_DMA_V0_PART_VM 0x00 +#define NV50_DMA_V0_PART_256 0x01 +#define NV50_DMA_V0_PART_1KB 0x02 + __u8 part; +#define NV50_DMA_V0_COMP_NONE 0x00 +#define NV50_DMA_V0_COMP_1 0x01 +#define NV50_DMA_V0_COMP_2 0x02 +#define NV50_DMA_V0_COMP_VM 0x03 + __u8 comp; +#define NV50_DMA_V0_KIND_PITCH 0x00 +#define NV50_DMA_V0_KIND_VM 0x7f + __u8 kind; + __u8 pad05[3]; +}; + +struct gf100_dma_v0 { + __u8 version; +#define GF100_DMA_V0_PRIV_VM 0x00 +#define GF100_DMA_V0_PRIV_US 0x01 +#define GF100_DMA_V0_PRIV__S 0x02 + __u8 priv; +#define GF100_DMA_V0_KIND_PITCH 0x00 +#define GF100_DMA_V0_KIND_VM 0xff + __u8 kind; + __u8 pad03[5]; +}; + +struct gf110_dma_v0 { + __u8 version; +#define GF110_DMA_V0_PAGE_LP 0x00 +#define GF110_DMA_V0_PAGE_SP 0x01 + __u8 page; +#define GF110_DMA_V0_KIND_PITCH 0x00 +#define GF110_DMA_V0_KIND_VM 0xff + __u8 kind; + __u8 pad03[5]; +}; + + +/******************************************************************************* + * perfmon + ******************************************************************************/ + +struct nvif_perfctr_v0 { + __u8 version; + __u8 pad01[1]; + __u16 logic_op; + __u8 pad04[4]; + char name[4][64]; +}; + +#define NVIF_PERFCTR_V0_QUERY 0x00 +#define NVIF_PERFCTR_V0_SAMPLE 0x01 +#define NVIF_PERFCTR_V0_READ 0x02 + +struct nvif_perfctr_query_v0 { + __u8 version; + __u8 pad01[3]; + __u32 iter; + char name[64]; +}; + +struct nvif_perfctr_sample { +}; + +struct nvif_perfctr_read_v0 { + __u8 version; + __u8 pad01[7]; + __u32 ctr; + __u32 clk; +}; + + +/******************************************************************************* + * device control + ******************************************************************************/ + +#define NVIF_CONTROL_PSTATE_INFO 0x00 +#define NVIF_CONTROL_PSTATE_ATTR 0x01 +#define NVIF_CONTROL_PSTATE_USER 0x02 + +struct nvif_control_pstate_info_v0 { + __u8 version; + __u8 count; /* out: number of power states */ +#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE (-1) +#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_PERFMON (-2) + __s8 ustate_ac; /* out: target pstate index */ + __s8 ustate_dc; /* out: target pstate index */ + __s8 pwrsrc; /* out: current power source */ +#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN (-1) +#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_PERFMON (-2) + __s8 pstate; /* out: current pstate index */ + __u8 pad06[2]; +}; + +struct nvif_control_pstate_attr_v0 { + __u8 version; +#define NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT (-1) + __s8 state; /* in: index of pstate to query + * out: pstate identifier + */ + __u8 index; /* in: index of attribute to query + * out: index of next attribute, or 0 if no more + */ + __u8 pad03[5]; + __u32 min; 
+ __u32 max; + char name[32]; + char unit[16]; +}; + +struct nvif_control_pstate_user_v0 { + __u8 version; +#define NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN (-1) +#define NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON (-2) + __s8 ustate; /* in: pstate identifier */ + __s8 pwrsrc; /* in: target power source */ + __u8 pad03[5]; +}; + + +/******************************************************************************* + * DMA FIFO channels + ******************************************************************************/ + +struct nv03_channel_dma_v0 { + __u8 version; + __u8 chid; + __u8 pad02[2]; + __u32 pushbuf; + __u64 offset; +}; + +#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00 + +/******************************************************************************* + * GPFIFO channels + ******************************************************************************/ + +struct nv50_channel_gpfifo_v0 { + __u8 version; + __u8 chid; + __u8 pad01[6]; + __u32 pushbuf; + __u32 ilength; + __u64 ioffset; +}; + +struct kepler_channel_gpfifo_a_v0 { + __u8 version; +#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01 +#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_VP 0x02 +#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_PPP 0x04 +#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_BSP 0x08 +#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10 +#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20 +#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40 + __u8 engine; + __u16 chid; + __u8 pad04[4]; + __u32 pushbuf; + __u32 ilength; + __u64 ioffset; +}; + +/******************************************************************************* + * legacy display + ******************************************************************************/ + +#define NV04_DISP_NTFY_VBLANK 0x00 +#define NV04_DISP_NTFY_CONN 0x01 + +struct nv04_disp_mthd_v0 { + __u8 version; +#define NV04_DISP_SCANOUTPOS 0x00 + __u8 method; + __u8 head; + __u8 pad03[5]; +}; + +struct nv04_disp_scanoutpos_v0 { + __u8 version; + __u8 pad01[7]; + __s64 time[2]; + __u16 vblanks; + __u16 vblanke; + __u16 vtotal; + __u16 vline; + __u16 hblanks; + __u16 hblanke; + __u16 htotal; + __u16 hline; +}; + +/******************************************************************************* + * display + ******************************************************************************/ + +#define NV50_DISP_MTHD 0x00 + +struct nv50_disp_mthd_v0 { + __u8 version; +#define NV50_DISP_SCANOUTPOS 0x00 + __u8 method; + __u8 head; + __u8 pad03[5]; +}; + +struct nv50_disp_mthd_v1 { + __u8 version; +#define NV50_DISP_MTHD_V1_DAC_PWR 0x10 +#define NV50_DISP_MTHD_V1_DAC_LOAD 0x11 +#define NV50_DISP_MTHD_V1_SOR_PWR 0x20 +#define NV50_DISP_MTHD_V1_SOR_HDA_ELD 0x21 +#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22 +#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23 +#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24 +#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30 + __u8 method; + __u16 hasht; + __u16 hashm; + __u8 pad06[2]; +}; + +struct nv50_disp_dac_pwr_v0 { + __u8 version; + __u8 state; + __u8 data; + __u8 vsync; + __u8 hsync; + __u8 pad05[3]; +}; + +struct nv50_disp_dac_load_v0 { + __u8 version; + __u8 load; + __u8 pad02[2]; + __u32 data; +}; + +struct nv50_disp_sor_pwr_v0 { + __u8 version; + __u8 state; + __u8 pad02[6]; +}; + +struct nv50_disp_sor_hda_eld_v0 { + __u8 version; + __u8 pad01[7]; + __u8 data[]; +}; + +struct nv50_disp_sor_hdmi_pwr_v0 { + __u8 version; + __u8 state; + __u8 max_ac_packet; + __u8 rekey; + __u8 pad04[4]; +}; + +struct nv50_disp_sor_lvds_script_v0 { + __u8 version; + __u8 pad01[1]; + __u16 script; + __u8 pad04[4]; +}; + 
+struct nv50_disp_sor_dp_pwr_v0 { + __u8 version; + __u8 state; + __u8 pad02[6]; +}; + +struct nv50_disp_pior_pwr_v0 { + __u8 version; + __u8 state; + __u8 type; + __u8 pad03[5]; +}; + +/* core */ +struct nv50_disp_core_channel_dma_v0 { + __u8 version; + __u8 pad01[3]; + __u32 pushbuf; +}; + +/* cursor immediate */ +struct nv50_disp_cursor_v0 { + __u8 version; + __u8 head; + __u8 pad02[6]; +}; + +/* base */ +struct nv50_disp_base_channel_dma_v0 { + __u8 version; + __u8 pad01[2]; + __u8 head; + __u32 pushbuf; +}; + +/* overlay */ +struct nv50_disp_overlay_channel_dma_v0 { + __u8 version; + __u8 pad01[2]; + __u8 head; + __u32 pushbuf; +}; + +/* overlay immediate */ +struct nv50_disp_overlay_v0 { + __u8 version; + __u8 head; + __u8 pad02[6]; +}; + + +/******************************************************************************* + * fermi + ******************************************************************************/ + +#define FERMI_A_ZBC_COLOR 0x00 +#define FERMI_A_ZBC_DEPTH 0x01 + +struct fermi_a_zbc_color_v0 { + __u8 version; +#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01 +#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02 +#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04 +#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08 +#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c +#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10 +#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14 +#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16 +#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18 +#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c +#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20 +#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24 +#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28 +#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c +#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30 +#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34 +#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38 +#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c +#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40 + __u8 format; + __u8 index; + __u8 pad03[5]; + __u32 ds[4]; + __u32 l2[4]; +}; + +struct fermi_a_zbc_depth_v0 { + __u8 version; +#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01 + __u8 format; + __u8 index; + __u8 pad03[5]; + __u32 ds; + __u32 l2; +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c new file mode 100644 index 000000000000..3c4df1fc26dc --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/client.c @@ -0,0 +1,129 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include "client.h" +#include "driver.h" +#include "ioctl.h" + +int +nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) +{ + return client->driver->ioctl(client->base.priv, client->super, data, size, NULL); +} + +int +nvif_client_suspend(struct nvif_client *client) +{ + return client->driver->suspend(client->base.priv); +} + +int +nvif_client_resume(struct nvif_client *client) +{ + return client->driver->resume(client->base.priv); +} + +void +nvif_client_fini(struct nvif_client *client) +{ + if (client->driver) { + client->driver->fini(client->base.priv); + client->driver = NULL; + client->base.parent = NULL; + nvif_object_fini(&client->base); + } +} + +const struct nvif_driver * +nvif_drivers[] = { +#ifdef __KERNEL__ + &nvif_driver_nvkm, +#else + &nvif_driver_drm, + &nvif_driver_lib, +#endif + NULL +}; + +int +nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver, + const char *name, u64 device, const char *cfg, const char *dbg, + struct nvif_client *client) +{ + int ret, i; + + ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base); + if (ret) + return ret; + + client->base.parent = &client->base; + client->base.handle = ~0; + client->object = &client->base; + client->super = true; + + for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) { + if (!driver || !strcmp(client->driver->name, driver)) { + ret = client->driver->init(name, device, cfg, dbg, + &client->base.priv); + if (!ret || driver) + break; + } + } + + if (ret) + nvif_client_fini(client); + return ret; +} + +static void +nvif_client_del(struct nvif_client *client) +{ + nvif_client_fini(client); + kfree(client); +} + +int +nvif_client_new(const char *driver, const char *name, u64 device, + const char *cfg, const char *dbg, + struct nvif_client **pclient) +{ + struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL); + if (client) { + int ret = nvif_client_init(nvif_client_del, driver, name, + device, cfg, dbg, client); + if (ret) { + kfree(client); + client = NULL; + } + *pclient = client; + return ret; + } + return -ENOMEM; +} + +void +nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient) +{ + nvif_object_ref(&client->base, (struct nvif_object **)pclient); +} diff --git a/drivers/gpu/drm/nouveau/nvif/client.h b/drivers/gpu/drm/nouveau/nvif/client.h new file mode 100644 index 000000000000..28352f0882ec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/client.h @@ -0,0 +1,39 @@ +#ifndef __NVIF_CLIENT_H__ +#define __NVIF_CLIENT_H__ + +#include "object.h" + +struct nvif_client { + struct nvif_object base; + struct nvif_object *object; /*XXX: hack for nvif_object() */ + const struct nvif_driver *driver; + bool super; +}; + +static inline struct nvif_client * +nvif_client(struct nvif_object *object) +{ + while (object && object->parent != object) + object = object->parent; + return (void *)object; +} + +int nvif_client_init(void (*dtor)(struct nvif_client *), const char *, + const char *, u64, const char *, const char *, + struct nvif_client *); +void nvif_client_fini(struct nvif_client *); +int nvif_client_new(const char *, const char *, u64, const char *, + const char *, struct nvif_client **); +void 
nvif_client_ref(struct nvif_client *, struct nvif_client **); +int nvif_client_ioctl(struct nvif_client *, void *, u32); +int nvif_client_suspend(struct nvif_client *); +int nvif_client_resume(struct nvif_client *); + +/*XXX*/ +#include <core/client.h> +#define nvkm_client(a) ({ \ + struct nvif_client *_client = nvif_client(nvif_object(a)); \ + nouveau_client(_client->base.priv); \ +}) + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c new file mode 100644 index 000000000000..f477579725e3 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/device.c @@ -0,0 +1,78 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include "device.h" + +void +nvif_device_fini(struct nvif_device *device) +{ + nvif_object_fini(&device->base); +} + +int +nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *), + u32 handle, u32 oclass, void *data, u32 size, + struct nvif_device *device) +{ + int ret = nvif_object_init(parent, (void *)dtor, handle, oclass, + data, size, &device->base); + if (ret == 0) { + device->object = &device->base; + device->info.version = 0; + ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO, + &device->info, sizeof(device->info)); + } + return ret; +} + +static void +nvif_device_del(struct nvif_device *device) +{ + nvif_device_fini(device); + kfree(device); +} + +int +nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass, + void *data, u32 size, struct nvif_device **pdevice) +{ + struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL); + if (device) { + int ret = nvif_device_init(parent, nvif_device_del, handle, + oclass, data, size, device); + if (ret) { + kfree(device); + device = NULL; + } + *pdevice = device; + return ret; + } + return -ENOMEM; +} + +void +nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice) +{ + nvif_object_ref(&device->base, (struct nvif_object **)pdevice); +} diff --git a/drivers/gpu/drm/nouveau/nvif/device.h b/drivers/gpu/drm/nouveau/nvif/device.h new file mode 100644 index 000000000000..43180f9fe630 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/device.h @@ -0,0 +1,62 @@ +#ifndef __NVIF_DEVICE_H__ +#define __NVIF_DEVICE_H__ + +#include "object.h" +#include "class.h" + +struct nvif_device { + struct nvif_object base; + struct nvif_object *object; /*XXX: hack for nvif_object() */ + struct nv_device_info_v0 info; +}; + 
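/* A minimal usage sketch of the client/device interfaces added in this series,
 * assuming the nv_device_v0 argument structure and the NV_DEVICE class number
 * (0x0080, per the XXX note below) from class.h; the "example" names and the
 * ~0ULL device selector are illustrative assumptions, not part of the patch.
 */
static int
example_device_open(struct nvif_client *client, struct nvif_device *device)
{
	struct nv_device_v0 args = {
		.device = ~0ULL,	/* assumed: "any/default device" selector */
	};
	int ret;

	/* driver == NULL lets nvif_client_init() probe nvif_drivers[] */
	ret = nvif_client_init(NULL, NULL, "example", ~0ULL, NULL, NULL,
			       client);
	if (ret)
		return ret;

	ret = nvif_device_init(&client->base, NULL, 0 /* handle */, 0x0080,
			       &args, sizeof(args), device);
	if (ret)
		nvif_client_fini(client);
	return ret;
}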
+static inline struct nvif_device * +nvif_device(struct nvif_object *object) +{ + while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ ) + object = object->parent; + return (void *)object; +} + +int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *), + u32 handle, u32 oclass, void *, u32, + struct nvif_device *); +void nvif_device_fini(struct nvif_device *); +int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass, + void *, u32, struct nvif_device **); +void nvif_device_ref(struct nvif_device *, struct nvif_device **); + +/*XXX*/ +#include <subdev/bios.h> +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/bar.h> +#include <subdev/gpio.h> +#include <subdev/clock.h> +#include <subdev/i2c.h> +#include <subdev/timer.h> +#include <subdev/therm.h> + +#define nvkm_device(a) nv_device(nvkm_object((a))) +#define nvkm_bios(a) nouveau_bios(nvkm_device(a)) +#define nvkm_fb(a) nouveau_fb(nvkm_device(a)) +#define nvkm_vmmgr(a) nouveau_vmmgr(nvkm_device(a)) +#define nvkm_bar(a) nouveau_bar(nvkm_device(a)) +#define nvkm_gpio(a) nouveau_gpio(nvkm_device(a)) +#define nvkm_clock(a) nouveau_clock(nvkm_device(a)) +#define nvkm_i2c(a) nouveau_i2c(nvkm_device(a)) +#define nvkm_timer(a) nouveau_timer(nvkm_device(a)) +#define nvkm_wait(a,b,c,d) nv_wait(nvkm_timer(a), (b), (c), (d)) +#define nvkm_wait_cb(a,b,c) nv_wait_cb(nvkm_timer(a), (b), (c)) +#define nvkm_therm(a) nouveau_therm(nvkm_device(a)) + +#include <engine/device.h> +#include <engine/fifo.h> +#include <engine/graph.h> +#include <engine/software.h> + +#define nvkm_fifo(a) nouveau_fifo(nvkm_device(a)) +#define nvkm_fifo_chan(a) ((struct nouveau_fifo_chan *)nvkm_object(a)) +#define nvkm_gr(a) ((struct nouveau_graph *)nouveau_engine(nvkm_object(a), NVDEV_ENGINE_GR)) + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h new file mode 100644 index 000000000000..b72a8f0c2758 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/driver.h @@ -0,0 +1,21 @@ +#ifndef __NVIF_DRIVER_H__ +#define __NVIF_DRIVER_H__ + +struct nvif_driver { + const char *name; + int (*init)(const char *name, u64 device, const char *cfg, + const char *dbg, void **priv); + void (*fini)(void *priv); + int (*suspend)(void *priv); + int (*resume)(void *priv); + int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack); + void *(*map)(void *priv, u64 handle, u32 size); + void (*unmap)(void *priv, void *ptr, u32 size); + bool keep; +}; + +extern const struct nvif_driver nvif_driver_nvkm; +extern const struct nvif_driver nvif_driver_drm; +extern const struct nvif_driver nvif_driver_lib; + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/event.h b/drivers/gpu/drm/nouveau/nvif/event.h new file mode 100644 index 000000000000..21764499b4be --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/event.h @@ -0,0 +1,62 @@ +#ifndef __NVIF_EVENT_H__ +#define __NVIF_EVENT_H__ + +struct nvif_notify_req_v0 { + __u8 version; + __u8 reply; + __u8 pad02[5]; +#define NVIF_NOTIFY_V0_ROUTE_NVIF 0x00 + __u8 route; + __u64 token; /* must be unique */ + __u8 data[]; /* request data (below) */ +}; + +struct nvif_notify_rep_v0 { + __u8 version; + __u8 pad01[6]; + __u8 route; + __u64 token; + __u8 data[]; /* reply data (below) */ +}; + +struct nvif_notify_head_req_v0 { + /* nvif_notify_req ... */ + __u8 version; + __u8 head; + __u8 pad02[6]; +}; + +struct nvif_notify_head_rep_v0 { + /* nvif_notify_rep ... */ + __u8 version; + __u8 pad01[7]; +}; + +struct nvif_notify_conn_req_v0 { + /* nvif_notify_req ... 
*/ + __u8 version; +#define NVIF_NOTIFY_CONN_V0_PLUG 0x01 +#define NVIF_NOTIFY_CONN_V0_UNPLUG 0x02 +#define NVIF_NOTIFY_CONN_V0_IRQ 0x04 +#define NVIF_NOTIFY_CONN_V0_ANY 0x07 + __u8 mask; + __u8 conn; + __u8 pad03[5]; +}; + +struct nvif_notify_conn_rep_v0 { + /* nvif_notify_rep ... */ + __u8 version; + __u8 mask; + __u8 pad02[6]; +}; + +struct nvif_notify_uevent_req { + /* nvif_notify_req ... */ +}; + +struct nvif_notify_uevent_rep { + /* nvif_notify_rep ... */ +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/ioctl.h b/drivers/gpu/drm/nouveau/nvif/ioctl.h new file mode 100644 index 000000000000..4cd8e323b23d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/ioctl.h @@ -0,0 +1,128 @@ +#ifndef __NVIF_IOCTL_H__ +#define __NVIF_IOCTL_H__ + +struct nvif_ioctl_v0 { + __u8 version; +#define NVIF_IOCTL_V0_OWNER_NVIF 0x00 +#define NVIF_IOCTL_V0_OWNER_ANY 0xff + __u8 owner; +#define NVIF_IOCTL_V0_NOP 0x00 +#define NVIF_IOCTL_V0_SCLASS 0x01 +#define NVIF_IOCTL_V0_NEW 0x02 +#define NVIF_IOCTL_V0_DEL 0x03 +#define NVIF_IOCTL_V0_MTHD 0x04 +#define NVIF_IOCTL_V0_RD 0x05 +#define NVIF_IOCTL_V0_WR 0x06 +#define NVIF_IOCTL_V0_MAP 0x07 +#define NVIF_IOCTL_V0_UNMAP 0x08 +#define NVIF_IOCTL_V0_NTFY_NEW 0x09 +#define NVIF_IOCTL_V0_NTFY_DEL 0x0a +#define NVIF_IOCTL_V0_NTFY_GET 0x0b +#define NVIF_IOCTL_V0_NTFY_PUT 0x0c + __u8 type; + __u8 path_nr; +#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00 +#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff + __u8 pad04[3]; + __u8 route; + __u64 token; + __u32 path[8]; /* in reverse */ + __u8 data[]; /* ioctl data (below) */ +}; + +struct nvif_ioctl_nop { +}; + +struct nvif_ioctl_sclass_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 count; + __u8 pad02[6]; + __u32 oclass[]; +}; + +struct nvif_ioctl_new_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 pad01[6]; + __u8 route; + __u64 token; + __u32 handle; +/* these class numbers are made up by us, and not nvidia-assigned */ +#define NVIF_IOCTL_NEW_V0_PERFCTR 0x0000ffff +#define NVIF_IOCTL_NEW_V0_CONTROL 0x0000fffe + __u32 oclass; + __u8 data[]; /* class data (class.h) */ +}; + +struct nvif_ioctl_del { +}; + +struct nvif_ioctl_rd_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 size; + __u8 pad02[2]; + __u32 data; + __u64 addr; +}; + +struct nvif_ioctl_wr_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 size; + __u8 pad02[2]; + __u32 data; + __u64 addr; +}; + +struct nvif_ioctl_map_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 pad01[3]; + __u32 length; + __u64 handle; +}; + +struct nvif_ioctl_unmap { +}; + +struct nvif_ioctl_ntfy_new_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 event; + __u8 index; + __u8 pad03[5]; + __u8 data[]; /* event request data (event.h) */ +}; + +struct nvif_ioctl_ntfy_del_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 index; + __u8 pad02[6]; +}; + +struct nvif_ioctl_ntfy_get_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 index; + __u8 pad02[6]; +}; + +struct nvif_ioctl_ntfy_put_v0 { + /* nvif_ioctl ... */ + __u8 version; + __u8 index; + __u8 pad02[6]; +}; + +struct nvif_ioctl_mthd_v0 { + /* nvif_ioctl ... 
*/ + __u8 version; + __u8 method; + __u8 pad02[6]; + __u8 data[]; /* method data (class.h) */ +}; + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/list.h b/drivers/gpu/drm/nouveau/nvif/list.h new file mode 100644 index 000000000000..8af5d144ecb0 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/list.h @@ -0,0 +1,353 @@ +/* + * Copyright © 2010 Intel Corporation + * Copyright © 2010 Francisco Jerez <currojerez@riseup.net> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +/* Modified by Ben Skeggs <bskeggs@redhat.com> to match kernel list APIs */ + +#ifndef _XORG_LIST_H_ +#define _XORG_LIST_H_ + +/** + * @file Classic doubly-link circular list implementation. + * For real usage examples of the linked list, see the file test/list.c + * + * Example: + * We need to keep a list of struct foo in the parent struct bar, i.e. what + * we want is something like this. + * + * struct bar { + * ... + * struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{} + * ... + * } + * + * We need one list head in bar and a list element in all list_of_foos (both are of + * data type 'struct list_head'). + * + * struct bar { + * ... + * struct list_head list_of_foos; + * ... + * } + * + * struct foo { + * ... + * struct list_head entry; + * ... + * } + * + * Now we initialize the list head: + * + * struct bar bar; + * ... + * INIT_LIST_HEAD(&bar.list_of_foos); + * + * Then we create the first element and add it to this list: + * + * struct foo *foo = malloc(...); + * .... + * list_add(&foo->entry, &bar.list_of_foos); + * + * Repeat the above for each element you want to add to the list. Deleting + * works with the element itself. + * list_del(&foo->entry); + * free(foo); + * + * Note: calling list_del(&bar.list_of_foos) will set bar.list_of_foos to an empty + * list again. + * + * Looping through the list requires a 'struct foo' as iterator and the + * name of the field the subnodes use. + * + * struct foo *iterator; + * list_for_each_entry(iterator, &bar.list_of_foos, entry) { + * if (iterator->something == ...) + * ... + * } + * + * Note: You must not call list_del() on the iterator if you continue the + * loop. You need to run the safe for-each loop instead: + * + * struct foo *iterator, *next; + * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) { + * if (...) + * list_del(&iterator->entry); + * } + * + */ + +/** + * The linkage struct for list nodes. 
This struct must be part of your + * to-be-linked struct. struct list_head is required for both the head of the + * list and for each list node. + * + * Position and name of the struct list_head field is irrelevant. + * There are no requirements that elements of a list are of the same type. + * There are no requirements for a list head, any struct list_head can be a list + * head. + */ +struct list_head { + struct list_head *next, *prev; +}; + +/** + * Initialize the list as an empty list. + * + * Example: + * INIT_LIST_HEAD(&bar->list_of_foos); + * + * @param The list to initialized. + */ +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void +INIT_LIST_HEAD(struct list_head *list) +{ + list->next = list->prev = list; +} + +static inline void +__list_add(struct list_head *entry, + struct list_head *prev, struct list_head *next) +{ + next->prev = entry; + entry->next = next; + entry->prev = prev; + prev->next = entry; +} + +/** + * Insert a new element after the given list head. The new element does not + * need to be initialised as empty list. + * The list changes from: + * head → some element → ... + * to + * head → new element → older element → ... + * + * Example: + * struct foo *newfoo = malloc(...); + * list_add(&newfoo->entry, &bar->list_of_foos); + * + * @param entry The new element to prepend to the list. + * @param head The existing list. + */ +static inline void +list_add(struct list_head *entry, struct list_head *head) +{ + __list_add(entry, head, head->next); +} + +/** + * Append a new element to the end of the list given with this list head. + * + * The list changes from: + * head → some element → ... → lastelement + * to + * head → some element → ... → lastelement → new element + * + * Example: + * struct foo *newfoo = malloc(...); + * list_add_tail(&newfoo->entry, &bar->list_of_foos); + * + * @param entry The new element to prepend to the list. + * @param head The existing list. + */ +static inline void +list_add_tail(struct list_head *entry, struct list_head *head) +{ + __list_add(entry, head->prev, head); +} + +static inline void +__list_del(struct list_head *prev, struct list_head *next) +{ + next->prev = prev; + prev->next = next; +} + +/** + * Remove the element from the list it is in. Using this function will reset + * the pointers to/from this element so it is removed from the list. It does + * NOT free the element itself or manipulate it otherwise. + * + * Using list_del on a pure list head (like in the example at the top of + * this file) will NOT remove the first element from + * the list but rather reset the list as empty list. + * + * Example: + * list_del(&foo->entry); + * + * @param entry The element to remove. + */ +static inline void +list_del(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + +static inline void +list_del_init(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add_tail(list, head); +} + +/** + * Check if the list is empty. + * + * Example: + * list_empty(&bar->list_of_foos); + * + * @return True if the list contains one or more elements or False otherwise. + */ +static inline bool +list_empty(struct list_head *head) +{ + return head->next == head; +} + +/** + * Returns a pointer to the container of this list element. 
+ * + * Example: + * struct foo* f; + * f = container_of(&foo->entry, struct foo, entry); + * assert(f == foo); + * + * @param ptr Pointer to the struct list_head. + * @param type Data type of the list element. + * @param member Member name of the struct list_head field in the list element. + * @return A pointer to the data struct containing the list head. + */ +#ifndef container_of +#define container_of(ptr, type, member) \ + (type *)((char *)(ptr) - (char *) &((type *)0)->member) +#endif + +/** + * Alias of container_of + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * Retrieve the first list entry for the given list pointer. + * + * Example: + * struct foo *first; + * first = list_first_entry(&bar->list_of_foos, struct foo, list_of_foos); + * + * @param ptr The list head + * @param type Data type of the list element to retrieve + * @param member Member name of the struct list_head field in the list element. + * @return A pointer to the first list element. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * Retrieve the last list entry for the given listpointer. + * + * Example: + * struct foo *first; + * first = list_last_entry(&bar->list_of_foos, struct foo, list_of_foos); + * + * @param ptr The list head + * @param type Data type of the list element to retrieve + * @param member Member name of the struct list_head field in the list element. + * @return A pointer to the last list element. + */ +#define list_last_entry(ptr, type, member) \ + list_entry((ptr)->prev, type, member) + +#define __container_of(ptr, sample, member) \ + (void *)container_of((ptr), typeof(*(sample)), member) + +/** + * Loop through the list given by head and set pos to struct in the list. + * + * Example: + * struct foo *iterator; + * list_for_each_entry(iterator, &bar->list_of_foos, entry) { + * [modify iterator] + * } + * + * This macro is not safe for node deletion. Use list_for_each_entry_safe + * instead. + * + * @param pos Iterator variable of the type of the list elements. + * @param head List head + * @param member Member name of the struct list_head in the list elements. + * + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = __container_of((head)->next, pos, member); \ + &pos->member != (head); \ + pos = __container_of(pos->member.next, pos, member)) + +/** + * Loop through the list, keeping a backup pointer to the element. This + * macro allows for the deletion of a list element while looping through the + * list. + * + * See list_for_each_entry for more details. 
+ */ +#define list_for_each_entry_safe(pos, tmp, head, member) \ + for (pos = __container_of((head)->next, pos, member), \ + tmp = __container_of(pos->member.next, pos, member); \ + &pos->member != (head); \ + pos = tmp, tmp = __container_of(pos->member.next, tmp, member)) + + +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = __container_of((head)->prev, pos, member); \ + &pos->member != (head); \ + pos = __container_of(pos->member.prev, pos, member)) + +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = __container_of(pos->member.next, pos, member); \ + &pos->member != (head); \ + pos = __container_of(pos->member.next, pos, member)) + +#define list_for_each_entry_continue_reverse(pos, head, member) \ + for (pos = __container_of(pos->member.prev, pos, member); \ + &pos->member != (head); \ + pos = __container_of(pos->member.prev, pos, member)) + +#define list_for_each_entry_from(pos, head, member) \ + for (; \ + &pos->member != (head); \ + pos = __container_of(pos->member.next, pos, member)) + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c new file mode 100644 index 000000000000..0898c3155292 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/notify.c @@ -0,0 +1,248 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include <nvif/client.h> +#include <nvif/driver.h> +#include <nvif/notify.h> +#include <nvif/object.h> +#include <nvif/ioctl.h> +#include <nvif/event.h> + +static inline int +nvif_notify_put_(struct nvif_notify *notify) +{ + struct nvif_object *object = notify->object; + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_ntfy_put_v0 ntfy; + } args = { + .ioctl.type = NVIF_IOCTL_V0_NTFY_PUT, + .ntfy.index = notify->index, + }; + + if (atomic_inc_return(&notify->putcnt) != 1) + return 0; + + return nvif_object_ioctl(object, &args, sizeof(args), NULL); +} + +int +nvif_notify_put(struct nvif_notify *notify) +{ + if (likely(notify->object) && + test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) { + int ret = nvif_notify_put_(notify); + if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) + flush_work(&notify->work); + return ret; + } + return 0; +} + +static inline int +nvif_notify_get_(struct nvif_notify *notify) +{ + struct nvif_object *object = notify->object; + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_ntfy_get_v0 ntfy; + } args = { + .ioctl.type = NVIF_IOCTL_V0_NTFY_GET, + .ntfy.index = notify->index, + }; + + if (atomic_dec_return(&notify->putcnt) != 0) + return 0; + + return nvif_object_ioctl(object, &args, sizeof(args), NULL); +} + +int +nvif_notify_get(struct nvif_notify *notify) +{ + if (likely(notify->object) && + !test_and_set_bit(NVIF_NOTIFY_USER, &notify->flags)) + return nvif_notify_get_(notify); + return 0; +} + +static inline int +nvif_notify_func(struct nvif_notify *notify, bool keep) +{ + int ret = notify->func(notify); + if (ret == NVIF_NOTIFY_KEEP || + !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) { + if (!keep) + atomic_dec(&notify->putcnt); + else + nvif_notify_get_(notify); + } + return ret; +} + +static void +nvif_notify_work(struct work_struct *work) +{ + struct nvif_notify *notify = container_of(work, typeof(*notify), work); + nvif_notify_func(notify, true); +} + +int +nvif_notify(const void *header, u32 length, const void *data, u32 size) +{ + struct nvif_notify *notify = NULL; + const union { + struct nvif_notify_rep_v0 v0; + } *args = header; + int ret = NVIF_NOTIFY_DROP; + + if (length == sizeof(args->v0) && args->v0.version == 0) { + if (WARN_ON(args->v0.route)) + return NVIF_NOTIFY_DROP; + notify = (void *)(unsigned long)args->v0.token; + } + + if (!WARN_ON(notify == NULL)) { + struct nvif_client *client = nvif_client(notify->object); + if (!WARN_ON(notify->size != size)) { + atomic_inc(&notify->putcnt); + if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) { + memcpy((void *)notify->data, data, size); + schedule_work(&notify->work); + return NVIF_NOTIFY_DROP; + } + notify->data = data; + ret = nvif_notify_func(notify, client->driver->keep); + notify->data = NULL; + } + } + + return ret; +} + +int +nvif_notify_fini(struct nvif_notify *notify) +{ + struct nvif_object *object = notify->object; + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_ntfy_del_v0 ntfy; + } args = { + .ioctl.type = NVIF_IOCTL_V0_NTFY_DEL, + .ntfy.index = notify->index, + }; + int ret = nvif_notify_put(notify); + if (ret >= 0 && object) { + ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); + if (ret == 0) { + nvif_object_ref(NULL, &notify->object); + kfree((void *)notify->data); + } + } + return ret; +} + +int +nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *), + int (*func)(struct nvif_notify *), bool work, u8 event, + void *data, u32 size, u32 reply, struct nvif_notify *notify) +{ + struct {
+ struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_ntfy_new_v0 ntfy; + struct nvif_notify_req_v0 req; + } *args; + int ret = -ENOMEM; + + notify->object = NULL; + nvif_object_ref(object, &notify->object); + notify->flags = 0; + atomic_set(&notify->putcnt, 1); + notify->dtor = dtor; + notify->func = func; + notify->data = NULL; + notify->size = reply; + if (work) { + INIT_WORK(&notify->work, nvif_notify_work); + set_bit(NVIF_NOTIFY_WORK, &notify->flags); + notify->data = kmalloc(notify->size, GFP_KERNEL); + if (!notify->data) + goto done; + } + + if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) + goto done; + args->ioctl.version = 0; + args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW; + args->ntfy.version = 0; + args->ntfy.event = event; + args->req.version = 0; + args->req.reply = notify->size; + args->req.route = 0; + args->req.token = (unsigned long)(void *)notify; + + memcpy(args->req.data, data, size); + ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL); + notify->index = args->ntfy.index; + kfree(args); +done: + if (ret) + nvif_notify_fini(notify); + return ret; +} + +static void +nvif_notify_del(struct nvif_notify *notify) +{ + nvif_notify_fini(notify); + kfree(notify); +} + +void +nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify) +{ + BUG_ON(notify != NULL); + if (*pnotify) + (*pnotify)->dtor(*pnotify); + *pnotify = notify; +} + +int +nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *), + bool work, u8 type, void *data, u32 size, u32 reply, + struct nvif_notify **pnotify) +{ + struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL); + if (notify) { + int ret = nvif_notify_init(object, nvif_notify_del, func, work, + type, data, size, reply, notify); + if (ret) { + kfree(notify); + notify = NULL; + } + *pnotify = notify; + return ret; + } + return -ENOMEM; +} diff --git a/drivers/gpu/drm/nouveau/nvif/notify.h b/drivers/gpu/drm/nouveau/nvif/notify.h new file mode 100644 index 000000000000..9ebfa3b45e76 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/notify.h @@ -0,0 +1,39 @@ +#ifndef __NVIF_NOTIFY_H__ +#define __NVIF_NOTIFY_H__ + +struct nvif_notify { + struct nvif_object *object; + int index; + +#define NVIF_NOTIFY_USER 0 +#define NVIF_NOTIFY_WORK 1 + unsigned long flags; + atomic_t putcnt; + void (*dtor)(struct nvif_notify *); +#define NVIF_NOTIFY_DROP 0 +#define NVIF_NOTIFY_KEEP 1 + int (*func)(struct nvif_notify *); + + /* this is const for a *very* good reason - the data might be on the + * stack from an irq handler. if you're not nvif/notify.c then you + * should probably think twice before casting it away...
+ */ + const void *data; + u32 size; + struct work_struct work; +}; + +int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *), + int (*func)(struct nvif_notify *), bool work, u8 type, + void *data, u32 size, u32 reply, struct nvif_notify *); +int nvif_notify_fini(struct nvif_notify *); +int nvif_notify_get(struct nvif_notify *); +int nvif_notify_put(struct nvif_notify *); +int nvif_notify(const void *, u32, const void *, u32); + +int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *), + bool work, u8 type, void *data, u32 size, u32 reply, + struct nvif_notify **); +void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **); + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c new file mode 100644 index 000000000000..dd85b56f6aa5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/object.c @@ -0,0 +1,304 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ + +#include "object.h" +#include "client.h" +#include "driver.h" +#include "ioctl.h" + +int +nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack) +{ + struct nvif_client *client = nvif_client(object); + union { + struct nvif_ioctl_v0 v0; + } *args = data; + + if (size >= sizeof(*args) && args->v0.version == 0) { + args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY; + args->v0.path_nr = 0; + while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) { + args->v0.path[args->v0.path_nr++] = object->handle; + if (object->parent == object) + break; + object = object->parent; + } + } else + return -ENOSYS; + + return client->driver->ioctl(client->base.priv, client->super, data, size, hack); +} + +int +nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count) +{ + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_sclass_v0 sclass; + } *args; + u32 size = count * sizeof(args->sclass.oclass[0]); + int ret; + + if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) + return -ENOMEM; + args->ioctl.version = 0; + args->ioctl.type = NVIF_IOCTL_V0_SCLASS; + args->sclass.version = 0; + args->sclass.count = count; + + memcpy(args->sclass.oclass, oclass, size); + ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL); + ret = ret ? 
ret : args->sclass.count; + memcpy(oclass, args->sclass.oclass, size); + kfree(args); + return ret; +} + +u32 +nvif_object_rd(struct nvif_object *object, int size, u64 addr) +{ + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_rd_v0 rd; + } args = { + .ioctl.type = NVIF_IOCTL_V0_RD, + .rd.size = size, + .rd.addr = addr, + }; + int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); + if (ret) { + /*XXX: warn? */ + return 0; + } + return args.rd.data; +} + +void +nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data) +{ + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_wr_v0 wr; + } args = { + .ioctl.type = NVIF_IOCTL_V0_WR, + .wr.size = size, + .wr.addr = addr, + .wr.data = data, + }; + int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); + if (ret) { + /*XXX: warn? */ + } +} + +int +nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size) +{ + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_mthd_v0 mthd; + } *args; + u8 stack[128]; + int ret; + + if (sizeof(*args) + size > sizeof(stack)) { + if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) + return -ENOMEM; + } else { + args = (void *)stack; + } + args->ioctl.version = 0; + args->ioctl.type = NVIF_IOCTL_V0_MTHD; + args->mthd.version = 0; + args->mthd.method = mthd; + + memcpy(args->mthd.data, data, size); + ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL); + memcpy(data, args->mthd.data, size); + if (args != (void *)stack) + kfree(args); + return ret; +} + +void +nvif_object_unmap(struct nvif_object *object) +{ + if (object->map.size) { + struct nvif_client *client = nvif_client(object); + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_unmap unmap; + } args = { + .ioctl.type = NVIF_IOCTL_V0_UNMAP, + }; + + if (object->map.ptr) { + client->driver->unmap(client, object->map.ptr, + object->map.size); + object->map.ptr = NULL; + } + + nvif_object_ioctl(object, &args, sizeof(args), NULL); + object->map.size = 0; + } +} + +int +nvif_object_map(struct nvif_object *object) +{ + struct nvif_client *client = nvif_client(object); + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_map_v0 map; + } args = { + .ioctl.type = NVIF_IOCTL_V0_MAP, + }; + int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL); + if (ret == 0) { + object->map.size = args.map.length; + object->map.ptr = client->driver->map(client, args.map.handle, + object->map.size); + if (ret = -ENOMEM, object->map.ptr) + return 0; + nvif_object_unmap(object); + } + return ret; +} + +struct ctor { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_new_v0 new; +}; + +void +nvif_object_fini(struct nvif_object *object) +{ + struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data); + if (object->parent) { + struct { + struct nvif_ioctl_v0 ioctl; + struct nvif_ioctl_del del; + } args = { + .ioctl.type = NVIF_IOCTL_V0_DEL, + }; + + nvif_object_unmap(object); + nvif_object_ioctl(object, &args, sizeof(args), NULL); + if (object->data) { + object->size = 0; + object->data = NULL; + kfree(ctor); + } + nvif_object_ref(NULL, &object->parent); + } +} + +int +nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *), + u32 handle, u32 oclass, void *data, u32 size, + struct nvif_object *object) +{ + struct ctor *ctor; + int ret = 0; + + object->parent = NULL; + object->object = object; + nvif_object_ref(parent, &object->parent); + kref_init(&object->refcount); + object->handle = handle; + object->oclass = oclass; + object->data = 
NULL; + object->size = 0; + object->dtor = dtor; + object->map.ptr = NULL; + object->map.size = 0; + + if (object->parent) { + if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) { + nvif_object_fini(object); + return -ENOMEM; + } + object->data = ctor->new.data; + object->size = size; + memcpy(object->data, data, size); + + ctor->ioctl.version = 0; + ctor->ioctl.type = NVIF_IOCTL_V0_NEW; + ctor->new.version = 0; + ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF; + ctor->new.token = (unsigned long)(void *)object; + ctor->new.handle = handle; + ctor->new.oclass = oclass; + + ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) + + object->size, &object->priv); + } + + if (ret) + nvif_object_fini(object); + return ret; +} + +static void +nvif_object_del(struct nvif_object *object) +{ + nvif_object_fini(object); + kfree(object); +} + +int +nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass, + void *data, u32 size, struct nvif_object **pobject) +{ + struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL); + if (object) { + int ret = nvif_object_init(parent, nvif_object_del, handle, + oclass, data, size, object); + if (ret) { + kfree(object); + object = NULL; + } + *pobject = object; + return ret; + } + return -ENOMEM; +} + +static void +nvif_object_put(struct kref *kref) +{ + struct nvif_object *object = + container_of(kref, typeof(*object), refcount); + object->dtor(object); +} + +void +nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject) +{ + if (object) + kref_get(&object->refcount); + if (*pobject) + kref_put(&(*pobject)->refcount, nvif_object_put); + *pobject = object; +} diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/nvif/object.h new file mode 100644 index 000000000000..fac3a3bbec44 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/object.h @@ -0,0 +1,75 @@ +#ifndef __NVIF_OBJECT_H__ +#define __NVIF_OBJECT_H__ + +#include <nvif/os.h> + +struct nvif_object { + struct nvif_object *parent; + struct nvif_object *object; /*XXX: hack for nvif_object() */ + struct kref refcount; + u32 handle; + u32 oclass; + void *data; + u32 size; + void *priv; /*XXX: hack */ + void (*dtor)(struct nvif_object *); + struct { + void *ptr; + u32 size; + } map; +}; + +int nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *), + u32 handle, u32 oclass, void *, u32, + struct nvif_object *); +void nvif_object_fini(struct nvif_object *); +int nvif_object_new(struct nvif_object *, u32 handle, u32 oclass, + void *, u32, struct nvif_object **); +void nvif_object_ref(struct nvif_object *, struct nvif_object **); +int nvif_object_ioctl(struct nvif_object *, void *, u32, void **); +int nvif_object_sclass(struct nvif_object *, u32 *, int); +u32 nvif_object_rd(struct nvif_object *, int, u64); +void nvif_object_wr(struct nvif_object *, int, u64, u32); +int nvif_object_mthd(struct nvif_object *, u32, void *, u32); +int nvif_object_map(struct nvif_object *); +void nvif_object_unmap(struct nvif_object *); + +#define nvif_object(a) (a)->object + +#define ioread8_native ioread8 +#define iowrite8_native iowrite8 +#define nvif_rd(a,b,c) ({ \ + struct nvif_object *_object = nvif_object(a); \ + u32 _data; \ + if (likely(_object->map.ptr)) \ + _data = ioread##b##_native((u8 *)_object->map.ptr + (c)); \ + else \ + _data = nvif_object_rd(_object, (b) / 8, (c)); \ + _data; \ +}) +#define nvif_wr(a,b,c,d) ({ \ + struct nvif_object *_object = nvif_object(a); \ + if (likely(_object->map.ptr)) \ + iowrite##b##_native((d), (u8 *)_object->map.ptr + 
(c)); \ + else \ + nvif_object_wr(_object, (b) / 8, (c), (d)); \ +}) +#define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; }) +#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; }) +#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; }) +#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c)) +#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c)) +#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c)) +#define nvif_mask(a,b,c,d) ({ \ + u32 _v = nvif_rd32(nvif_object(a), (b)); \ + nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d)); \ + _v; \ +}) + +#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d)) + +/*XXX*/ +#include <core/object.h> +#define nvkm_object(a) ((struct nouveau_object *)nvif_object(a)->priv) + +#endif diff --git a/drivers/gpu/drm/nouveau/nvif/os.h b/drivers/gpu/drm/nouveau/nvif/os.h new file mode 120000 index 000000000000..bd744b2cf5cf --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/os.h @@ -0,0 +1 @@ +../core/os.h
\ No newline at end of file diff --git a/drivers/gpu/drm/nouveau/nvif/unpack.h b/drivers/gpu/drm/nouveau/nvif/unpack.h new file mode 100644 index 000000000000..5933188b4a77 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/unpack.h @@ -0,0 +1,24 @@ +#ifndef __NVIF_UNPACK_H__ +#define __NVIF_UNPACK_H__ + +#define nvif_unvers(d) ({ \ + ret = (size == sizeof(d)) ? 0 : -ENOSYS; \ + (ret == 0); \ +}) + +#define nvif_unpack(d,vl,vh,m) ({ \ + if ((vl) == 0 || ret == -ENOSYS) { \ + int _size = sizeof(d); \ + if (_size <= size && (d).version >= (vl) && \ + (d).version <= (vh)) { \ + data = (u8 *)data + _size; \ + size = size - _size; \ + ret = ((m) || !size) ? 0 : -E2BIG; \ + } else { \ + ret = -ENOSYS; \ + } \ + } \ + (ret == 0); \ +}) + +#endif diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 86f4ead0441d..a94b11f7859d 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -32,8 +32,16 @@ struct omap_connector { struct drm_connector base; struct omap_dss_device *dssdev; struct drm_encoder *encoder; + bool hdmi_mode; }; +bool omap_connector_get_hdmi_mode(struct drm_connector *connector) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + + return omap_connector->hdmi_mode; +} + void copy_timings_omap_to_drm(struct drm_display_mode *mode, struct omap_video_timings *timings) { @@ -130,7 +138,7 @@ static void omap_connector_destroy(struct drm_connector *connector) struct omap_dss_device *dssdev = omap_connector->dssdev; DBG("%s", omap_connector->dssdev->name); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(omap_connector); @@ -162,10 +170,14 @@ static int omap_connector_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property( connector, edid); n = drm_add_edid_modes(connector, edid); + + omap_connector->hdmi_mode = + drm_detect_hdmi_monitor(edid); } else { drm_mode_connector_update_edid_property( connector, NULL); } + kfree(edid); } else { struct drm_display_mode *mode = drm_mode_create(dev); @@ -307,7 +319,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f926b4caf449..56c60552abba 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, struct page **pages, uint32_t npages, uint32_t roll) { - dma_addr_t pat_pa = 0; + dma_addr_t pat_pa = 0, data_pa = 0; uint32_t *data; struct pat *pat; struct refill_engine *engine = txn->engine_handle; @@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, .lut_id = engine->tcm->lut_id, }; - data = alloc_dma(txn, 4*i, &pat->data_pa); + data = alloc_dma(txn, 4*i, &data_pa); + /* FIXME: what if data_pa is more than 32-bit ? 
*/ + pat->data_pa = data_pa; while (i--) { int n = i + roll; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 284b80fc3c54..84d73a61b34b 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -119,13 +119,6 @@ struct omap_drm_private { struct omap_drm_irq error_handler; }; -/* this should probably be in drm-core to standardize amongst drivers */ -#define DRM_ROTATE_0 0 -#define DRM_ROTATE_90 1 -#define DRM_ROTATE_180 2 -#define DRM_ROTATE_270 3 -#define DRM_REFLECT_X 4 -#define DRM_REFLECT_Y 5 #ifdef CONFIG_DEBUG_FS int omap_debugfs_init(struct drm_minor *minor); @@ -194,6 +187,7 @@ struct drm_encoder *omap_connector_attached_encoder( struct drm_connector *connector); void omap_connector_flush(struct drm_connector *connector, int x, int y, int w, int h); +bool omap_connector_get_hdmi_mode(struct drm_connector *connector); void copy_timings_omap_to_drm(struct drm_display_mode *mode, struct omap_video_timings *timings); diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 5290a88c681d..7445fb1491ae 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -17,6 +17,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <drm/drm_edid.h> + #include "omap_drv.h" #include "drm_crtc.h" @@ -89,6 +91,31 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = encoder->dev; + struct omap_encoder *omap_encoder = to_omap_encoder(encoder); + struct omap_dss_device *dssdev = omap_encoder->dssdev; + struct drm_connector *connector; + bool hdmi_mode; + int r; + + hdmi_mode = false; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + hdmi_mode = omap_connector_get_hdmi_mode(connector); + break; + } + } + + if (dssdev->driver->set_hdmi_mode) + dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode); + + if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) { + struct hdmi_avi_infoframe avi; + + r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode); + if (r == 0) + dssdev->driver->set_hdmi_infoframe(dssdev, &avi); + } } static void omap_encoder_prepare(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 1388ca7f87e8..8436c6857cda 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -281,7 +281,7 @@ fail: return ret; } -static struct drm_fb_helper_funcs omap_fb_helper_funcs = { +static const struct drm_fb_helper_funcs omap_fb_helper_funcs = { .fb_probe = omap_fbdev_create, }; @@ -325,7 +325,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev) helper = &fbdev->base; - helper->funcs = &omap_fb_helper_funcs; + drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs); ret = drm_fb_helper_init(dev, helper, priv->num_crtcs, priv->num_connectors); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 95dbce286a41..e4849413ee80 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -233,11 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) WARN_ON(omap_obj->pages); - /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the - * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. 
probably - * we actually want CMA memory for it all anyways.. - */ - pages = drm_gem_get_pages(obj, GFP_KERNEL); + pages = drm_gem_get_pages(obj); if (IS_ERR(pages)) { dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages)); return PTR_ERR(pages); @@ -791,7 +787,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj, omap_obj->paddr = tiler_ssptr(block); omap_obj->block = block; - DBG("got paddr: %08x", omap_obj->paddr); + DBG("got paddr: %pad", &omap_obj->paddr); } omap_obj->paddr_cnt++; @@ -985,9 +981,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) off = drm_vma_node_start(&obj->vma_node); - seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", + seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", omap_obj->flags, obj->name, obj->refcount.refcount.counter, - off, omap_obj->paddr, omap_obj->paddr_cnt, + off, &omap_obj->paddr, omap_obj->paddr_cnt, omap_obj->vaddr, omap_obj->roll); if (omap_obj->flags & OMAP_BO_TILED) { @@ -1183,9 +1179,7 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op) } } spin_unlock(&sync_lock); - - if (waiter) - kfree(waiter); + kfree(waiter); } return ret; } @@ -1347,6 +1341,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, struct omap_drm_private *priv = dev->dev_private; struct omap_gem_object *omap_obj; struct drm_gem_object *obj = NULL; + struct address_space *mapping; size_t size; int ret; @@ -1404,14 +1399,16 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, omap_obj->height = gsize.tiled.height; } - ret = 0; - if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) + if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) { drm_gem_private_object_init(dev, obj, size); - else + } else { ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto fail; - if (ret) - goto fail; + mapping = file_inode(obj->filp)->i_mapping; + mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); + } return obj; @@ -1467,8 +1464,8 @@ void omap_gem_init(struct drm_device *dev) entry->paddr = tiler_ssptr(block); entry->block = block; - DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h, - entry->paddr, + DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h, + &entry->paddr, usergart[i].stride_pfn << PAGE_SHIFT); } } diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 3cf31ee59aac..891a4dc608af 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply) DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width, info->out_height, info->screen_width); - DBG("%d,%d %08x %08x", info->pos_x, info->pos_y, - info->paddr, info->p_uv_addr); + DBG("%d,%d %pad %pad", info->pos_x, info->pos_y, + &info->paddr, &info->p_uv_addr); /* TODO: */ ilace = false; @@ -308,16 +308,13 @@ void omap_plane_install_properties(struct drm_plane *plane, if (priv->has_dmm) { prop = priv->rotation_prop; if (!prop) { - const struct drm_prop_enum_list props[] = { - { DRM_ROTATE_0, "rotate-0" }, - { DRM_ROTATE_90, "rotate-90" }, - { DRM_ROTATE_180, "rotate-180" }, - { DRM_ROTATE_270, "rotate-270" }, - { DRM_REFLECT_X, "reflect-x" }, - { DRM_REFLECT_Y, "reflect-y" }, - }; - prop = drm_property_create_bitmask(dev, 0, "rotation", - props, ARRAY_SIZE(props)); + prop = drm_mode_create_rotation_property(dev, + BIT(DRM_ROTATE_0) | + BIT(DRM_ROTATE_90) | + BIT(DRM_ROTATE_180) | + BIT(DRM_ROTATE_270) | + BIT(DRM_REFLECT_X) | + BIT(DRM_REFLECT_Y)); if (prop == NULL) return; 
priv->rotation_prop = prop; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 4ec874da5668..bee9f72b3a93 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -5,7 +5,7 @@ config DRM_PANEL Panel registration and lookup framework. menu "Display Panels" - depends on DRM_PANEL + depends on DRM && DRM_PANEL config DRM_PANEL_SIMPLE tristate "support for simple panels" @@ -18,14 +18,11 @@ config DRM_PANEL_SIMPLE config DRM_PANEL_LD9040 tristate "LD9040 RGB/SPI panel" - depends on DRM && DRM_PANEL - depends on OF - select SPI + depends on OF && SPI select VIDEOMODE_HELPERS config DRM_PANEL_S6E8AA0 tristate "S6E8AA0 DSI video mode panel" - depends on DRM && DRM_PANEL depends on OF select DRM_MIPI_DSI select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c index db1601fdbe29..42ac67b21e9f 100644 --- a/drivers/gpu/drm/panel/panel-ld9040.c +++ b/drivers/gpu/drm/panel/panel-ld9040.c @@ -110,7 +110,10 @@ struct ld9040 { int error; }; -#define panel_to_ld9040(p) container_of(p, struct ld9040, panel) +static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel) +{ + return container_of(panel, struct ld9040, panel); +} static int ld9040_clear_error(struct ld9040 *ctx) { @@ -216,6 +219,11 @@ static int ld9040_power_off(struct ld9040 *ctx) static int ld9040_disable(struct drm_panel *panel) { + return 0; +} + +static int ld9040_unprepare(struct drm_panel *panel) +{ struct ld9040 *ctx = panel_to_ld9040(panel); msleep(120); @@ -228,7 +236,7 @@ static int ld9040_disable(struct drm_panel *panel) return ld9040_power_off(ctx); } -static int ld9040_enable(struct drm_panel *panel) +static int ld9040_prepare(struct drm_panel *panel) { struct ld9040 *ctx = panel_to_ld9040(panel); int ret; @@ -242,11 +250,16 @@ static int ld9040_enable(struct drm_panel *panel) ret = ld9040_clear_error(ctx); if (ret < 0) - ld9040_disable(panel); + ld9040_unprepare(panel); return ret; } +static int ld9040_enable(struct drm_panel *panel) +{ + return 0; +} + static int ld9040_get_modes(struct drm_panel *panel) { struct drm_connector *connector = panel->connector; @@ -273,6 +286,8 @@ static int ld9040_get_modes(struct drm_panel *panel) static const struct drm_panel_funcs ld9040_drm_funcs = { .disable = ld9040_disable, + .unprepare = ld9040_unprepare, + .prepare = ld9040_prepare, .enable = ld9040_enable, .get_modes = ld9040_get_modes, }; diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c index 06e57a26db7a..b5217fe37f02 100644 --- a/drivers/gpu/drm/panel/panel-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c @@ -120,7 +120,10 @@ struct s6e8aa0 { int error; }; -#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel) +static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel) +{ + return container_of(panel, struct s6e8aa0, panel); +} static int s6e8aa0_clear_error(struct s6e8aa0 *ctx) { @@ -133,14 +136,14 @@ static int s6e8aa0_clear_error(struct s6e8aa0 *ctx) static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); - int ret; + ssize_t ret; if (ctx->error < 0) return; - ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len); + ret = mipi_dsi_dcs_write(dsi, data, len); if (ret < 0) { - dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len, + dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len, data); ctx->error = ret; } @@ -154,7 +157,7 @@ static 
int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len) if (ctx->error < 0) return ctx->error; - ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len); + ret = mipi_dsi_dcs_read(dsi, cmd, data, len); if (ret < 0) { dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd); ctx->error = ret; @@ -889,6 +892,11 @@ static int s6e8aa0_power_off(struct s6e8aa0 *ctx) static int s6e8aa0_disable(struct drm_panel *panel) { + return 0; +} + +static int s6e8aa0_unprepare(struct drm_panel *panel) +{ struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel); s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE); @@ -900,7 +908,7 @@ static int s6e8aa0_disable(struct drm_panel *panel) return s6e8aa0_power_off(ctx); } -static int s6e8aa0_enable(struct drm_panel *panel) +static int s6e8aa0_prepare(struct drm_panel *panel) { struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel); int ret; @@ -913,11 +921,16 @@ static int s6e8aa0_enable(struct drm_panel *panel) ret = ctx->error; if (ret < 0) - s6e8aa0_disable(panel); + s6e8aa0_unprepare(panel); return ret; } +static int s6e8aa0_enable(struct drm_panel *panel) +{ + return 0; +} + static int s6e8aa0_get_modes(struct drm_panel *panel) { struct drm_connector *connector = panel->connector; @@ -944,6 +957,8 @@ static int s6e8aa0_get_modes(struct drm_panel *panel) static const struct drm_panel_funcs s6e8aa0_drm_funcs = { .disable = s6e8aa0_disable, + .unprepare = s6e8aa0_unprepare, + .prepare = s6e8aa0_prepare, .enable = s6e8aa0_enable, .get_modes = s6e8aa0_get_modes, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index a25136132c31..4ce1db0a68ff 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -37,14 +37,35 @@ struct panel_desc { const struct drm_display_mode *modes; unsigned int num_modes; + unsigned int bpc; + struct { unsigned int width; unsigned int height; } size; + + /** + * @prepare: the time (in milliseconds) that it takes for the panel to + * become ready and start receiving video data + * @enable: the time (in milliseconds) that it takes for the panel to + * display the first valid frame after starting to receive + * video data + * @disable: the time (in milliseconds) that it takes for the panel to + * turn the display off (no content is visible) + * @unprepare: the time (in milliseconds) that it takes for the panel + * to power itself down completely + */ + struct { + unsigned int prepare; + unsigned int enable; + unsigned int disable; + unsigned int unprepare; + } delay; }; struct panel_simple { struct drm_panel base; + bool prepared; bool enabled; const struct panel_desc *desc; @@ -87,6 +108,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel) num++; } + connector->display_info.bpc = panel->desc->bpc; connector->display_info.width_mm = panel->desc->size.width; connector->display_info.height_mm = panel->desc->size.height; @@ -105,21 +127,40 @@ static int panel_simple_disable(struct drm_panel *panel) backlight_update_status(p->backlight); } + if (p->desc->delay.disable) + msleep(p->desc->delay.disable); + + p->enabled = false; + + return 0; +} + +static int panel_simple_unprepare(struct drm_panel *panel) +{ + struct panel_simple *p = to_panel_simple(panel); + + if (!p->prepared) + return 0; + if (p->enable_gpio) gpiod_set_value_cansleep(p->enable_gpio, 0); regulator_disable(p->supply); - p->enabled = false; + + if (p->desc->delay.unprepare) + msleep(p->desc->delay.unprepare); + + p->prepared = false; return 0; } -static int 
panel_simple_enable(struct drm_panel *panel) +static int panel_simple_prepare(struct drm_panel *panel) { struct panel_simple *p = to_panel_simple(panel); int err; - if (p->enabled) + if (p->prepared) return 0; err = regulator_enable(p->supply); @@ -131,6 +172,24 @@ static int panel_simple_enable(struct drm_panel *panel) if (p->enable_gpio) gpiod_set_value_cansleep(p->enable_gpio, 1); + if (p->desc->delay.prepare) + msleep(p->desc->delay.prepare); + + p->prepared = true; + + return 0; +} + +static int panel_simple_enable(struct drm_panel *panel) +{ + struct panel_simple *p = to_panel_simple(panel); + + if (p->enabled) + return 0; + + if (p->desc->delay.enable) + msleep(p->desc->delay.enable); + if (p->backlight) { p->backlight->props.power = FB_BLANK_UNBLANK; backlight_update_status(p->backlight); @@ -164,6 +223,8 @@ static int panel_simple_get_modes(struct drm_panel *panel) static const struct drm_panel_funcs panel_simple_funcs = { .disable = panel_simple_disable, + .unprepare = panel_simple_unprepare, + .prepare = panel_simple_prepare, .enable = panel_simple_enable, .get_modes = panel_simple_get_modes, }; @@ -179,22 +240,21 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) return -ENOMEM; panel->enabled = false; + panel->prepared = false; panel->desc = desc; panel->supply = devm_regulator_get(dev, "power"); if (IS_ERR(panel->supply)) return PTR_ERR(panel->supply); - panel->enable_gpio = devm_gpiod_get(dev, "enable"); + panel->enable_gpio = devm_gpiod_get_optional(dev, "enable"); if (IS_ERR(panel->enable_gpio)) { err = PTR_ERR(panel->enable_gpio); - if (err != -ENOENT) { - dev_err(dev, "failed to request GPIO: %d\n", err); - return err; - } + dev_err(dev, "failed to request GPIO: %d\n", err); + return err; + } - panel->enable_gpio = NULL; - } else { + if (panel->enable_gpio) { err = gpiod_direction_output(panel->enable_gpio, 0); if (err < 0) { dev_err(dev, "failed to setup GPIO: %d\n", err); @@ -285,6 +345,7 @@ static const struct drm_display_mode auo_b101aw03_mode = { static const struct panel_desc auo_b101aw03 = { .modes = &auo_b101aw03_mode, .num_modes = 1, + .bpc = 6, .size = { .width = 223, .height = 125, @@ -307,12 +368,40 @@ static const struct drm_display_mode auo_b133xtn01_mode = { static const struct panel_desc auo_b133xtn01 = { .modes = &auo_b133xtn01_mode, .num_modes = 1, + .bpc = 6, .size = { .width = 293, .height = 165, }, }; +static const struct drm_display_mode auo_b133htn01_mode = { + .clock = 150660, + .hdisplay = 1920, + .hsync_start = 1920 + 172, + .hsync_end = 1920 + 172 + 80, + .htotal = 1920 + 172 + 80 + 60, + .vdisplay = 1080, + .vsync_start = 1080 + 25, + .vsync_end = 1080 + 25 + 10, + .vtotal = 1080 + 25 + 10 + 10, + .vrefresh = 60, +}; + +static const struct panel_desc auo_b133htn01 = { + .modes = &auo_b133htn01_mode, + .num_modes = 1, + .size = { + .width = 293, + .height = 165, + }, + .delay = { + .prepare = 105, + .enable = 20, + .unprepare = 50, + }, +}; + static const struct drm_display_mode chunghwa_claa101wa01a_mode = { .clock = 72070, .hdisplay = 1366, @@ -329,6 +418,7 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = { static const struct panel_desc chunghwa_claa101wa01a = { .modes = &chunghwa_claa101wa01a_mode, .num_modes = 1, + .bpc = 6, .size = { .width = 220, .height = 120, @@ -351,6 +441,7 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = { static const struct panel_desc chunghwa_claa101wb01 = { .modes = &chunghwa_claa101wb01_mode, .num_modes = 1, + .bpc = 6, .size = { .width = 
223, .height = 125, @@ -374,6 +465,7 @@ static const struct drm_display_mode edt_et057090dhu_mode = { static const struct panel_desc edt_et057090dhu = { .modes = &edt_et057090dhu_mode, .num_modes = 1, + .bpc = 6, .size = { .width = 115, .height = 86, @@ -397,12 +489,82 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = { static const struct panel_desc edt_etm0700g0dh6 = { .modes = &edt_etm0700g0dh6_mode, .num_modes = 1, + .bpc = 6, .size = { .width = 152, .height = 91, }, }; +static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = { + .clock = 32260, + .hdisplay = 800, + .hsync_start = 800 + 168, + .hsync_end = 800 + 168 + 64, + .htotal = 800 + 168 + 64 + 88, + .vdisplay = 480, + .vsync_start = 480 + 37, + .vsync_end = 480 + 37 + 2, + .vtotal = 480 + 37 + 2 + 8, + .vrefresh = 60, +}; + +static const struct panel_desc foxlink_fl500wvr00_a0t = { + .modes = &foxlink_fl500wvr00_a0t_mode, + .num_modes = 1, + .size = { + .width = 108, + .height = 65, + }, +}; + +static const struct drm_display_mode innolux_n116bge_mode = { + .clock = 71000, + .hdisplay = 1366, + .hsync_start = 1366 + 64, + .hsync_end = 1366 + 64 + 6, + .htotal = 1366 + 64 + 6 + 64, + .vdisplay = 768, + .vsync_start = 768 + 8, + .vsync_end = 768 + 8 + 4, + .vtotal = 768 + 8 + 4 + 8, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, +}; + +static const struct panel_desc innolux_n116bge = { + .modes = &innolux_n116bge_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 256, + .height = 144, + }, +}; + +static const struct drm_display_mode innolux_n156bge_l21_mode = { + .clock = 69300, + .hdisplay = 1366, + .hsync_start = 1366 + 16, + .hsync_end = 1366 + 16 + 34, + .htotal = 1366 + 16 + 34 + 50, + .vdisplay = 768, + .vsync_start = 768 + 2, + .vsync_end = 768 + 2 + 6, + .vtotal = 768 + 2 + 6 + 12, + .vrefresh = 60, +}; + +static const struct panel_desc innolux_n156bge_l21 = { + .modes = &innolux_n156bge_l21_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 344, + .height = 193, + }, +}; + static const struct drm_display_mode lg_lp129qe_mode = { .clock = 285250, .hdisplay = 2560, @@ -419,6 +581,7 @@ static const struct drm_display_mode lg_lp129qe_mode = { static const struct panel_desc lg_lp129qe = { .modes = &lg_lp129qe_mode, .num_modes = 1, + .bpc = 8, .size = { .width = 272, .height = 181, @@ -441,6 +604,7 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = { static const struct panel_desc samsung_ltn101nt05 = { .modes = &samsung_ltn101nt05_mode, .num_modes = 1, + .bpc = 6, .size = { .width = 1024, .height = 600, @@ -452,6 +616,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "auo,b101aw03", .data = &auo_b101aw03, }, { + .compatible = "auo,b133htn01", + .data = &auo_b133htn01, + }, { .compatible = "auo,b133xtn01", .data = &auo_b133xtn01, }, { @@ -470,14 +637,21 @@ static const struct of_device_id platform_of_match[] = { .compatible = "edt,etm0700g0dh6", .data = &edt_etm0700g0dh6, }, { + .compatible = "foxlink,fl500wvr00-a0t", + .data = &foxlink_fl500wvr00_a0t, + }, { + .compatible = "innolux,n116bge", + .data = &innolux_n116bge, + }, { + .compatible = "innolux,n156bge-l21", + .data = &innolux_n156bge_l21, + }, { .compatible = "lg,lp129qe", .data = &lg_lp129qe, }, { .compatible = "samsung,ltn101nt05", .data = &samsung_ltn101nt05, }, { - .compatible = "simple-panel", - }, { /* sentinel */ } }; @@ -545,7 +719,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = { .height = 151, }, }, - .flags = MIPI_DSI_MODE_VIDEO, + .flags = 
MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS, .format = MIPI_DSI_FMT_RGB888, .lanes = 4, }; @@ -599,7 +773,8 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = { .height = 136, }, }, - .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE, + .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_CLOCK_NON_CONTINUOUS, .format = MIPI_DSI_FMT_RGB888, .lanes = 4, }; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 5d7ea2461852..b8ced08b6291 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -835,7 +835,7 @@ static void qxl_conn_destroy(struct drm_connector *connector) struct qxl_output *qxl_output = drm_connector_to_qxl_output(connector); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(qxl_output); } @@ -902,7 +902,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output) drm_object_attach_property(&connector->base, qdev->hotplug_mode_update_property, 0); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 6e936634d65c..a3fd92029a14 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -38,7 +38,7 @@ #include "qxl_object.h" extern int qxl_max_ioctls; -static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { +static const struct pci_device_id pciidlist[] = { { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0 }, { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8, diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index f437b30ce689..df567888bb1e 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -660,7 +660,7 @@ static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev) return 0; } -static struct drm_fb_helper_funcs qxl_fb_helper_funcs = { +static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { .fb_probe = qxl_fb_find_or_create_single, }; @@ -676,9 +676,12 @@ int qxl_fbdev_init(struct qxl_device *qdev) qfbdev->qdev = qdev; qdev->mode_info.qfbdev = qfbdev; - qfbdev->helper.funcs = &qxl_fb_helper_funcs; spin_lock_init(&qfbdev->delayed_ops_lock); INIT_LIST_HEAD(&qfbdev->delayed_ops); + + drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper, + &qxl_fb_helper_funcs); + ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, qxl_num_crtc /* num_crtc - QXL supports just 1 */, QXLFB_CONN_LIMIT); diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index d458a140c024..83a423293afd 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait) { int r; - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) { struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; @@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, { int r; - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) { struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 
dbcbfe80aac0..f77b7135ee4c 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -76,11 +76,11 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ - si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ + si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ - ci_dpm.o dce6_afmt.o radeon_vm.o + ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o # add async DMA block radeon-y += \ diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index b1e11f8434e2..ac14b67621d3 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) u8 msg[DP_DPCD_SIZE]; int ret; - char dpcd_hex_dump[DP_DPCD_SIZE * 3]; - ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, DP_DPCD_SIZE); if (ret > 0) { memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd), - 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); - DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); radeon_dp_probe_oui(radeon_connector); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 7d68203a3737..a7f2ddf09a9d 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -331,12 +331,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; - /* get the native mode for LVDS */ - if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) + /* get the native mode for scaling */ + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { radeon_panel_mode_fixup(encoder, adjusted_mode); - - /* get the native mode for TV */ - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { + } else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; if (tv_dac) { if (tv_dac->tv_std == TV_STD_NTSC || @@ -346,6 +344,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, else radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); } + } else if (radeon_encoder->rmx_type != RMX_OFF) { + radeon_panel_mode_fixup(encoder, adjusted_mode); } if (ASIC_IS_DCE3(rdev) && @@ -716,7 +716,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if (radeon_connector->use_digital && (radeon_connector->audio == RADEON_AUDIO_ENABLE)) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(radeon_connector->edid) && + else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && (radeon_connector->audio == RADEON_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) @@ -735,7 +735,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if (radeon_audio != 0) { if (radeon_connector->audio == 
RADEON_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(radeon_connector->edid) && + else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && (radeon_connector->audio == RADEON_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else @@ -755,7 +755,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) } else if (radeon_audio != 0) { if (radeon_connector->audio == RADEON_AUDIO_ENABLE) return ATOM_ENCODER_MODE_HDMI; - else if (drm_detect_hdmi_monitor(radeon_connector->edid) && + else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && (radeon_connector->audio == RADEON_AUDIO_AUTO)) return ATOM_ENCODER_MODE_HDMI; else diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 584090ac3eb9..d416bb2ff48d 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev, WREG32_SMC(CG_THERMAL_CTRL, tmp); #endif + rdev->pm.dpm.thermal.min_temp = low_temp; + rdev->pm.dpm.thermal.max_temp = high_temp; + return 0; } @@ -940,7 +943,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev) pi->vddc_leakage.count = 0; pi->vddci_leakage.count = 0; - if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0) + continue; + if (vddc != 0 && vddc != virtual_voltage_id) { + pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; + pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; + pi->vddc_leakage.count++; + } + } + } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci, diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c index 8debc9d47362..b630edc2fd0c 100644 --- a/drivers/gpu/drm/radeon/ci_smc.c +++ b/drivers/gpu/drm/radeon/ci_smc.c @@ -213,24 +213,37 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) if (!rdev->smc_fw) return -EINVAL; - switch (rdev->family) { - case CHIP_BONAIRE: - ucode_start_address = BONAIRE_SMC_UCODE_START; - ucode_size = BONAIRE_SMC_UCODE_SIZE; - break; - case CHIP_HAWAII: - ucode_start_address = HAWAII_SMC_UCODE_START; - ucode_size = HAWAII_SMC_UCODE_SIZE; - break; - default: - DRM_ERROR("unknown asic in smc ucode loader\n"); - BUG(); + if (rdev->new_fw) { + const struct smc_firmware_header_v1_0 *hdr = + (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data; + + radeon_ucode_print_smc_hdr(&hdr->header); + + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + src = (const u8 *) + (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + } else { + switch (rdev->family) { + case CHIP_BONAIRE: + ucode_start_address = BONAIRE_SMC_UCODE_START; + ucode_size = BONAIRE_SMC_UCODE_SIZE; + break; + case CHIP_HAWAII: + ucode_start_address = HAWAII_SMC_UCODE_START; + ucode_size = HAWAII_SMC_UCODE_SIZE; + break; + default: + DRM_ERROR("unknown asic in smc ucode loader\n"); + BUG(); + } + + src = (const u8 *)rdev->smc_fw->data; } if (ucode_size & 3) return -EINVAL; - src = (const u8 
*)rdev->smc_fw->data; spin_lock_irqsave(&rdev->smc_idx_lock, flags); WREG32(SMC_IND_INDEX_0, ucode_start_address); WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index c0ea66192fe0..3d546c606b43 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -42,6 +42,16 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin"); MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); + +MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); +MODULE_FIRMWARE("radeon/bonaire_me.bin"); +MODULE_FIRMWARE("radeon/bonaire_ce.bin"); +MODULE_FIRMWARE("radeon/bonaire_mec.bin"); +MODULE_FIRMWARE("radeon/bonaire_mc.bin"); +MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); +MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); +MODULE_FIRMWARE("radeon/bonaire_smc.bin"); + MODULE_FIRMWARE("radeon/HAWAII_pfp.bin"); MODULE_FIRMWARE("radeon/HAWAII_me.bin"); MODULE_FIRMWARE("radeon/HAWAII_ce.bin"); @@ -51,18 +61,45 @@ MODULE_FIRMWARE("radeon/HAWAII_mc2.bin"); MODULE_FIRMWARE("radeon/HAWAII_rlc.bin"); MODULE_FIRMWARE("radeon/HAWAII_sdma.bin"); MODULE_FIRMWARE("radeon/HAWAII_smc.bin"); + +MODULE_FIRMWARE("radeon/hawaii_pfp.bin"); +MODULE_FIRMWARE("radeon/hawaii_me.bin"); +MODULE_FIRMWARE("radeon/hawaii_ce.bin"); +MODULE_FIRMWARE("radeon/hawaii_mec.bin"); +MODULE_FIRMWARE("radeon/hawaii_mc.bin"); +MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); +MODULE_FIRMWARE("radeon/hawaii_sdma.bin"); +MODULE_FIRMWARE("radeon/hawaii_smc.bin"); + MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); MODULE_FIRMWARE("radeon/KAVERI_me.bin"); MODULE_FIRMWARE("radeon/KAVERI_ce.bin"); MODULE_FIRMWARE("radeon/KAVERI_mec.bin"); MODULE_FIRMWARE("radeon/KAVERI_rlc.bin"); MODULE_FIRMWARE("radeon/KAVERI_sdma.bin"); + +MODULE_FIRMWARE("radeon/kaveri_pfp.bin"); +MODULE_FIRMWARE("radeon/kaveri_me.bin"); +MODULE_FIRMWARE("radeon/kaveri_ce.bin"); +MODULE_FIRMWARE("radeon/kaveri_mec.bin"); +MODULE_FIRMWARE("radeon/kaveri_mec2.bin"); +MODULE_FIRMWARE("radeon/kaveri_rlc.bin"); +MODULE_FIRMWARE("radeon/kaveri_sdma.bin"); + MODULE_FIRMWARE("radeon/KABINI_pfp.bin"); MODULE_FIRMWARE("radeon/KABINI_me.bin"); MODULE_FIRMWARE("radeon/KABINI_ce.bin"); MODULE_FIRMWARE("radeon/KABINI_mec.bin"); MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); + +MODULE_FIRMWARE("radeon/kabini_pfp.bin"); +MODULE_FIRMWARE("radeon/kabini_me.bin"); +MODULE_FIRMWARE("radeon/kabini_ce.bin"); +MODULE_FIRMWARE("radeon/kabini_mec.bin"); +MODULE_FIRMWARE("radeon/kabini_rlc.bin"); +MODULE_FIRMWARE("radeon/kabini_sdma.bin"); + MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); MODULE_FIRMWARE("radeon/MULLINS_me.bin"); MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); @@ -70,6 +107,13 @@ MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); +MODULE_FIRMWARE("radeon/mullins_pfp.bin"); +MODULE_FIRMWARE("radeon/mullins_me.bin"); +MODULE_FIRMWARE("radeon/mullins_ce.bin"); +MODULE_FIRMWARE("radeon/mullins_mec.bin"); +MODULE_FIRMWARE("radeon/mullins_rlc.bin"); +MODULE_FIRMWARE("radeon/mullins_sdma.bin"); + extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); @@ -1760,27 +1804,44 @@ static void cik_srbm_select(struct radeon_device *rdev, */ int ci_mc_load_microcode(struct radeon_device *rdev) { - const __be32 
*fw_data; + const __be32 *fw_data = NULL; + const __le32 *new_fw_data = NULL; u32 running, blackout = 0; - u32 *io_mc_regs; + u32 *io_mc_regs = NULL; + const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; - ucode_size = rdev->mc_fw->size / 4; + if (rdev->new_fw) { + const struct mc_firmware_header_v1_0 *hdr = + (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data; - switch (rdev->family) { - case CHIP_BONAIRE: - io_mc_regs = (u32 *)&bonaire_io_mc_regs; - regs_size = BONAIRE_IO_MC_REGS_SIZE; - break; - case CHIP_HAWAII: - io_mc_regs = (u32 *)&hawaii_io_mc_regs; - regs_size = HAWAII_IO_MC_REGS_SIZE; - break; - default: - return -EINVAL; + radeon_ucode_print_mc_hdr(&hdr->header); + + regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); + new_io_mc_regs = (const __le32 *) + (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + new_fw_data = (const __le32 *) + (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + } else { + ucode_size = rdev->mc_fw->size / 4; + + switch (rdev->family) { + case CHIP_BONAIRE: + io_mc_regs = (u32 *)&bonaire_io_mc_regs; + regs_size = BONAIRE_IO_MC_REGS_SIZE; + break; + case CHIP_HAWAII: + io_mc_regs = (u32 *)&hawaii_io_mc_regs; + regs_size = HAWAII_IO_MC_REGS_SIZE; + break; + default: + return -EINVAL; + } + fw_data = (const __be32 *)rdev->mc_fw->data; } running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; @@ -1797,13 +1858,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev) /* load mc io regs */ for (i = 0; i < regs_size; i++) { - WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); - WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); + if (rdev->new_fw) { + WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++)); + WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++)); + } else { + WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); + WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); + } } /* load the MC ucode */ - fw_data = (const __be32 *)rdev->mc_fw->data; - for (i = 0; i < ucode_size; i++) - WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); + for (i = 0; i < ucode_size; i++) { + if (rdev->new_fw) + WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++)); + else + WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); + } /* put the engine back into the active state */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); @@ -1841,17 +1910,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev) static int cik_init_microcode(struct radeon_device *rdev) { const char *chip_name; + const char *new_chip_name; size_t pfp_req_size, me_req_size, ce_req_size, mec_req_size, rlc_req_size, mc_req_size = 0, sdma_req_size, smc_req_size = 0, mc2_req_size = 0; char fw_name[30]; + int new_fw = 0; int err; + int num_fw; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_BONAIRE: chip_name = "BONAIRE"; + new_chip_name = "bonaire"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; ce_req_size = CIK_CE_UCODE_SIZE * 4; @@ -1861,9 +1934,11 @@ static int cik_init_microcode(struct radeon_device *rdev) mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); + num_fw = 8; break; case CHIP_HAWAII: chip_name = "HAWAII"; + new_chip_name = "hawaii"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; ce_req_size = CIK_CE_UCODE_SIZE * 4; @@ -1873,142 +1948,285 @@ static int cik_init_microcode(struct 
radeon_device *rdev) mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4); + num_fw = 8; break; case CHIP_KAVERI: chip_name = "KAVERI"; + new_chip_name = "kaveri"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; ce_req_size = CIK_CE_UCODE_SIZE * 4; mec_req_size = CIK_MEC_UCODE_SIZE * 4; rlc_req_size = KV_RLC_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; + num_fw = 7; break; case CHIP_KABINI: chip_name = "KABINI"; + new_chip_name = "kabini"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; ce_req_size = CIK_CE_UCODE_SIZE * 4; mec_req_size = CIK_MEC_UCODE_SIZE * 4; rlc_req_size = KB_RLC_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; + num_fw = 6; break; case CHIP_MULLINS: chip_name = "MULLINS"; + new_chip_name = "mullins"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; ce_req_size = CIK_CE_UCODE_SIZE * 4; mec_req_size = CIK_MEC_UCODE_SIZE * 4; rlc_req_size = ML_RLC_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; + num_fw = 6; break; default: BUG(); } - DRM_INFO("Loading %s Microcode\n", chip_name); + DRM_INFO("Loading %s Microcode\n", new_chip_name); - snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->pfp_fw->size != pfp_req_size) { - printk(KERN_ERR - "cik_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->pfp_fw->size, fw_name); - err = -EINVAL; - goto out; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->pfp_fw->size != pfp_req_size) { + printk(KERN_ERR + "cik_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->pfp_fw->size, fw_name); + err = -EINVAL; + goto out; + } + } else { + err = radeon_ucode_validate(rdev->pfp_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name); err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->me_fw->size != me_req_size) { - printk(KERN_ERR - "cik_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->me_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->me_fw->size != me_req_size) { + printk(KERN_ERR + "cik_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->me_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->me_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name); err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->ce_fw->size != ce_req_size) { - printk(KERN_ERR - "cik_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->ce_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", 
chip_name); + err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->ce_fw->size != ce_req_size) { + printk(KERN_ERR + "cik_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->ce_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->ce_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name); err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->mec_fw->size != mec_req_size) { - printk(KERN_ERR - "cik_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->mec_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); + err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->mec_fw->size != mec_req_size) { + printk(KERN_ERR + "cik_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->mec_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->mec_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + if (rdev->family == CHIP_KAVERI) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name); + err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev); + if (err) { + goto out; + } else { + err = radeon_ucode_validate(rdev->mec2_fw); + if (err) { + goto out; + } else { + new_fw++; + } + } + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name); err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->rlc_fw->size != rlc_req_size) { - printk(KERN_ERR - "cik_rlc: Bogus length %zu in firmware \"%s\"\n", - rdev->rlc_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->rlc_fw->size != rlc_req_size) { + printk(KERN_ERR + "cik_rlc: Bogus length %zu in firmware \"%s\"\n", + rdev->rlc_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->rlc_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name); err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->sdma_fw->size != sdma_req_size) { - printk(KERN_ERR - "cik_sdma: Bogus length %zu in firmware \"%s\"\n", - rdev->sdma_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); + err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->sdma_fw->size != sdma_req_size) { + printk(KERN_ERR + "cik_sdma: Bogus length %zu in firmware \"%s\"\n", + rdev->sdma_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->sdma_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } /* No SMC, MC ucode on 
APUs */ if (!(rdev->flags & RADEON_IS_IGP)) { - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) { - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); - if (err) + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if ((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)){ + printk(KERN_ERR + "cik_mc: Bogus length %zu in firmware \"%s\"\n", + rdev->mc_fw->size, fw_name); + err = -EINVAL; + } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); + } else { + err = radeon_ucode_validate(rdev->mc_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); goto out; + } else { + new_fw++; + } } - if ((rdev->mc_fw->size != mc_req_size) && - (rdev->mc_fw->size != mc2_req_size)){ - printk(KERN_ERR - "cik_mc: Bogus length %zu in firmware \"%s\"\n", - rdev->mc_fw->size, fw_name); - err = -EINVAL; - } - DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); - snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) { - printk(KERN_ERR - "smc: error loading firmware \"%s\"\n", - fw_name); - release_firmware(rdev->smc_fw); - rdev->smc_fw = NULL; - err = 0; - } else if (rdev->smc_fw->size != smc_req_size) { - printk(KERN_ERR - "cik_smc: Bogus length %zu in firmware \"%s\"\n", - rdev->smc_fw->size, fw_name); - err = -EINVAL; + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + err = 0; + } else if (rdev->smc_fw->size != smc_req_size) { + printk(KERN_ERR + "cik_smc: Bogus length %zu in firmware \"%s\"\n", + rdev->smc_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->smc_fw); + if (err) { + printk(KERN_ERR + "cik_fw: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } } + if (new_fw == 0) { + rdev->new_fw = false; + } else if (new_fw < num_fw) { + printk(KERN_ERR "ci_fw: mixing new and old firmware!\n"); + err = -EINVAL; + } else { + rdev->new_fw = true; + } + out: if (err) { if (err != -EINVAL) @@ -2021,8 +2239,14 @@ out: rdev->me_fw = NULL; release_firmware(rdev->ce_fw); rdev->ce_fw = NULL; + release_firmware(rdev->mec_fw); + rdev->mec_fw = NULL; + release_firmware(rdev->mec2_fw); + rdev->mec2_fw = NULL; release_firmware(rdev->rlc_fw); rdev->rlc_fw = NULL; + release_firmware(rdev->sdma_fw); + rdev->sdma_fw = NULL; release_firmware(rdev->mc_fw); rdev->mc_fw = NULL; release_firmware(rdev->smc_fw); @@ -3259,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev) u32 mc_shared_chmap, mc_arb_ramcfg; u32 hdp_host_path_cntl; u32 tmp; - int i, j, k; + int i, j; switch (rdev->family) { case CHIP_BONAIRE: @@ -3320,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev) (rdev->pdev->device == 0x130B) || (rdev->pdev->device == 0x130E) || (rdev->pdev->device == 
0x1315) || + (rdev->pdev->device == 0x1318) || (rdev->pdev->device == 0x131B)) { rdev->config.cik.max_cu_per_sh = 4; rdev->config.cik.max_backends_per_se = 1; @@ -3448,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev) rdev->config.cik.max_sh_per_se, rdev->config.cik.max_backends_per_se); + rdev->config.cik.active_cus = 0; for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { - for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) { - rdev->config.cik.active_cus += - hweight32(cik_get_cu_active_bitmap(rdev, i, j)); - } + rdev->config.cik.active_cus += + hweight32(cik_get_cu_active_bitmap(rdev, i, j)); } } @@ -3577,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -3666,8 +3890,6 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); - /* HDP flush */ - cik_hdp_flush_cp_ring_emit(rdev, fence->ring); } /** @@ -3696,10 +3918,19 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, upper_32_bits(addr)); radeon_ring_write(ring, fence->seq); radeon_ring_write(ring, 0); - /* HDP flush */ - cik_hdp_flush_cp_ring_emit(rdev, fence->ring); } +/** + * cik_semaphore_ring_emit - emit a semaphore on the CP ring + * + * @rdev: radeon_device pointer + * @ring: radeon ring buffer object + * @semaphore: radeon semaphore object + * @emit_wait: Is this a sempahore wait? + * + * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP + * from running ahead of semaphore waits. 
+ */ bool cik_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, @@ -3712,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); + if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) { + /* Prevent the PFP from running ahead of the semaphore wait */ + radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + radeon_ring_write(ring, 0x0); + } + return true; } @@ -3784,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, return r; } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_semaphore_free(rdev, &sem, *fence); return r; @@ -3883,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { radeon_scratch_free(rdev, scratch); radeon_ib_free(rdev, &ib); @@ -3969,7 +4206,6 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable) */ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) { - const __be32 *fw_data; int i; if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) @@ -3977,26 +4213,70 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev) cik_cp_gfx_enable(rdev, false); - /* PFP */ - fw_data = (const __be32 *)rdev->pfp_fw->data; - WREG32(CP_PFP_UCODE_ADDR, 0); - for (i = 0; i < CIK_PFP_UCODE_SIZE; i++) - WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_PFP_UCODE_ADDR, 0); - - /* CE */ - fw_data = (const __be32 *)rdev->ce_fw->data; - WREG32(CP_CE_UCODE_ADDR, 0); - for (i = 0; i < CIK_CE_UCODE_SIZE; i++) - WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_CE_UCODE_ADDR, 0); - - /* ME */ - fw_data = (const __be32 *)rdev->me_fw->data; - WREG32(CP_ME_RAM_WADDR, 0); - for (i = 0; i < CIK_ME_UCODE_SIZE; i++) - WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_ME_RAM_WADDR, 0); + if (rdev->new_fw) { + const struct gfx_firmware_header_v1_0 *pfp_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; + const struct gfx_firmware_header_v1_0 *ce_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; + const struct gfx_firmware_header_v1_0 *me_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; + const __le32 *fw_data; + u32 fw_size; + + radeon_ucode_print_gfx_hdr(&pfp_hdr->header); + radeon_ucode_print_gfx_hdr(&ce_hdr->header); + radeon_ucode_print_gfx_hdr(&me_hdr->header); + + /* PFP */ + fw_data = (const __le32 *) + (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + /* CE */ + fw_data = (const __le32 *) + (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_CE_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_CE_UCODE_ADDR, 0); + + /* ME */ + fw_data = (const __be32 *) + (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; + 
WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_ME_RAM_WADDR, 0); + } else { + const __be32 *fw_data; + + /* PFP */ + fw_data = (const __be32 *)rdev->pfp_fw->data; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < CIK_PFP_UCODE_SIZE; i++) + WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + /* CE */ + fw_data = (const __be32 *)rdev->ce_fw->data; + WREG32(CP_CE_UCODE_ADDR, 0); + for (i = 0; i < CIK_CE_UCODE_SIZE; i++) + WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_CE_UCODE_ADDR, 0); + + /* ME */ + fw_data = (const __be32 *)rdev->me_fw->data; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < CIK_ME_UCODE_SIZE; i++) + WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_ME_RAM_WADDR, 0); + } WREG32(CP_PFP_UCODE_ADDR, 0); WREG32(CP_CE_UCODE_ADDR, 0); @@ -4061,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev) radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); return 0; } @@ -4261,7 +4541,6 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) */ static int cik_cp_compute_load_microcode(struct radeon_device *rdev) { - const __be32 *fw_data; int i; if (!rdev->mec_fw) @@ -4269,20 +4548,55 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev) cik_cp_compute_enable(rdev, false); - /* MEC1 */ - fw_data = (const __be32 *)rdev->mec_fw->data; - WREG32(CP_MEC_ME1_UCODE_ADDR, 0); - for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) - WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_MEC_ME1_UCODE_ADDR, 0); + if (rdev->new_fw) { + const struct gfx_firmware_header_v1_0 *mec_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data; + const __le32 *fw_data; + u32 fw_size; + + radeon_ucode_print_gfx_hdr(&mec_hdr->header); + + /* MEC1 */ + fw_data = (const __le32 *) + (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_MEC_ME1_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_MEC_ME1_UCODE_ADDR, 0); - if (rdev->family == CHIP_KAVERI) { /* MEC2 */ + if (rdev->family == CHIP_KAVERI) { + const struct gfx_firmware_header_v1_0 *mec2_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data; + + fw_data = (const __le32 *) + (rdev->mec2_fw->data + + le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_MEC_ME2_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_MEC_ME2_UCODE_ADDR, 0); + } + } else { + const __be32 *fw_data; + + /* MEC1 */ fw_data = (const __be32 *)rdev->mec_fw->data; - WREG32(CP_MEC_ME2_UCODE_ADDR, 0); + WREG32(CP_MEC_ME1_UCODE_ADDR, 0); for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) - WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_MEC_ME2_UCODE_ADDR, 0); + WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_MEC_ME1_UCODE_ADDR, 0); + + if (rdev->family == CHIP_KAVERI) { + /* MEC2 */ + fw_data = (const __be32 *)rdev->mec_fw->data; + WREG32(CP_MEC_ME2_UCODE_ADDR, 0); + for (i = 0; i < CIK_MEC_UCODE_SIZE; i++) + WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++)); + 
WREG32(CP_MEC_ME2_UCODE_ADDR, 0); + } } return 0; @@ -4375,7 +4689,7 @@ static int cik_mec_init(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, NULL, + RADEON_GEM_DOMAIN_GTT, 0, NULL, &rdev->mec.hpd_eop_obj); if (r) { dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -4489,7 +4803,7 @@ struct bonaire_mqd */ static int cik_cp_compute_resume(struct radeon_device *rdev) { - int r, i, idx; + int r, i, j, idx; u32 tmp; bool use_doorbell = true; u64 hqd_gpu_addr; @@ -4545,7 +4859,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) r = radeon_bo_create(rdev, sizeof(struct bonaire_mqd), PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, NULL, + RADEON_GEM_DOMAIN_GTT, 0, NULL, &rdev->ring[idx].mqd_obj); if (r) { dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); @@ -4608,7 +4922,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) mqd->queue_state.cp_hqd_pq_wptr= 0; if (RREG32(CP_HQD_ACTIVE) & 1) { WREG32(CP_HQD_DEQUEUE_REQUEST, 1); - for (i = 0; i < rdev->usec_timeout; i++) { + for (j = 0; j < rdev->usec_timeout; j++) { if (!(RREG32(CP_HQD_ACTIVE) & 1)) break; udelay(1); @@ -5402,7 +5716,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup TLB control */ WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | @@ -5436,20 +5749,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) WREG32(0x15D8, 0); WREG32(0x15DC, 0); - /* empty context1-15 */ - /* FIXME start with 4G, once using 2 level pt switch to full - * vm size space - */ + /* restore context1-15 */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); else WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); } /* enable context1-15 */ @@ -5514,6 +5824,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) */ static void cik_pcie_gart_disable(struct radeon_device *rdev) { + unsigned i; + + for (i = 1; i < 16; ++i) { + uint32_t reg; + if (i < 8) + reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); + else + reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); + rdev->vm_manager.saved_table_addr[i] = RREG32(reg); + } + /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0); @@ -5642,12 +5963,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev, void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) { struct radeon_ring *ring = &rdev->ring[ridx]; + int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX); if (vm == NULL) return; radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); if (vm->id < 8) { radeon_ring_write(ring, @@ -5661,14 +5983,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) /* update SH_MEM_* regs */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, 
SRBM_GFX_CNTL >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, VMID(vm->id)); radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, SH_MEM_BASES >> 2); radeon_ring_write(ring, 0); @@ -5679,7 +6001,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); radeon_ring_write(ring, 0); @@ -5690,14 +6012,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) /* bits 0-15 are the VM contexts0-15 */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm->id); /* compute doesn't have PFP */ - if (ridx == RADEON_RING_TYPE_GFX_INDEX) { + if (usepfp) { /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); @@ -5865,28 +6187,10 @@ static void cik_rlc_start(struct radeon_device *rdev) static int cik_rlc_resume(struct radeon_device *rdev) { u32 i, size, tmp; - const __be32 *fw_data; if (!rdev->rlc_fw) return -EINVAL; - switch (rdev->family) { - case CHIP_BONAIRE: - case CHIP_HAWAII: - default: - size = BONAIRE_RLC_UCODE_SIZE; - break; - case CHIP_KAVERI: - size = KV_RLC_UCODE_SIZE; - break; - case CHIP_KABINI: - size = KB_RLC_UCODE_SIZE; - break; - case CHIP_MULLINS: - size = ML_RLC_UCODE_SIZE; - break; - } - cik_rlc_stop(rdev); /* disable CG */ @@ -5910,11 +6214,45 @@ static int cik_rlc_resume(struct radeon_device *rdev) WREG32(RLC_MC_CNTL, 0); WREG32(RLC_UCODE_CNTL, 0); - fw_data = (const __be32 *)rdev->rlc_fw->data; + if (rdev->new_fw) { + const struct rlc_firmware_header_v1_0 *hdr = + (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data; + const __le32 *fw_data = (const __le32 *) + (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + radeon_ucode_print_rlc_hdr(&hdr->header); + + size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + WREG32(RLC_GPM_UCODE_ADDR, 0); + for (i = 0; i < size; i++) + WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); WREG32(RLC_GPM_UCODE_ADDR, 0); - for (i = 0; i < size; i++) - WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(RLC_GPM_UCODE_ADDR, 0); + } else { + const __be32 *fw_data; + + switch (rdev->family) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + default: + size = BONAIRE_RLC_UCODE_SIZE; + break; + case CHIP_KAVERI: + size = KV_RLC_UCODE_SIZE; + break; + case CHIP_KABINI: + size = KB_RLC_UCODE_SIZE; + break; + case CHIP_MULLINS: + size = ML_RLC_UCODE_SIZE; + break; + } + + fw_data = (const __be32 *)rdev->rlc_fw->data; + WREG32(RLC_GPM_UCODE_ADDR, 0); + for (i = 0; i < size; i++) + WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(RLC_GPM_UCODE_ADDR, 0); + } /* XXX - find out what chips support lbpw */ cik_enable_lbpw(rdev, false); @@ -6348,11 +6686,10 @@ static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable) void 
cik_init_cp_pg_table(struct radeon_device *rdev) { - const __be32 *fw_data; volatile u32 *dst_ptr; int me, i, max_me = 4; u32 bo_offset = 0; - u32 table_offset; + u32 table_offset, table_size; if (rdev->family == CHIP_KAVERI) max_me = 5; @@ -6363,24 +6700,71 @@ void cik_init_cp_pg_table(struct radeon_device *rdev) /* write the cp table buffer */ dst_ptr = rdev->rlc.cp_table_ptr; for (me = 0; me < max_me; me++) { - if (me == 0) { - fw_data = (const __be32 *)rdev->ce_fw->data; - table_offset = CP_ME_TABLE_OFFSET; - } else if (me == 1) { - fw_data = (const __be32 *)rdev->pfp_fw->data; - table_offset = CP_ME_TABLE_OFFSET; - } else if (me == 2) { - fw_data = (const __be32 *)rdev->me_fw->data; - table_offset = CP_ME_TABLE_OFFSET; + if (rdev->new_fw) { + const __le32 *fw_data; + const struct gfx_firmware_header_v1_0 *hdr; + + if (me == 0) { + hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; + fw_data = (const __le32 *) + (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; + fw_data = (const __le32 *) + (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; + fw_data = (const __le32 *) + (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data; + fw_data = (const __le32 *) + (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else { + hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data; + fw_data = (const __le32 *) + (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + bo_offset += table_size; } else { - fw_data = (const __be32 *)rdev->mec_fw->data; - table_offset = CP_MEC_TABLE_OFFSET; - } + const __be32 *fw_data; + table_size = CP_ME_TABLE_SIZE; + + if (me == 0) { + fw_data = (const __be32 *)rdev->ce_fw->data; + table_offset = CP_ME_TABLE_OFFSET; + } else if (me == 1) { + fw_data = (const __be32 *)rdev->pfp_fw->data; + table_offset = CP_ME_TABLE_OFFSET; + } else if (me == 2) { + fw_data = (const __be32 *)rdev->me_fw->data; + table_offset = CP_ME_TABLE_OFFSET; + } else { + fw_data = (const __be32 *)rdev->mec_fw->data; + table_offset = CP_MEC_TABLE_OFFSET; + } - for (i = 0; i < CP_ME_TABLE_SIZE; i ++) { - dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i])); + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + cpu_to_le32(be32_to_cpu(fw_data[table_offset + i])); + } + bo_offset += table_size; } - bo_offset += CP_ME_TABLE_SIZE; } } @@ -7367,17 +7751,17 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev) wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { + wptr &= ~RB_OVERFLOW; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector 
(wptr + 16). Hopefully * this should allow us to catchup. */ - dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", - wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); - wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -7618,7 +8002,8 @@ restart_ih: case 16: /* D5 page flip */ case 18: /* D6 page flip */ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); - radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + if (radeon_use_pflipirq > 0) + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); break; case 42: /* HPD hotplug */ switch (src_data) { @@ -7866,6 +8251,7 @@ restart_ih: /* wptr/rptr are in bytes! */ rptr += 16; rptr &= rdev->ih.ptr_mask; + WREG32(IH_RB_RPTR, rptr); } if (queue_hotplug) schedule_work(&rdev->hotplug_work); @@ -7874,7 +8260,6 @@ restart_ih: if (queue_thermal) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; - WREG32(IH_RB_RPTR, rdev->ih.rptr); atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ @@ -7900,6 +8285,7 @@ restart_ih: static int cik_startup(struct radeon_device *rdev) { struct radeon_ring *ring; + u32 nop; int r; /* enable pcie gen2/3 link */ @@ -8033,9 +8419,18 @@ static int cik_startup(struct radeon_device *rdev) } cik_irq_set(rdev); + if (rdev->family == CHIP_HAWAII) { + if (rdev->new_fw) + nop = PACKET3(PACKET3_NOP, 0x3FFF); + else + nop = RADEON_CP_PACKET2; + } else { + nop = PACKET3(PACKET3_NOP, 0x3FFF); + } + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, - PACKET3(PACKET3_NOP, 0x3FFF)); + nop); if (r) return r; @@ -8043,7 +8438,7 @@ static int cik_startup(struct radeon_device *rdev) /* type-2 packets are deprecated on MEC, use type-3 instead */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, - PACKET3(PACKET3_NOP, 0x3FFF)); + nop); if (r) return r; ring->me = 1; /* first MEC */ @@ -8054,7 +8449,7 @@ static int cik_startup(struct radeon_device *rdev) /* type-2 packets are deprecated on MEC, use type-3 instead */ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, - PACKET3(PACKET3_NOP, 0x3FFF)); + nop); if (r) return r; /* dGPU only have 1 MEC */ @@ -9168,6 +9563,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev) int ret, i; u16 tmp16; + if (pci_is_root_bus(rdev->pdev->bus)) + return; + if (radeon_pcie_gen2 == 0) return; @@ -9394,7 +9792,8 @@ static void cik_program_aspm(struct radeon_device *rdev) if (orig != data) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); - if (!disable_clkreq) { + if (!disable_clkreq && + !pci_is_root_bus(rdev->pdev->bus)) { struct pci_dev *root = rdev->pdev->bus->self; u32 lnkcap; diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 8e9d0f1d858e..c4ffa54b1e3d 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -24,6 +24,7 @@ #include <linux/firmware.h> #include <drm/drmP.h> #include "radeon.h" +#include "radeon_ucode.h" #include "radeon_asic.h" #include "radeon_trace.h" #include "cikd.h" @@ -118,6 +119,7 @@ void cik_sdma_set_wptr(struct radeon_device *rdev, reg = 
SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET; WREG32(reg, (ring->wptr << 2) & 0x3fffc); + (void)RREG32(reg); } /** @@ -419,7 +421,6 @@ static int cik_sdma_rlc_resume(struct radeon_device *rdev) */ static int cik_sdma_load_microcode(struct radeon_device *rdev) { - const __be32 *fw_data; int i; if (!rdev->sdma_fw) @@ -428,19 +429,48 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev) /* halt the MEs */ cik_sdma_enable(rdev, false); - /* sdma0 */ - fw_data = (const __be32 *)rdev->sdma_fw->data; - WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); - for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) - WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++)); - WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); - - /* sdma1 */ - fw_data = (const __be32 *)rdev->sdma_fw->data; - WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); - for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) - WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++)); - WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); + if (rdev->new_fw) { + const struct sdma_firmware_header_v1_0 *hdr = + (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data; + const __le32 *fw_data; + u32 fw_size; + + radeon_ucode_print_sdma_hdr(&hdr->header); + + /* sdma0 */ + fw_data = (const __le32 *) + (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); + for (i = 0; i < fw_size; i++) + WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++)); + WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); + + /* sdma1 */ + fw_data = (const __le32 *) + (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); + for (i = 0; i < fw_size; i++) + WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++)); + WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); + } else { + const __be32 *fw_data; + + /* sdma0 */ + fw_data = (const __be32 *)rdev->sdma_fw->data; + WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); + for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) + WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++)); + WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); + + /* sdma1 */ + fw_data = (const __be32 *)rdev->sdma_fw->data; + WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); + for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++) + WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++)); + WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION); + } WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0); WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0); @@ -459,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev) { int r; - /* Reset dma */ - WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - RREG32(SRBM_SOFT_RESET); - r = cik_sdma_load_microcode(rdev); if (r) return r; @@ -566,7 +589,7 @@ int cik_copy_dma(struct radeon_device *rdev, return r; } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_semaphore_free(rdev, &sem, *fence); return r; @@ -608,7 +631,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev, radeon_ring_write(ring, 
upper_32_bits(rdev->vram_scratch.gpu_addr)); radeon_ring_write(ring, 1); /* number of DWs to follow */ radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { tmp = readl(ptr); @@ -665,7 +688,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { radeon_ib_free(rdev, &ib); DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); @@ -719,7 +742,43 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) } /** - * cik_sdma_vm_set_page - update the page tables using sDMA + * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using sDMA (CIK). + */ +void cik_sdma_vm_copy_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + while (count) { + unsigned bytes = count * 8; + if (bytes > 0x1FFFF8) + bytes = 0x1FFFF8; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + + pe += bytes; + src += bytes; + count -= bytes / 8; + } +} + +/** + * cik_sdma_vm_write_pages - update PTEs by writing them manually * * @rdev: radeon_device pointer * @ib: indirect buffer to fill with commands @@ -729,84 +788,103 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) * @incr: increase next addr by incr bytes * @flags: access flags * - * Update the page tables using sDMA (CIK). + * Update PTEs by writing them manually using sDMA (CIK). 
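+ * Each SDMA_OPCODE_WRITE (linear) packet below carries the destination pe, its
+ * upper bits and a dword count of at most 0xFFFFE, followed by one 64-bit value
+ * per entry; system pages are translated via radeon_vm_map_gart() before the
+ * access flags are ORed in.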
*/ -void cik_sdma_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +void cik_sdma_vm_write_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) { uint64_t value; unsigned ndw; - trace_radeon_vm_set_page(pe, addr, count, incr, flags); - - if (flags == R600_PTE_GART) { - uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } - } else if (flags & R600_PTE_SYSTEM) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); value &= 0xFFFFFFFFFFFFF000ULL; - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & R600_PTE_VALID) + } else if (flags & R600_PTE_VALID) { value = addr; - else + } else { value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ + } + addr += incr; + value |= flags; + ib->ptr[ib->length_dw++] = value; ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; } } +} + +/** + * cik_sdma_vm_set_pages - update the page tables using sDMA + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using sDMA (CIK). 
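+ * A single SDMA_OPCODE_GENERATE_PTE_PDE packet programs the base value, the
+ * flags mask and the per-entry increment, and lets the engine generate up to
+ * 0x7FFFF contiguous entries on its own.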
+ */ +void cik_sdma_vm_set_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count; + if (ndw > 0x7FFFF) + ndw = 0x7FFFF; + + if (flags & R600_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = ndw; /* number of entries */ + + pe += ndw * 8; + addr += ndw * incr; + count -= ndw; + } +} + +/** + * cik_sdma_vm_pad_ib - pad the IB to the required number of dw + * + * @ib: indirect buffer to fill with padding + * + */ +void cik_sdma_vm_pad_ib(struct radeon_ib *ib) +{ while (ib->length_dw & 0x7) ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0); } diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 0a65dc7e93e7..ab29f953a767 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -136,13 +136,13 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | AUDIO_LIPSYNC(connector->audio_latency[1]); else - tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255); + tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); } else { if (connector->latency_present[0]) tmp = VIDEO_LIPSYNC(connector->video_latency[0]) | AUDIO_LIPSYNC(connector->audio_latency[0]); else - tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255); + tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); } WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); } @@ -164,8 +164,10 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) + if (connector->encoder == encoder) { radeon_connector = to_radeon_connector(connector); + break; + } } if (!radeon_connector) { @@ -173,7 +175,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) return; } - sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); + sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); if (sad_count <= 0) { DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); return; @@ -225,8 +227,10 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) + if (connector->encoder == encoder) { radeon_connector = to_radeon_connector(connector); + break; + } } if (!radeon_connector) { @@ -234,7 +238,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) return; } - sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); + sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 15e4f28015e1..e50807c29f69 
100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2424,7 +2424,6 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | @@ -2677,7 +2676,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s if (save->crtc_enabled[i]) { if (ASIC_IS_DCE6(rdev)) { tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN; WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); @@ -2870,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); cp_me = 0xff; WREG32(CP_ME_CNTL, cp_me); @@ -2913,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* */ - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); return 0; } @@ -4023,7 +4022,8 @@ int sumo_rlc_init(struct radeon_device *rdev) /* save restore block */ if (rdev->rlc.save_restore_obj == NULL) { r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + &rdev->rlc.save_restore_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); return r; @@ -4101,7 +4101,8 @@ int sumo_rlc_init(struct radeon_device *rdev) if (rdev->rlc.clear_state_obj == NULL) { r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + &rdev->rlc.clear_state_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); sumo_rlc_fini(rdev); @@ -4175,8 +4176,10 @@ int sumo_rlc_init(struct radeon_device *rdev) if (rdev->rlc.cp_table_size) { if (rdev->rlc.cp_table_obj == NULL) { - r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj); + r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, + PAGE_SIZE, true, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + &rdev->rlc.cp_table_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); sumo_rlc_fini(rdev); @@ -4746,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { + wptr &= ~RB_OVERFLOW; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 16). Hopefully * this should allow us to catchup. 
*/ - dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", - wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); - wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -4961,7 +4964,8 @@ restart_ih: case 16: /* D5 page flip */ case 18: /* D6 page flip */ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); - radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + if (radeon_use_pflipirq > 0) + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); break; case 42: /* HPD hotplug */ switch (src_data) { @@ -5133,6 +5137,7 @@ restart_ih: /* wptr/rptr are in bytes! */ rptr += 16; rptr &= rdev->ih.ptr_mask; + WREG32(IH_RB_RPTR, rptr); } if (queue_hotplug) schedule_work(&rdev->hotplug_work); @@ -5141,7 +5146,6 @@ restart_ih: if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; - WREG32(IH_RB_RPTR, rdev->ih.rptr); atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 478caefe0fef..afaba388c36d 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, return r; } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_semaphore_free(rdev, &sem, *fence); return r; diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 1ec0e6e83f9f..278c7a139d74 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -117,7 +117,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) return; } - sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); + sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); if (sad_count <= 0) { DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); return; @@ -172,7 +172,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) return; } - sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); + sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 9ef8c38f2d66..67cb472d188c 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -33,6 +33,8 @@ #define KV_MINIMUM_ENGINE_CLOCK 800 #define SMC_RAM_END 0x40000 +static int kv_enable_nb_dpm(struct radeon_device *rdev, + bool enable); static void kv_init_graphics_levels(struct radeon_device *rdev); static int kv_calculate_ds_divider(struct radeon_device *rdev); static int kv_calculate_nbps_level_settings(struct radeon_device *rdev); @@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev) { kv_smc_bapm_enable(rdev, false); + if (rdev->family == CHIP_MULLINS) + kv_enable_nb_dpm(rdev, false); + /* powerup blocks */ kv_dpm_powergate_acp(rdev, false); kv_dpm_powergate_samu(rdev, false); @@ -1438,14 +1443,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) return kv_enable_uvd_dpm(rdev, !gate); 
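/*
 * Illustrative sketch, not part of the patch: the IH ring-buffer overflow fixes
 * in the r600/evergreen/cik hunks of this series all follow the same pattern --
 * strip the RB_OVERFLOW status bit from the raw write pointer before using it,
 * resume the read pointer one 16-byte vector past the overwritten slot, and ack
 * the condition via IH_WPTR_OVERFLOW_CLEAR.  The helper below models only the
 * pointer arithmetic; overflow_bit and ptr_mask stand in for the real register
 * fields, and the MMIO accesses are omitted.
 */
static unsigned int ih_get_wptr_sketch(unsigned int raw_wptr, unsigned int *rptr,
				       unsigned int ptr_mask, unsigned int overflow_bit)
{
	unsigned int wptr = raw_wptr;

	if (wptr & overflow_bit) {
		/* clear the status bit first so it never leaks into the offset */
		wptr &= ~overflow_bit;
		/* each IH vector is 16 bytes; skip the one that was overwritten */
		*rptr = (wptr + 16) & ptr_mask;
	}
	return wptr & ptr_mask;
}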
} -static u8 kv_get_vce_boot_level(struct radeon_device *rdev) +static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk) { u8 i; struct radeon_vce_clock_voltage_dependency_table *table = &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; for (i = 0; i < table->count; i++) { - if (table->entries[i].evclk >= 0) /* XXX */ + if (table->entries[i].evclk >= evclk) break; } @@ -1468,7 +1473,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev, if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else - pi->vce_boot_level = kv_get_vce_boot_level(rdev); + pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk); ret = kv_copy_bytes_to_smc(rdev, pi->dpm_table_start + @@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev, return ret; } -static int kv_enable_nb_dpm(struct radeon_device *rdev) +static int kv_enable_nb_dpm(struct radeon_device *rdev, + bool enable) { struct kv_power_info *pi = kv_get_pi(rdev); int ret = 0; - if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { - ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable); - if (ret == 0) - pi->nb_dpm_enabled = true; + if (enable) { + if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { + ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable); + if (ret == 0) + pi->nb_dpm_enabled = true; + } + } else { + if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { + ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable); + if (ret == 0) + pi->nb_dpm_enabled = false; + } } return ret; @@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) } kv_update_sclk_t(rdev); if (rdev->family == CHIP_MULLINS) - kv_enable_nb_dpm(rdev); + kv_enable_nb_dpm(rdev, true); } } else { if (pi->enable_dpm) { @@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) } kv_update_acp_boot_level(rdev); kv_update_sclk_t(rdev); - kv_enable_nb_dpm(rdev); + kv_enable_nb_dpm(rdev, true); } } @@ -2726,7 +2740,10 @@ int kv_dpm_init(struct radeon_device *rdev) pi->caps_sclk_ds = true; pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; - pi->bapm_enable = true; + if (radeon_bapm == 0) + pi->bapm_enable = false; + else + pi->bapm_enable = true; pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? 
*/ diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 5a33ca681867..3faee58946dd 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1229,7 +1229,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup TLB control */ WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | @@ -1272,7 +1271,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); } /* enable context1-7 */ @@ -1304,6 +1303,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) static void cayman_pcie_gart_disable(struct radeon_device *rdev) { + unsigned i; + + for (i = 1; i < 8; ++i) { + rdev->vm_manager.saved_table_addr[i] = RREG32( + VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2)); + } + /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0); @@ -1506,7 +1512,7 @@ static int cayman_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); cayman_cp_enable(rdev, true); @@ -1548,7 +1554,7 @@ static int cayman_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* */ - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); /* XXX init other rings */ diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 6378e0276691..f26f0a9fb522 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev) u32 reg_offset, wb_offset; int i, r; - /* Reset dma */ - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - for (i = 0; i < 2; i++) { if (i == 0) { ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; @@ -307,7 +301,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) } /** - * cayman_dma_vm_set_page - update the page tables using the DMA + * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr where to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using the DMA (cayman/TN). 
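+ * Each DMA_PACKET_COPY below moves at most 0xFFFFE dwords, i.e. ndw/2
+ * eight-byte PTEs, straight from the GART table at src to pe, so no CPU-side
+ * translation of the entries is needed.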
+ */ +void cayman_dma_vm_copy_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, + 0, 0, ndw); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; + + pe += ndw * 4; + src += ndw * 4; + count -= ndw / 2; + } +} + +/** + * cayman_dma_vm_write_pages - update PTEs by writing them manually * * @rdev: radeon_device pointer * @ib: indirect buffer to fill with commands @@ -315,71 +345,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: hw access flags + * @flags: hw access flags * - * Update the page tables using the DMA (cayman/TN). + * Update PTEs by writing them manually using the DMA (cayman/TN). */ -void cayman_dma_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +void cayman_dma_vm_write_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) { uint64_t value; unsigned ndw; - trace_radeon_vm_set_page(pe, addr, count, incr, flags); - - if ((flags & R600_PTE_SYSTEM) || (count == 1)) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - if (flags & R600_PTE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & R600_PTE_VALID) { - value = addr; - } else { - value = 0; - } - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - if (flags & R600_PTE_VALID) + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, + 0, 0, ndw); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & R600_PTE_SYSTEM) { + value = radeon_vm_map_gart(rdev, addr); + value &= 0xFFFFFFFFFFFFF000ULL; + } else if (flags & R600_PTE_VALID) { value = addr; - else + } else { value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ + } + addr += incr; + value |= flags; + ib->ptr[ib->length_dw++] = value; ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - pe += ndw * 4; - addr += (ndw / 2) * incr; - count -= ndw / 2; } } +} + +/** + * 
cayman_dma_vm_set_pages - update the page tables using the DMA + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: hw access flags + * + * Update the page tables using the DMA (cayman/TN). + */ +void cayman_dma_vm_set_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + if (flags & R600_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + + pe += ndw * 4; + addr += (ndw / 2) * incr; + count -= ndw / 2; + } +} + +/** + * cayman_dma_vm_pad_ib - pad the IB to the required number of dw + * + * @ib: indirect buffer to fill with padding + * + */ +void cayman_dma_vm_pad_ib(struct radeon_ib *ib) +{ while (ib->length_dw & 0x7) ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 1544efcf1c3a..b0098e792e62 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -652,7 +652,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev) { uint32_t tmp; - radeon_gart_restore(rdev); /* discard memory request outside of configured range */ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; WREG32(RADEON_AIC_CNTL, tmp); @@ -683,7 +682,7 @@ void r100_pci_gart_disable(struct radeon_device *rdev) } void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr) + uint64_t addr, uint32_t flags) { u32 *gtt = rdev->gart.ptr; gtt[i] = cpu_to_le32(lower_32_bits(addr)); @@ -822,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } +/** + * r100_ring_hdp_flush - flush Host Data Path via the ring buffer + * rdev: radeon device structure + * ring: ring buffer struct for emitting packets + */ +static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring) +{ + radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); + radeon_ring_write(ring, rdev->config.r100.hdp_cntl | + RADEON_HDP_READ_BUFFER_INVALIDATE); + radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); + radeon_ring_write(ring, rdev->config.r100.hdp_cntl); +} + /* Who ever call radeon_fence_emit should call ring_lock and ask * for enough space (today caller are ib schedule and buffer move) */ void r100_fence_ring_emit(struct radeon_device *rdev, @@ -838,11 +851,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev, /* Wait until IDLE & CLEAN */ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); - radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); - radeon_ring_write(ring, rdev->config.r100.hdp_cntl | - RADEON_HDP_READ_BUFFER_INVALIDATE); - 
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); - radeon_ring_write(ring, rdev->config.r100.hdp_cntl); + r100_ring_hdp_flush(rdev, ring); /* Emit fence sequence & fire IRQ */ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); radeon_ring_write(ring, fence->seq); @@ -930,7 +939,7 @@ int r100_copy_blit(struct radeon_device *rdev, if (fence) { r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); return r; } @@ -963,7 +972,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) RADEON_ISYNC_ANY3D_IDLE2D | RADEON_ISYNC_WAIT_IDLEGUI | RADEON_ISYNC_CPSCRATCH_IDLEGUI); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); } @@ -1401,7 +1410,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p, */ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) { - struct drm_mode_object *obj; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_cs_packet p3reloc, waitreloc; @@ -1441,12 +1449,11 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 5); reg = R100_CP_PACKET0_GET_REG(header); - obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { + crtc = drm_crtc_find(p->rdev->ddev, crtc_id); + if (!crtc) { DRM_ERROR("cannot find crtc %d\n", crtc_id); return -ENOENT; } - crtc = obj_to_crtc(obj); radeon_crtc = to_radeon_crtc(crtc); crtc_id = radeon_crtc->crtc_id; @@ -3631,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) } radeon_ring_write(ring, PACKET0(scratch, 0)); radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { @@ -3693,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.ptr[6] = PACKET2(0); ib.ptr[7] = PACKET2(0); ib.length_dw = 8; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); goto free_ib; @@ -4067,39 +4074,6 @@ int r100_init(struct radeon_device *rdev) return 0; } -uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, - bool always_indirect) -{ - if (reg < rdev->rmmio_size && !always_indirect) - return readl(((void __iomem *)rdev->rmmio) + reg); - else { - unsigned long flags; - uint32_t ret; - - spin_lock_irqsave(&rdev->mmio_idx_lock, flags); - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); - - return ret; - } -} - -void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, - bool always_indirect) -{ - if (reg < rdev->rmmio_size && !always_indirect) - writel(v, ((void __iomem *)rdev->rmmio) + reg); - else { - unsigned long flags; - - spin_lock_irqsave(&rdev->mmio_idx_lock, flags); - writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); - writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); - spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); - } -} - u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) { if (reg < rdev->rio_mem_size) diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 58f0473aa73f..67780374a652 
100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev, if (fence) { r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); return r; } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 3c21d77a483d..1bc4704034ce 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -69,17 +69,23 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) mb(); } +#define R300_PTE_UNSNOOPED (1 << 0) #define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_READABLE (1 << 3) void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr) + uint64_t addr, uint32_t flags) { void __iomem *ptr = rdev->gart.ptr; addr = (lower_32_bits(addr) >> 8) | - ((upper_32_bits(addr) & 0xff) << 24) | - R300_PTE_WRITEABLE | R300_PTE_READABLE; + ((upper_32_bits(addr) & 0xff) << 24); + if (flags & RADEON_GART_PAGE_READ) + addr |= R300_PTE_READABLE; + if (flags & RADEON_GART_PAGE_WRITE) + addr |= R300_PTE_WRITEABLE; + if (!(flags & RADEON_GART_PAGE_SNOOP)) + addr |= R300_PTE_UNSNOOPED; /* on x86 we want this to be CPU endian, on powerpc * on powerpc without HW swappers, it'll get swapped on way * into VRAM - so no need for cpu_to_le32 on VRAM tables */ @@ -120,7 +126,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* discard memory request outside of configured range */ tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); @@ -290,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) radeon_ring_write(ring, R300_GEOMETRY_ROUND_NEAREST | R300_COLOR_ROUND_NEAREST); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); } static void r300_errata(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 802b19220a21..2828605aef3f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev) radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); radeon_ring_write(ring, rdev->config.r300.resync_scratch); radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); } static void r420_cp_errata_fini(struct radeon_device *rdev) @@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev) radeon_ring_lock(rdev, ring, 8); radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); radeon_ring_write(ring, R300_RB3D_DC_FINISH); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 3c69f58e46ef..ea5c9af722ef 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -968,7 +968,6 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | @@ -1339,7 +1338,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev) if (rdev->vram_scratch.robj == NULL) { r = 
radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - NULL, &rdev->vram_scratch.robj); + 0, NULL, &rdev->vram_scratch.robj); if (r) { return r; } @@ -1813,7 +1812,6 @@ static void r600_gpu_init(struct radeon_device *rdev) { u32 tiling_config; u32 ramcfg; - u32 cc_rb_backend_disable; u32 cc_gc_shader_pipe_config; u32 tmp; int i, j; @@ -1940,29 +1938,20 @@ static void r600_gpu_init(struct radeon_device *rdev) } tiling_config |= BANK_SWAPS(1); - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; - tmp = R6XX_MAX_BACKENDS - - r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); - if (tmp < rdev->config.r600.max_backends) { - rdev->config.r600.max_backends = tmp; - } - cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; - tmp = R6XX_MAX_PIPES - - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); - if (tmp < rdev->config.r600.max_pipes) { - rdev->config.r600.max_pipes = tmp; - } - tmp = R6XX_MAX_SIMDS - - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); - if (tmp < rdev->config.r600.max_simds) { - rdev->config.r600.max_simds = tmp; - } tmp = rdev->config.r600.max_simds - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); rdev->config.r600.active_simds = tmp; disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; + tmp = 0; + for (i = 0; i < rdev->config.r600.max_backends; i++) + tmp |= (1 << i); + /* if all the backends are disabled, fix it up here */ + if ((disabled_rb_mask & tmp) == tmp) { + for (i = 0; i < rdev->config.r600.max_backends; i++) + disabled_rb_mask &= ~(1 << i); + } tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, R6XX_MAX_BACKENDS, disabled_rb_mask); @@ -2548,7 +2537,7 @@ int r600_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); radeon_ring_write(ring, 0); radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); cp_me = 0xff; WREG32(R_0086D8_CP_ME_CNTL, cp_me); @@ -2684,7 +2673,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) @@ -2754,6 +2743,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev, } } +/** + * r600_semaphore_ring_emit - emit a semaphore on the CP ring + * + * @rdev: radeon_device pointer + * @ring: radeon ring buffer object + * @semaphore: radeon semaphore object + * @emit_wait: Is this a sempahore wait? + * + * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP + * from running ahead of semaphore waits. 
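+ * For waits this is done by following the MEM_SEMAPHORE packet with
+ * PACKET3_PFP_SYNC_ME; since that packet only exists on r7xx and newer, it is
+ * emitted on CHIP_CEDAR and later parts only.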
+ */ bool r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, @@ -2769,6 +2769,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, lower_32_bits(addr)); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); + /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */ + if (emit_wait && (rdev->family >= CHIP_CEDAR)) { + /* Prevent the PFP from running ahead of the semaphore wait */ + radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); + radeon_ring_write(ring, 0x0); + } + return true; } @@ -2846,7 +2853,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, return r; } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_semaphore_free(rdev, &sem, *fence); return r; @@ -3166,7 +3173,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); goto free_ib; @@ -3227,7 +3234,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev) if (rdev->ih.ring_obj == NULL) { r = radeon_bo_create(rdev, rdev->ih.ring_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, + RADEON_GEM_DOMAIN_GTT, 0, NULL, &rdev->ih.ring_obj); if (r) { DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); @@ -3785,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev) wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { + wptr &= ~RB_OVERFLOW; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 16). Hopefully * this should allow us to catchup. */ - dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", - wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); - wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -3924,11 +3931,13 @@ restart_ih: break; case 9: /* D1 pflip */ DRM_DEBUG("IH: D1 flip\n"); - radeon_crtc_handle_flip(rdev, 0); + if (radeon_use_pflipirq > 0) + radeon_crtc_handle_flip(rdev, 0); break; case 11: /* D2 pflip */ DRM_DEBUG("IH: D2 flip\n"); - radeon_crtc_handle_flip(rdev, 1); + if (radeon_use_pflipirq > 0) + radeon_crtc_handle_flip(rdev, 1); break; case 19: /* HPD/DAC hotplug */ switch (src_data) { @@ -4039,6 +4048,7 @@ restart_ih: /* wptr/rptr are in bytes! 
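Each vector is 16 bytes, so advance rptr by 16, wrap it with ptr_mask and write it back to IH_RB_RPTR as we go.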
*/ rptr += 16; rptr &= rdev->ih.ptr_mask; + WREG32(IH_RB_RPTR, rptr); } if (queue_hotplug) schedule_work(&rdev->hotplug_work); @@ -4047,7 +4057,6 @@ restart_ih: if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; - WREG32(IH_RB_RPTR, rdev->ih.rptr); atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ @@ -4089,16 +4098,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) } /** - * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl + * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO * rdev: radeon device structure - * bo: buffer object struct which userspace is waiting for idle * - * Some R6XX/R7XX doesn't seems to take into account HDP flush performed - * through ring buffer, this leads to corruption in rendering, see - * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we - * directly perform HDP flush by writing register through MMIO. + * Some R6XX/R7XX don't seem to take into account HDP flushes performed + * through the ring buffer. This leads to corruption in rendering, see + * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we + * directly perform the HDP flush by writing the register through MMIO. */ -void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) +void r600_mmio_hdp_flush(struct radeon_device *rdev) { /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 12511bb5fd6f..c47537a1ddba 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -825,7 +825,6 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p, uint32_t *vline_start_end, uint32_t *vline_status) { - struct drm_mode_object *obj; struct drm_crtc *crtc; struct radeon_crtc *radeon_crtc; struct radeon_cs_packet p3reloc, wait_reg_mem; @@ -887,12 +886,11 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p, crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); reg = R600_CP_PACKET0_GET_REG(header); - obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { + crtc = drm_crtc_find(p->rdev->ddev, crtc_id); + if (!crtc) { DRM_ERROR("cannot find crtc %d\n", crtc_id); return -ENOENT; } - crtc = obj_to_crtc(obj); radeon_crtc = to_radeon_crtc(crtc); crtc_id = radeon_crtc->crtc_id; diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 4969cef44a19..a908daa006d2 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev) u32 rb_bufsz; int r; - /* Reset dma */ - if (rdev->family >= CHIP_RV770) - WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); - else - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); @@ -261,7 +252,7 @@ int r600_dma_ring_test(struct radeon_device *rdev, radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { tmp = readl(ptr); @@ -368,7 +359,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct 
radeon_ring *ring) ib.ptr[3] = 0xDEADBEEF; ib.length_dw = 4; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { radeon_ib_free(rdev, &ib); DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); @@ -493,7 +484,7 @@ int r600_copy_dma(struct radeon_device *rdev, return r; } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_semaphore_free(rdev, &sem, *fence); return r; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index f94e7a9afe75..31e1052ad3e3 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -44,13 +44,6 @@ #define R6XX_MAX_PIPES 8 #define R6XX_MAX_PIPES_MASK 0xff -/* PTE flags */ -#define PTE_VALID (1 << 0) -#define PTE_SYSTEM (1 << 1) -#define PTE_SNOOPED (1 << 2) -#define PTE_READABLE (1 << 5) -#define PTE_WRITEABLE (1 << 6) - /* tiling bits */ #define ARRAY_LINEAR_GENERAL 0x00000000 #define ARRAY_LINEAR_ALIGNED 0x00000001 @@ -1597,6 +1590,7 @@ */ # define PACKET3_CP_DMA_CMD_SAIC (1 << 28) # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) +#define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */ #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 60c47f829122..3247bfd14410 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -64,6 +64,7 @@ #include <linux/wait.h> #include <linux/list.h> #include <linux/kref.h> +#include <linux/interval_tree.h> #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> @@ -103,6 +104,9 @@ extern int radeon_hard_reset; extern int radeon_vm_size; extern int radeon_vm_block_size; extern int radeon_deep_color; +extern int radeon_use_pflipirq; +extern int radeon_bapm; +extern int radeon_backlight; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -304,6 +308,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r u16 *vddc, u16 *vddci, u16 virtual_voltage_id, u16 vbios_voltage_id); +int radeon_atom_get_voltage_evv(struct radeon_device *rdev, + u16 virtual_voltage_id, + u16 *voltage); int radeon_atom_round_to_true_voltage(struct radeon_device *rdev, u8 voltage_type, u16 nominal_voltage, @@ -317,6 +324,9 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev, struct atom_voltage_table *voltage_table); bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev, u8 voltage_type, u8 voltage_mode); +int radeon_atom_get_svi2_info(struct radeon_device *rdev, + u8 voltage_type, + u8 *svd_gpio_id, u8 *svc_gpio_id); void radeon_atom_update_memory_dll(struct radeon_device *rdev, u32 mem_clock); void radeon_atom_set_ac_timing(struct radeon_device *rdev, @@ -441,14 +451,12 @@ struct radeon_mman { struct radeon_bo_va { /* protected by bo being reserved */ struct list_head bo_list; - uint64_t soffset; - uint64_t eoffset; uint32_t flags; - bool valid; + uint64_t addr; unsigned ref_count; /* protected by vm mutex */ - struct list_head vm_list; + struct interval_tree_node it; struct list_head vm_status; /* constant after initialization */ @@ -465,6 +473,7 @@ struct radeon_bo { struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; + u32 flags; unsigned pin_count; void *kptr; u32 tiling_flags; @@ -543,9 +552,9 @@ struct radeon_gem { int radeon_gem_init(struct radeon_device *rdev); void radeon_gem_fini(struct radeon_device 
*rdev); -int radeon_gem_object_create(struct radeon_device *rdev, int size, +int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, int alignment, int initial_domain, - bool discardable, bool kernel, + u32 flags, bool kernel, struct drm_gem_object **obj); int radeon_mode_dumb_create(struct drm_file *file_priv, @@ -590,6 +599,12 @@ struct radeon_mc; #define RADEON_GPU_PAGE_SHIFT 12 #define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK) +#define RADEON_GART_PAGE_DUMMY 0 +#define RADEON_GART_PAGE_VALID (1 << 0) +#define RADEON_GART_PAGE_READ (1 << 1) +#define RADEON_GART_PAGE_WRITE (1 << 2) +#define RADEON_GART_PAGE_SNOOP (1 << 3) + struct radeon_gart { dma_addr_t table_addr; struct radeon_bo *robj; @@ -614,8 +629,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, int pages); int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, int pages, struct page **pagelist, - dma_addr_t *dma_addr); -void radeon_gart_restore(struct radeon_device *rdev); + dma_addr_t *dma_addr, uint32_t flags); /* @@ -855,9 +869,9 @@ struct radeon_mec { #define R600_PTE_FRAG_64KB (4 << 7) #define R600_PTE_FRAG_256KB (6 << 7) -/* flags used for GART page table entries on R600+ */ -#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \ - | R600_PTE_READABLE | R600_PTE_WRITEABLE) +/* flags needed to be set so we can copy directly from the GART table */ +#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \ + R600_PTE_SYSTEM | R600_PTE_VALID ) struct radeon_vm_pt { struct radeon_bo *bo; @@ -865,9 +879,12 @@ struct radeon_vm_pt { }; struct radeon_vm { - struct list_head va; + struct rb_root va; unsigned id; + /* BOs moved, but not yet updated in the PT */ + struct list_head invalidated; + /* BOs freed, but not yet updated in the PT */ struct list_head freed; @@ -899,6 +916,8 @@ struct radeon_vm_manager { u64 vram_base_offset; /* is vm enabled? 
*/ bool enabled; + /* for hw to save the PD addr on suspend/resume */ + uint32_t saved_table_addr[RADEON_NUM_VM]; }; /* @@ -952,7 +971,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, unsigned size); void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, - struct radeon_ib *const_ib); + struct radeon_ib *const_ib, bool hdp_flush); int radeon_ib_pool_init(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev); int radeon_ib_ring_tests(struct radeon_device *rdev); @@ -962,8 +981,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); -void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); -void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); +void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp, + bool hdp_flush); +void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp, + bool hdp_flush); void radeon_ring_undo(struct radeon_ring *ring); void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); @@ -1740,6 +1761,7 @@ struct radeon_asic_ring { /* command emmit functions */ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); + void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring); bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); @@ -1763,13 +1785,8 @@ struct radeon_asic { int (*suspend)(struct radeon_device *rdev); void (*vga_set_state)(struct radeon_device *rdev, bool state); int (*asic_reset)(struct radeon_device *rdev); - /* ioctl hw specific callback. Some hw might want to perform special - * operation on specific ioctl. For instance on wait idle some hw - * might want to perform and HDP flush through MMIO as it seems that - * some R6XX/R7XX hw doesn't take HDP flush into account if programmed - * through ring. 
- */ - void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); + /* Flush the HDP cache via MMIO */ + void (*mmio_hdp_flush)(struct radeon_device *rdev); /* check if 3D engine is idle */ bool (*gui_idle)(struct radeon_device *rdev); /* wait for mc_idle */ @@ -1782,16 +1799,26 @@ struct radeon_asic { struct { void (*tlb_flush)(struct radeon_device *rdev); void (*set_page)(struct radeon_device *rdev, unsigned i, - uint64_t addr); + uint64_t addr, uint32_t flags); } gart; struct { int (*init)(struct radeon_device *rdev); void (*fini)(struct radeon_device *rdev); - void (*set_page)(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); + void (*copy_pages)(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, uint64_t src, + unsigned count); + void (*write_pages)(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); + void (*set_pages)(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); + void (*pad_ib)(struct radeon_ib *ib); } vm; /* ring specific callbacks */ struct radeon_asic_ring *ring[RADEON_NUM_RINGS]; @@ -2299,10 +2326,12 @@ struct radeon_device { const struct firmware *mc_fw; /* NI MC firmware */ const struct firmware *ce_fw; /* SI CE firmware */ const struct firmware *mec_fw; /* CIK MEC firmware */ + const struct firmware *mec2_fw; /* KV MEC2 firmware */ const struct firmware *sdma_fw; /* CIK SDMA firmware */ const struct firmware *smc_fw; /* SMC firmware */ const struct firmware *uvd_fw; /* UVD firmware */ const struct firmware *vce_fw; /* VCE firmware */ + bool new_fw; struct r600_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ @@ -2342,6 +2371,11 @@ struct radeon_device { struct dev_pm_domain vga_pm_domain; bool have_disp_power_ref; + u32 px_quirk_flags; + + /* tracking pinned memory */ + u64 vram_pin_size; + u64 gart_pin_size; }; bool radeon_is_px(struct drm_device *dev); @@ -2352,10 +2386,42 @@ int radeon_device_init(struct radeon_device *rdev, void radeon_device_fini(struct radeon_device *rdev); int radeon_gpu_wait_for_idle(struct radeon_device *rdev); -uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, - bool always_indirect); -void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, - bool always_indirect); +#define RADEON_MIN_MMIO_SIZE 0x10000 + +static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, + bool always_indirect) +{ + /* The mmio size is 64kb at minimum. Allows the if to be optimized out. 
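Registers below rmmio_size or RADEON_MIN_MMIO_SIZE are accessed directly through readl()/writel(); anything else, or an always_indirect access, goes through RADEON_MM_INDEX/RADEON_MM_DATA under mmio_idx_lock.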
*/ + if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) + return readl(((void __iomem *)rdev->rmmio) + reg); + else { + unsigned long flags; + uint32_t ret; + + spin_lock_irqsave(&rdev->mmio_idx_lock, flags); + writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); + ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); + spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); + + return ret; + } +} + +static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, + bool always_indirect) +{ + if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) + writel(v, ((void __iomem *)rdev->rmmio) + reg); + else { + unsigned long flags; + + spin_lock_irqsave(&rdev->mmio_idx_lock, flags); + writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); + writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); + spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); + } +} + u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); @@ -2709,10 +2775,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) -#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) +#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) -#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags))) +#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) +#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags))) +#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags))) +#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib))) #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp)) #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp)) #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp)) @@ -2840,6 +2909,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, struct radeon_vm *vm); int radeon_vm_clear_freed(struct radeon_device *rdev, struct radeon_vm *vm); +int radeon_vm_clear_invalids(struct radeon_device *rdev, + struct radeon_vm *vm); int radeon_vm_bo_update(struct radeon_device *rdev, struct radeon_bo_va *bo_va, struct ttm_mem_reg *mem); diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 34b9aa9e3c06..2dd5847f9b98 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -194,7 +194,7 @@ static struct radeon_asic r100_asic = { .resume = &r100_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &r100_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { @@ -260,7 +260,7 @@ static struct 
radeon_asic r200_asic = { .resume = &r100_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &r100_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { @@ -340,7 +340,7 @@ static struct radeon_asic r300_asic = { .resume = &r300_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &r300_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { @@ -406,7 +406,7 @@ static struct radeon_asic r300_asic_pcie = { .resume = &r300_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &r300_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { @@ -472,7 +472,7 @@ static struct radeon_asic r420_asic = { .resume = &r420_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &r300_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { @@ -538,7 +538,7 @@ static struct radeon_asic rs400_asic = { .resume = &rs400_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &r300_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs400_mc_wait_for_idle, .gart = { @@ -604,7 +604,7 @@ static struct radeon_asic rs600_asic = { .resume = &rs600_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &rs600_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs600_mc_wait_for_idle, .gart = { @@ -672,7 +672,7 @@ static struct radeon_asic rs690_asic = { .resume = &rs690_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &rs600_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs690_mc_wait_for_idle, .gart = { @@ -740,7 +740,7 @@ static struct radeon_asic rv515_asic = { .resume = &rv515_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &rs600_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rv515_mc_wait_for_idle, .gart = { @@ -806,7 +806,7 @@ static struct radeon_asic r520_asic = { .resume = &r520_resume, .vga_set_state = &r100_vga_set_state, .asic_reset = &rs600_asic_reset, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r520_mc_wait_for_idle, .gart = { @@ -898,7 +898,7 @@ static struct radeon_asic r600_asic = { .resume = &r600_resume, .vga_set_state = &r600_vga_set_state, .asic_reset = &r600_asic_reset, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &r600_mc_wait_for_idle, .get_xclk = &r600_get_xclk, @@ -970,7 +970,7 @@ static struct radeon_asic rv6xx_asic = { .resume = &r600_resume, .vga_set_state = &r600_vga_set_state, .asic_reset = &r600_asic_reset, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &r600_mc_wait_for_idle, .get_xclk = &r600_get_xclk, @@ -1060,7 +1060,7 @@ static struct radeon_asic rs780_asic = { .resume = &r600_resume, .vga_set_state = &r600_vga_set_state, .asic_reset = &r600_asic_reset, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = 
&r600_mc_wait_for_idle, .get_xclk = &r600_get_xclk, @@ -1163,7 +1163,7 @@ static struct radeon_asic rv770_asic = { .resume = &rv770_resume, .asic_reset = &r600_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &r600_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, @@ -1281,7 +1281,7 @@ static struct radeon_asic evergreen_asic = { .resume = &evergreen_resume, .asic_reset = &evergreen_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, @@ -1373,7 +1373,7 @@ static struct radeon_asic sumo_asic = { .resume = &evergreen_resume, .asic_reset = &evergreen_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &r600_get_xclk, @@ -1464,7 +1464,7 @@ static struct radeon_asic btc_asic = { .resume = &evergreen_resume, .asic_reset = &evergreen_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, @@ -1599,7 +1599,7 @@ static struct radeon_asic cayman_asic = { .resume = &cayman_resume, .asic_reset = &cayman_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &rv770_get_xclk, @@ -1611,7 +1611,10 @@ static struct radeon_asic cayman_asic = { .vm = { .init = &cayman_vm_init, .fini = &cayman_vm_fini, - .set_page = &cayman_dma_vm_set_page, + .copy_pages = &cayman_dma_vm_copy_pages, + .write_pages = &cayman_dma_vm_write_pages, + .set_pages = &cayman_dma_vm_set_pages, + .pad_ib = &cayman_dma_vm_pad_ib, }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, @@ -1699,7 +1702,7 @@ static struct radeon_asic trinity_asic = { .resume = &cayman_resume, .asic_reset = &cayman_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &r600_get_xclk, @@ -1711,7 +1714,10 @@ static struct radeon_asic trinity_asic = { .vm = { .init = &cayman_vm_init, .fini = &cayman_vm_fini, - .set_page = &cayman_dma_vm_set_page, + .copy_pages = &cayman_dma_vm_copy_pages, + .write_pages = &cayman_dma_vm_write_pages, + .set_pages = &cayman_dma_vm_set_pages, + .pad_ib = &cayman_dma_vm_pad_ib, }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring, @@ -1829,7 +1835,7 @@ static struct radeon_asic si_asic = { .resume = &si_resume, .asic_reset = &si_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = r600_ioctl_wait_idle, + .mmio_hdp_flush = r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &si_get_xclk, @@ -1841,7 +1847,10 @@ static struct radeon_asic si_asic = { .vm = { .init = &si_vm_init, .fini = &si_vm_fini, - .set_page = &si_dma_vm_set_page, + .copy_pages = &si_dma_vm_copy_pages, + .write_pages = &si_dma_vm_write_pages, + .set_pages = &si_dma_vm_set_pages, + 
.pad_ib = &cayman_dma_vm_pad_ib, }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring, @@ -1987,7 +1996,7 @@ static struct radeon_asic ci_asic = { .resume = &cik_resume, .asic_reset = &cik_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = &r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &cik_get_xclk, @@ -1999,7 +2008,10 @@ static struct radeon_asic ci_asic = { .vm = { .init = &cik_vm_init, .fini = &cik_vm_fini, - .set_page = &cik_sdma_vm_set_page, + .copy_pages = &cik_sdma_vm_copy_pages, + .write_pages = &cik_sdma_vm_write_pages, + .set_pages = &cik_sdma_vm_set_pages, + .pad_ib = &cik_sdma_vm_pad_ib, }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, @@ -2091,7 +2103,7 @@ static struct radeon_asic kv_asic = { .resume = &cik_resume, .asic_reset = &cik_asic_reset, .vga_set_state = &r600_vga_set_state, - .ioctl_wait_idle = NULL, + .mmio_hdp_flush = &r600_mmio_hdp_flush, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle, .get_xclk = &cik_get_xclk, @@ -2103,7 +2115,10 @@ static struct radeon_asic kv_asic = { .vm = { .init = &cik_vm_init, .fini = &cik_vm_fini, - .set_page = &cik_sdma_vm_set_page, + .copy_pages = &cik_sdma_vm_copy_pages, + .write_pages = &cik_sdma_vm_write_pages, + .set_pages = &cik_sdma_vm_set_pages, + .pad_ib = &cik_sdma_vm_pad_ib, }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring, @@ -2457,7 +2472,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CGTS_LS | @@ -2476,7 +2491,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CP_LS | @@ -2502,7 +2517,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CGTS_LS | @@ -2530,7 +2545,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->cg_flags = RADEON_CG_SUPPORT_GFX_MGCG | RADEON_CG_SUPPORT_GFX_MGLS | - RADEON_CG_SUPPORT_GFX_CGCG | + /*RADEON_CG_SUPPORT_GFX_CGCG |*/ RADEON_CG_SUPPORT_GFX_CGLS | RADEON_CG_SUPPORT_GFX_CGTS | RADEON_CG_SUPPORT_GFX_CGTS_LS | diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 01e7c0ad8f01..7756bc1e1cd3 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -68,7 +68,7 @@ int r100_asic_reset(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr); + uint64_t addr, uint32_t flags); void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); @@ -173,7 +173,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, extern int r300_cs_parse(struct radeon_cs_parser *p); extern void rv370_pcie_gart_tlb_flush(struct 
radeon_device *rdev); extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr); + uint64_t addr, uint32_t flags); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev); @@ -209,7 +209,7 @@ extern int rs400_suspend(struct radeon_device *rdev); extern int rs400_resume(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr); + uint64_t addr, uint32_t flags); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int rs400_gart_init(struct radeon_device *rdev); @@ -233,7 +233,7 @@ void rs600_irq_disable(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); void rs600_gart_tlb_flush(struct radeon_device *rdev); void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr); + uint64_t addr, uint32_t flags); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); @@ -351,7 +351,7 @@ void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); void r600_hpd_set_polarity(struct radeon_device *rdev, enum radeon_hpd_id hpd); -extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); +extern void r600_mmio_hdp_flush(struct radeon_device *rdev); extern bool r600_gui_idle(struct radeon_device *rdev); extern void r600_pm_misc(struct radeon_device *rdev); extern void r600_pm_init_profile(struct radeon_device *rdev); @@ -606,11 +606,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); -void cayman_dma_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); + +void cayman_dma_vm_copy_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, uint64_t src, + unsigned count); +void cayman_dma_vm_write_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); +void cayman_dma_vm_set_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); +void cayman_dma_vm_pad_ib(struct radeon_ib *ib); void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); @@ -693,11 +704,22 @@ int si_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_gpu_pages, struct radeon_fence **fence); -void si_dma_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); + +void si_dma_vm_copy_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, uint64_t src, + unsigned count); +void si_dma_vm_write_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); +void si_dma_vm_set_pages(struct 
radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); + void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); u32 si_get_xclk(struct radeon_device *rdev); uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); @@ -771,11 +793,23 @@ int cik_irq_process(struct radeon_device *rdev); int cik_vm_init(struct radeon_device *rdev); void cik_vm_fini(struct radeon_device *rdev); void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); -void cik_sdma_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); + +void cik_sdma_vm_copy_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, uint64_t src, + unsigned count); +void cik_sdma_vm_write_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); +void cik_sdma_vm_set_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags); +void cik_sdma_vm_pad_ib(struct radeon_ib *ib); + void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); u32 cik_gfx_get_rptr(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 173f378428a9..e74c7e387dde 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } + /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ + if ((dev->pdev->device == 0x9805) && + (dev->pdev->subsystem_vendor == 0x1734) && + (dev->pdev->subsystem_device == 0x11bd)) { + if (*connector_type == DRM_MODE_CONNECTOR_VGA) + return false; + } return true; } @@ -1963,7 +1970,7 @@ static const char *thermal_controller_names[] = { "adm1032", "adm1030", "max6649", - "lm64", + "lm63", /* lm64 */ "f75375", "asc7xxx", }; @@ -1974,7 +1981,7 @@ static const char *pp_lib_thermal_controller_names[] = { "adm1032", "adm1030", "max6649", - "lm64", + "lm63", /* lm64 */ "f75375", "RV6xx", "RV770", @@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_KV; - } else if ((controller->ucType == - ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || - (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || - (controller->ucType == - ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { - DRM_INFO("Special thermal controller config\n"); + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { + DRM_INFO("External GPIO thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { + DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { + DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); if (rdev->pm.i2c_bus) { @@ -3236,6 +3255,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r return 0; } +union get_voltage_info { + struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in; + struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out; +}; + +int radeon_atom_get_voltage_evv(struct radeon_device *rdev, + u16 virtual_voltage_id, + u16 *voltage) +{ + int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo); + u32 entry_id; + u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; + union get_voltage_info args; + + for (entry_id = 0; entry_id < count; entry_id++) { + if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v == + virtual_voltage_id) + break; + } + + if (entry_id >= count) + return -EINVAL; + + args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; + args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; + args.in.ulSCLKFreq = + cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + *voltage = le16_to_cpu(args.evv_out.usVoltageLevel); + + return 0; +} + int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type, u32 *gpio_value, u32 *gpio_mask) @@ -3397,6 +3451,50 @@ radeon_atom_is_voltage_gpio(struct radeon_device *rdev, return false; } +int radeon_atom_get_svi2_info(struct radeon_device *rdev, + u8 voltage_type, + u8 *svd_gpio_id, u8 *svc_gpio_id) +{ + int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo); + u8 frev, crev; + u16 data_offset, size; + union voltage_object_info *voltage_info; + union voltage_object *voltage_object = NULL; + + if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, + &frev, &crev, &data_offset)) { + voltage_info = (union voltage_object_info *) + (rdev->mode_info.atom_context->bios + data_offset); + + switch (frev) { + case 3: + switch (crev) { + case 1: + voltage_object = (union voltage_object *) + atom_lookup_voltage_object_v3(&voltage_info->v3, + voltage_type, + VOLTAGE_OBJ_SVID2); + if (voltage_object) { + *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId; + *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId; + } else { + return -EINVAL; + } + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return -EINVAL; + } + break; + default: + DRM_ERROR("unknown voltage object table\n"); + return -EINVAL; + } + + } + return 0; +} + int radeon_atom_get_max_voltage(struct radeon_device *rdev, u8 voltage_type, u16 *max_voltage) { diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c 
b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index a9fb0d016d38..8bc7d0bbd3c8 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -33,7 +33,6 @@ static struct radeon_atpx_priv { bool atpx_detected; /* handle for device - and atpx */ acpi_handle dhandle; - acpi_handle other_handle; struct radeon_atpx atpx; } radeon_atpx_priv; @@ -453,10 +452,9 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) return false; status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); - if (ACPI_FAILURE(status)) { - radeon_atpx_priv.other_handle = dhandle; + if (ACPI_FAILURE(status)) return false; - } + radeon_atpx_priv.dhandle = dhandle; radeon_atpx_priv.atpx.handle = atpx_handle; return true; @@ -540,16 +538,6 @@ static bool radeon_atpx_detect(void) printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", acpi_method_name); radeon_atpx_priv.atpx_detected = true; - /* - * On some systems hotplug events are generated for the device - * being switched off when ATPX is executed. They cause ACPI - * hotplug to trigger and attempt to remove the device from - * the system, which causes it to break down. Prevent that from - * happening by setting the no_hotplug flag for the involved - * ACPI device objects. - */ - acpi_bus_no_hotplug(radeon_atpx_priv.dhandle); - acpi_bus_no_hotplug(radeon_atpx_priv.other_handle); return true; } return false; diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 6e05a2e75a46..69f5695bdab9 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -97,7 +97,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, int time; n = RADEON_BENCHMARK_ITERATIONS; - r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); if (r) { goto out_cleanup; } @@ -109,7 +109,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (r) { goto out_cleanup; } - r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 44831197e82e..300c4b3d4669 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -107,7 +107,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: if (radeon_connector->use_digital) { - if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -115,7 +115,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) break; case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: - if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -124,7 +124,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) dig_connector = radeon_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) || - drm_detect_hdmi_monitor(radeon_connector->edid)) { 
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { if (connector->display_info.bpc) bpc = connector->display_info.bpc; } @@ -148,7 +148,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) break; } - if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { /* hdmi deep color only implemented on DCE4+ */ if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) { DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n", @@ -197,10 +197,19 @@ int radeon_get_monitor_bpc(struct drm_connector *connector) connector->name, bpc); } } + else if (bpc > 8) { + /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */ + DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n", + connector->name); + bpc = 8; + } } - if ((radeon_deep_color == 0) && (bpc > 8)) + if ((radeon_deep_color == 0) && (bpc > 8)) { + DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n", + connector->name); bpc = 8; + } DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n", connector->name, connector->display_info.bpc, bpc); @@ -216,7 +225,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c struct drm_encoder *best_encoder = NULL; struct drm_encoder *encoder = NULL; struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; - struct drm_mode_object *obj; bool connected; int i; @@ -226,14 +234,11 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, - connector->encoder_ids[i], - DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, + connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); - if ((encoder == best_encoder) && (status == connector_status_connected)) connected = true; else @@ -249,7 +254,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) { - struct drm_mode_object *obj; struct drm_encoder *encoder; int i; @@ -257,34 +261,134 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); if (encoder->encoder_type == encoder_type) return encoder; } return NULL; } +struct edid *radeon_connector_edid(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct drm_property_blob *edid_blob = connector->edid_blob_ptr; + + if (radeon_connector->edid) { + return radeon_connector->edid; + } else if (edid_blob) { + struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL); + if (edid) + radeon_connector->edid = edid; + } + return radeon_connector->edid; +} + +static void radeon_connector_get_edid(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (radeon_connector->edid) + return; + + /* on hw with routers, select right port */ + if (radeon_connector->router.ddc_valid) + 
radeon_router_select_ddc_port(radeon_connector); + + if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != + ENCODER_OBJECT_ID_NONE) && + radeon_connector->ddc_bus->has_aux) { + radeon_connector->edid = drm_get_edid(connector, + &radeon_connector->ddc_bus->aux.ddc); + } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + + if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || + dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && + radeon_connector->ddc_bus->has_aux) + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &radeon_connector->ddc_bus->aux.ddc); + else if (radeon_connector->ddc_bus) + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &radeon_connector->ddc_bus->adapter); + } else if (radeon_connector->ddc_bus) { + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &radeon_connector->ddc_bus->adapter); + } + + if (!radeon_connector->edid) { + if (rdev->is_atom_bios) { + /* some laptops provide a hardcoded edid in rom for LCDs */ + if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || + (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) + radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); + } else { + /* some servers provide a hardcoded edid in rom for KVMs */ + radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); + } + } +} + +static void radeon_connector_free_edid(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (radeon_connector->edid) { + kfree(radeon_connector->edid); + radeon_connector->edid = NULL; + } +} + +static int radeon_ddc_get_modes(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int ret; + + if (radeon_connector->edid) { + drm_mode_connector_update_edid_property(connector, radeon_connector->edid); + ret = drm_add_edid_modes(connector, radeon_connector->edid); + drm_edid_to_eld(connector, radeon_connector->edid); + return ret; + } + drm_mode_connector_update_edid_property(connector, NULL); + return 0; +} + static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - /* pick the encoder ids */ - if (enc_id) { - obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } +static void radeon_get_native_mode(struct drm_connector *connector) +{ + struct drm_encoder *encoder = radeon_best_single_encoder(connector); + struct radeon_encoder *radeon_encoder; + + if (encoder == NULL) + return; + + radeon_encoder = to_radeon_encoder(encoder); + + if (!list_empty(&connector->probed_modes)) { + struct drm_display_mode *preferred_mode = + list_first_entry(&connector->probed_modes, + struct drm_display_mode, head); + + radeon_encoder->native_mode = *preferred_mode; + } else { + radeon_encoder->native_mode.clock = 0; + } +} + /* * radeon_connector_analog_encoder_conflict_solve * - search for other connectors sharing this encoder @@ -585,6 +689,35 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct radeon_property_change_mode(&radeon_encoder->base); } + if 
(property == dev->mode_config.scaling_mode_property) { + enum radeon_rmx_type rmx_type; + + if (connector->encoder) + radeon_encoder = to_radeon_encoder(connector->encoder); + else { + struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; + radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); + } + + switch (val) { + default: + case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; + case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; + case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; + case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; + } + if (radeon_encoder->rmx_type == rmx_type) + return 0; + + if ((rmx_type != DRM_MODE_SCALE_NONE) && + (radeon_encoder->native_mode.clock == 0)) + return 0; + + radeon_encoder->rmx_type = rmx_type; + + radeon_property_change_mode(&radeon_encoder->base); + } + return 0; } @@ -625,22 +758,20 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, static int radeon_lvds_get_modes(struct drm_connector *connector) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; int ret = 0; struct drm_display_mode *mode; - if (radeon_connector->ddc_bus) { - ret = radeon_ddc_get_modes(radeon_connector); - if (ret > 0) { - encoder = radeon_best_single_encoder(connector); - if (encoder) { - radeon_fixup_lvds_native_mode(encoder, connector); - /* add scaled modes */ - radeon_add_common_modes(encoder, connector); - } - return ret; + radeon_connector_get_edid(connector); + ret = radeon_ddc_get_modes(connector); + if (ret > 0) { + encoder = radeon_best_single_encoder(connector); + if (encoder) { + radeon_fixup_lvds_native_mode(encoder, connector); + /* add scaled modes */ + radeon_add_common_modes(encoder, connector); } + return ret; } encoder = radeon_best_single_encoder(connector); @@ -715,16 +846,9 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) } /* check for edid as well */ + radeon_connector_get_edid(connector); if (radeon_connector->edid) ret = connector_status_connected; - else { - if (radeon_connector->ddc_bus) { - radeon_connector->edid = drm_get_edid(&radeon_connector->base, - &radeon_connector->ddc_bus->adapter); - if (radeon_connector->edid) - ret = connector_status_connected; - } - } /* check acpi lid status ??? 
*/ radeon_connector_update_scratch_regs(connector, ret); @@ -737,10 +861,9 @@ static void radeon_connector_destroy(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); - if (radeon_connector->edid) - kfree(radeon_connector->edid); + radeon_connector_free_edid(connector); kfree(radeon_connector->con_priv); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -797,10 +920,12 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = { static int radeon_vga_get_modes(struct drm_connector *connector) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); int ret; - ret = radeon_ddc_get_modes(radeon_connector); + radeon_connector_get_edid(connector); + ret = radeon_ddc_get_modes(connector); + + radeon_get_native_mode(connector); return ret; } @@ -843,28 +968,26 @@ radeon_vga_detect(struct drm_connector *connector, bool force) dret = radeon_ddc_probe(radeon_connector, false); if (dret) { radeon_connector->detected_by_load = false; - if (radeon_connector->edid) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; - } - radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); + radeon_connector_free_edid(connector); + radeon_connector_get_edid(connector); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", connector->name); ret = connector_status_connected; } else { - radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); + radeon_connector->use_digital = + !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); /* some oems have boards with separate digital and analog connectors * with a shared ddc line (often vga + hdmi) */ if (radeon_connector->use_digital && radeon_connector->shared_ddc) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; + radeon_connector_free_edid(connector); ret = connector_status_disconnected; - } else + } else { ret = connector_status_connected; + } } } else { @@ -999,15 +1122,6 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = { .set_property = radeon_connector_set_property, }; -static int radeon_dvi_get_modes(struct drm_connector *connector) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - int ret; - - ret = radeon_ddc_get_modes(radeon_connector); - return ret; -} - static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -1048,7 +1162,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; struct drm_encoder_helper_funcs *encoder_funcs; - struct drm_mode_object *obj; int i, r; enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; @@ -1066,18 +1179,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) dret = radeon_ddc_probe(radeon_connector, false); if (dret) { radeon_connector->detected_by_load = false; - if (radeon_connector->edid) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; - } - radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); + radeon_connector_free_edid(connector); + radeon_connector_get_edid(connector); if (!radeon_connector->edid) { DRM_ERROR("%s: probed 
a monitor but no|invalid EDID\n", connector->name); /* rs690 seems to have a problem with connectors not existing and always * return a block of 0's. If we see this just stop polling on this output */ - if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) { + if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && + radeon_connector->base.null_edid_counter) { ret = connector_status_disconnected; DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", connector->name); @@ -1087,18 +1198,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) broken_edid = true; /* defer use_digital to later */ } } else { - radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); + radeon_connector->use_digital = + !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); /* some oems have boards with separate digital and analog connectors * with a shared ddc line (often vga + hdmi) */ if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; + radeon_connector_free_edid(connector); ret = connector_status_disconnected; - } else + } else { ret = connector_status_connected; - + } /* This gets complicated. We have boards with VGA + HDMI with a * shared DDC line and we have boards with DVI-D + HDMI with a shared * DDC line. The latter is more complex because with DVI<->HDMI adapters @@ -1118,8 +1229,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { /* hpd is our only option in this case */ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; + radeon_connector_free_edid(connector); ret = connector_status_disconnected; } } @@ -1153,14 +1263,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, - connector->encoder_ids[i], - DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, + connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); - if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; @@ -1225,19 +1332,16 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct drm_mode_object *obj; struct drm_encoder *encoder; int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); - if (radeon_connector->use_digital == true) { if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) return encoder; @@ -1252,13 +1356,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) /* then check use digitial */ /* pick the first one */ - if (enc_id) { - obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; - } + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); return NULL; } @@ -1291,7 +1390,7 @@ 
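/*
 * Editorial aside, not part of the patch: several hunks above replace the
 * drm_mode_object_find() + obj_to_encoder() pattern with a direct
 * drm_encoder_find() lookup. A minimal sketch of the resulting loop,
 * using only calls and fields that appear in this diff:
 */
static struct drm_encoder *example_first_encoder(struct drm_connector *connector)
{
	int i;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		struct drm_encoder *encoder;

		if (connector->encoder_ids[i] == 0)
			break;
		encoder = drm_encoder_find(connector->dev,
					   connector->encoder_ids[i]);
		if (encoder)
			return encoder;
	}
	return NULL;
}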
static int radeon_dvi_mode_valid(struct drm_connector *connector, (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) || (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) return MODE_OK; - else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; @@ -1310,7 +1409,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector, } static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { - .get_modes = radeon_dvi_get_modes, + .get_modes = radeon_vga_get_modes, .mode_valid = radeon_dvi_mode_valid, .best_encoder = radeon_dvi_encoder, }; @@ -1339,7 +1438,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_ON); - ret = radeon_ddc_get_modes(radeon_connector); + radeon_connector_get_edid(connector); + ret = radeon_ddc_get_modes(connector); if (!radeon_dig_connector->edp_on) atombios_set_edp_panel_power(connector, ATOM_TRANSMITTER_ACTION_POWER_OFF); @@ -1350,7 +1450,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); } - ret = radeon_ddc_get_modes(radeon_connector); + radeon_connector_get_edid(connector); + ret = radeon_ddc_get_modes(connector); } if (ret > 0) { @@ -1383,7 +1484,10 @@ static int radeon_dp_get_modes(struct drm_connector *connector) if (encoder) radeon_atom_ext_encoder_setup_ddc(encoder); } - ret = radeon_ddc_get_modes(radeon_connector); + radeon_connector_get_edid(connector); + ret = radeon_ddc_get_modes(connector); + + radeon_get_native_mode(connector); } return ret; @@ -1391,7 +1495,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector) u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) { - struct drm_mode_object *obj; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; int i; @@ -1400,11 +1503,10 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); radeon_encoder = to_radeon_encoder(encoder); switch (radeon_encoder->encoder_id) { @@ -1419,9 +1521,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn return ENCODER_OBJECT_ID_NONE; } -bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) +static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) { - struct drm_mode_object *obj; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; int i; @@ -1431,11 +1532,10 @@ bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) if (connector->encoder_ids[i] == 0) break; - obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); - if (!obj) + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) continue; - encoder = obj_to_encoder(obj); radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) found = true; @@ -1478,10 
+1578,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) goto out; } - if (radeon_connector->edid) { - kfree(radeon_connector->edid); - radeon_connector->edid = NULL; - } + radeon_connector_free_edid(connector); if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { @@ -1587,7 +1684,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector, (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { return radeon_dp_mode_valid_helper(connector, mode); } else { - if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { /* HDMI 1.3+ supports max clock of 340 Mhz */ if (mode->clock > 340000) return MODE_CLOCK_HIGH; @@ -1747,6 +1844,9 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); + drm_object_attach_property(&radeon_connector->base.base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1768,6 +1868,10 @@ radeon_add_atom_connector(struct drm_device *dev, 0); drm_object_attach_property(&radeon_connector->base.base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); + + drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.dither_property, RADEON_FMT_DITHER_DISABLE); @@ -1817,6 +1921,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); + if (ASIC_IS_AVIVO(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; @@ -1835,6 +1943,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.load_detect_property, 1); + if (ASIC_IS_AVIVO(rdev)) + drm_object_attach_property(&radeon_connector->base.base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->interlace_allowed = true; @@ -1868,17 +1980,18 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.dither_property, + RADEON_FMT_DITHER_DISABLE); + drm_object_attach_property(&radeon_connector->base.base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); } if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } - if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, - rdev->mode_info.dither_property, - RADEON_FMT_DITHER_DISABLE); - } if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_object_attach_property(&radeon_connector->base.base, @@ -1918,17 +2031,18 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); + drm_object_attach_property(&radeon_connector->base.base, + 
rdev->mode_info.dither_property, + RADEON_FMT_DITHER_DISABLE); + drm_object_attach_property(&radeon_connector->base.base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); } if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } - if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, - rdev->mode_info.dither_property, - RADEON_FMT_DITHER_DISABLE); - } subpixel_order = SubPixelHorizontalRGB; connector->interlace_allowed = true; if (connector_type == DRM_MODE_CONNECTOR_HDMIB) @@ -1965,18 +2079,18 @@ radeon_add_atom_connector(struct drm_device *dev, drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.underscan_vborder_property, 0); + drm_object_attach_property(&radeon_connector->base.base, + rdev->mode_info.dither_property, + RADEON_FMT_DITHER_DISABLE); + drm_object_attach_property(&radeon_connector->base.base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); } if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) { drm_object_attach_property(&radeon_connector->base.base, rdev->mode_info.audio_property, RADEON_AUDIO_AUTO); } - if (ASIC_IS_AVIVO(rdev)) { - drm_object_attach_property(&radeon_connector->base.base, - rdev->mode_info.dither_property, - RADEON_FMT_DITHER_DISABLE); - - } connector->interlace_allowed = true; /* in theory with a DP to VGA converter... */ connector->doublescan_allowed = false; @@ -2050,7 +2164,7 @@ radeon_add_atom_connector(struct drm_device *dev, connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); if (has_aux) radeon_dp_aux_init(radeon_connector); @@ -2211,5 +2325,5 @@ radeon_add_legacy_connector(struct drm_device *dev, } else connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); } diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ae763f60c8a0..83f382e8e40e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) * the buffers used for read only, which doubles the range * to 0 to 31. 32 is reserved for the kernel driver. 
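/*
 * Editorial aside, not part of the patch: the hunk that follows replaces
 * the magic 0xf with RADEON_RELOC_PRIO_MASK (presumably still 0xf, given
 * the literal it replaces) when deriving a buffer priority from the reloc
 * flags. For example, a reloc with flag priority 7 maps to 14 when read
 * only and 15 when it has a write domain, leaving 32 for the kernel:
 */
static unsigned example_reloc_priority(uint32_t flags, uint32_t write_domain)
{
	/* mirrors the computation in radeon_cs_parser_relocs() */
	return (flags & RADEON_RELOC_PRIO_MASK) * 2 + !!write_domain;
}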
*/ - priority = (r->flags & 0xf) * 2 + !!r->write_domain; + priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2 + + !!r->write_domain; /* the first reloc of an UVD job is the msg and that must be in VRAM, also but everything into VRAM on AGP cards to avoid @@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, radeon_vce_note_usage(rdev); radeon_cs_sync_rings(parser); - r = radeon_ib_schedule(rdev, &parser->ib, NULL); + r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); if (r) { DRM_ERROR("Failed to schedule IB !\n"); } @@ -500,7 +501,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, if (r) return r; } - return 0; + + return radeon_vm_clear_invalids(rdev, vm); } static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, @@ -540,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { - r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); + r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); } else { - r = radeon_ib_schedule(rdev, &parser->ib, NULL); + r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); } out: diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 697add2cd4e3..12c8329644c4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -103,6 +103,35 @@ static const char radeon_family_name[][16] = { "LAST", }; +#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) +#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) + +struct radeon_px_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; + u32 px_quirk_flags; +}; + +static struct radeon_px_quirk radeon_px_quirk_list[] = { + /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m) + * https://bugzilla.kernel.org/show_bug.cgi?id=74551 + */ + { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU + * https://bugzilla.kernel.org/show_bug.cgi?id=51381 + */ + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugzilla.kernel.org/show_bug.cgi?id=51381 + */ + { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* macbook pro 8.2 */ + { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, + { 0, 0, 0, 0, 0 }, +}; + bool radeon_is_px(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; @@ -112,6 +141,26 @@ bool radeon_is_px(struct drm_device *dev) return false; } +static void radeon_device_handle_px_quirks(struct radeon_device *rdev) +{ + struct radeon_px_quirk *p = radeon_px_quirk_list; + + /* Apply PX quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + rdev->px_quirk_flags = p->px_quirk_flags; + break; + } + ++p; + } + + if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) + rdev->flags &= ~RADEON_IS_PX; +} + /** * radeon_program_register_sequence - program an array of registers. 
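/*
 * Editorial aside, not part of the patch: radeon_px_quirk_list above is a
 * zero-terminated table keyed on the full PCI vendor/device and subsystem
 * IDs; radeon_device_handle_px_quirks() walks it and copies the matching
 * entry's flags into rdev->px_quirk_flags. A sketch of the same match,
 * assuming only the struct layout shown in this diff:
 */
static bool example_px_quirk_matches(const struct radeon_px_quirk *q,
				     const struct pci_dev *pdev)
{
	return pdev->vendor == q->chip_vendor &&
	       pdev->device == q->chip_device &&
	       pdev->subsystem_vendor == q->subsys_vendor &&
	       pdev->subsystem_device == q->subsys_device;
}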
* @@ -385,7 +434,8 @@ int radeon_wb_init(struct radeon_device *rdev) if (rdev->wb.wb_obj == NULL) { r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj); + RADEON_GEM_DOMAIN_GTT, 0, NULL, + &rdev->wb.wb_obj); if (r) { dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); return r; @@ -1077,7 +1127,19 @@ static void radeon_check_arguments(struct radeon_device *rdev) /* defines number of bits in page table versus page directory, * a page is 4KB so we have 12 bits offset, minimum 9 bits in the * page table and the remaining bits are in the page directory */ - if (radeon_vm_block_size < 9) { + if (radeon_vm_block_size == -1) { + + /* Total bits covered by PD + PTs */ + unsigned bits = ilog2(radeon_vm_size) + 17; + + /* Make sure the PD is 4K in size up to 8GB address space. + Above that split equal between PD and PTs */ + if (radeon_vm_size <= 8) + radeon_vm_block_size = bits - 9; + else + radeon_vm_block_size = (bits + 3) / 2; + + } else if (radeon_vm_block_size < 9) { dev_warn(rdev->dev, "VM page table size (%d) too small\n", radeon_vm_block_size); radeon_vm_block_size = 9; @@ -1092,25 +1154,6 @@ static void radeon_check_arguments(struct radeon_device *rdev) } /** - * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is - * needed for waking up. - * - * @pdev: pci dev pointer - */ -static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev) -{ - - /* 6600m in a macbook pro */ - if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && - pdev->subsystem_device == 0x00e2) { - printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n"); - return true; - } - - return false; -} - -/** * radeon_switcheroo_set_state - set switcheroo state * * @pdev: pci dev pointer @@ -1122,6 +1165,7 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev) static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); + struct radeon_device *rdev = dev->dev_private; if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) return; @@ -1133,7 +1177,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev)) + if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP)) dev->pdev->d3_delay = 20; radeon_resume_kms(dev, true, true); @@ -1337,6 +1381,9 @@ int radeon_device_init(struct radeon_device *rdev, if (rdev->rio_mem == NULL) DRM_ERROR("Unable to find PCI I/O BAR\n"); + if (rdev->flags & RADEON_IS_PX) + radeon_device_handle_px_quirks(rdev); + /* if we have > 1 VGA cards, then disable the radeon VGA resources */ /* this will fail for cards that aren't VGA class devices, just * ignore it */ @@ -1350,7 +1397,7 @@ int radeon_device_init(struct radeon_device *rdev, r = radeon_init(rdev); if (r) - return r; + goto failed; r = radeon_ib_ring_tests(rdev); if (r) @@ -1370,7 +1417,7 @@ int radeon_device_init(struct radeon_device *rdev, radeon_agp_disable(rdev); r = radeon_init(rdev); if (r) - return r; + goto failed; } if ((radeon_testing & 1)) { @@ -1392,6 +1439,11 @@ int radeon_device_init(struct radeon_device *rdev, DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); } return 0; + +failed: + if (runtime) + vga_switcheroo_fini_domain_pm_ops(rdev->dev); + return r; } static void radeon_debugfs_remove_files(struct radeon_device 
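A worked sketch of the vm_block_size auto-selection added to radeon_check_arguments() above; this is standalone illustration, not part of the patch. The 8-byte page-directory-entry size is an assumption used only to show why a 512-entry page directory matches the "4K" the code comment refers to.

#include <stdio.h>

/* ilog2() stand-in so the sketch builds outside the kernel */
static unsigned ilog2u(unsigned v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned auto_vm_block_size(unsigned vm_size_gb)
{
	/* total bits covered by PD + PTs, as in the hunk above */
	unsigned bits = ilog2u(vm_size_gb) + 17;

	if (vm_size_gb <= 8)
		return bits - 9;	/* PD keeps 2^9 = 512 entries (4K assuming 8-byte entries) */
	return (bits + 3) / 2;		/* above 8GB, split roughly evenly between PD and PTs */
}

int main(void)
{
	/* vm_size=8 (the new default): bits = 20, block size = 11, PD index = 9 bits */
	/* vm_size=64:                  bits = 23, block size = 13, PD index = 10 bits */
	printf("vm_size=8GB  -> block_size=%u\n", auto_vm_block_size(8));
	printf("vm_size=64GB -> block_size=%u\n", auto_vm_block_size(64));
	return 0;
}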
*rdev); @@ -1412,6 +1464,8 @@ void radeon_device_fini(struct radeon_device *rdev) radeon_bo_evict_vram(rdev); radeon_fini(rdev); vga_switcheroo_unregister_client(rdev->pdev); + if (rdev->flags & RADEON_IS_PX) + vga_switcheroo_fini_domain_pm_ops(rdev->dev); vga_client_register(rdev->pdev, NULL, NULL, NULL); if (rdev->rio_mem) pci_iounmap(rdev->pdev, rdev->rio_mem); @@ -1637,8 +1691,8 @@ int radeon_gpu_reset(struct radeon_device *rdev) radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); - radeon_pm_suspend(rdev); radeon_suspend(rdev); + radeon_hpd_fini(rdev); for (i = 0; i < RADEON_NUM_RINGS; ++i) { ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], @@ -1683,9 +1737,39 @@ retry: } } - radeon_pm_resume(rdev); + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { + /* do dpm late init */ + r = radeon_pm_late_init(rdev); + if (r) { + rdev->pm.dpm_enabled = false; + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); + } + } else { + /* resume old pm late */ + radeon_pm_resume(rdev); + } + + /* init dig PHYs, disp eng pll */ + if (rdev->is_atom_bios) { + radeon_atom_encoder_init(rdev); + radeon_atom_disp_eng_pll_init(rdev); + /* turn on the BL */ + if (rdev->mode_info.bl_encoder) { + u8 bl_level = radeon_get_backlight_level(rdev, + rdev->mode_info.bl_encoder); + radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, + bl_level); + } + } + /* reset hpd state */ + radeon_hpd_init(rdev); + drm_helper_resume_force_mode(rdev->ddev); + /* set the power state here in case we are a PX system or headless */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) + radeon_pm_compute_clocks(rdev); + ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); if (r) { /* bad news, how to tell it to userspace ? */ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index bf25061c8ac4..3fdf87318069 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -293,6 +293,18 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) if (radeon_crtc == NULL) return; + /* Skip the pageflip completion check below (based on polling) on + * asics which reliably support hw pageflip completion irqs. pflip + * irqs are a reliable and race-free method of handling pageflip + * completion detection. A use_pflipirq module parameter < 2 allows + * to override this in case of asics with faulty pflip irqs. + * A module parameter of 0 would only use this polling based path, + * a parameter of 1 would use pflip irq only as a backup to this + * path, as in Linux 3.16. 
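The pflip policy described above is controlled by the use_pflipirq module parameter introduced in radeon_drv.c further down (0 = polling only, 1 = pflip irq only as a backup to polling, 2 = pflip irq exclusively, the default). A usage sketch, e.g. to get the Linux 3.16 behaviour back:

	options radeon use_pflipirq=1		(in /etc/modprobe.d/radeon.conf)

or, equivalently, radeon.use_pflipirq=1 on the kernel command line.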
+ */ + if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev)) + return; + spin_lock_irqsave(&rdev->ddev->event_lock, flags); if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " @@ -823,64 +835,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) return ret; } -int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) -{ - struct drm_device *dev = radeon_connector->base.dev; - struct radeon_device *rdev = dev->dev_private; - int ret = 0; - - /* don't leak the edid if we already fetched it in detect() */ - if (radeon_connector->edid) - goto got_edid; - - /* on hw with routers, select right port */ - if (radeon_connector->router.ddc_valid) - radeon_router_select_ddc_port(radeon_connector); - - if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != - ENCODER_OBJECT_ID_NONE) { - if (radeon_connector->ddc_bus->has_aux) - radeon_connector->edid = drm_get_edid(&radeon_connector->base, - &radeon_connector->ddc_bus->aux.ddc); - } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { - struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; - - if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || - dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && - radeon_connector->ddc_bus->has_aux) - radeon_connector->edid = drm_get_edid(&radeon_connector->base, - &radeon_connector->ddc_bus->aux.ddc); - else if (radeon_connector->ddc_bus && !radeon_connector->edid) - radeon_connector->edid = drm_get_edid(&radeon_connector->base, - &radeon_connector->ddc_bus->adapter); - } else { - if (radeon_connector->ddc_bus && !radeon_connector->edid) - radeon_connector->edid = drm_get_edid(&radeon_connector->base, - &radeon_connector->ddc_bus->adapter); - } - - if (!radeon_connector->edid) { - if (rdev->is_atom_bios) { - /* some laptops provide a hardcoded edid in rom for LCDs */ - if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) || - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP))) - radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); - } else - /* some servers provide a hardcoded edid in rom for KVMs */ - radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); - } - if (radeon_connector->edid) { -got_edid: - drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); - ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); - drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); - return ret; - } - drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); - return 0; -} - /* avivo */ /** @@ -1749,7 +1703,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((radeon_encoder->underscan_type == UNDERSCAN_ON) || ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && - drm_detect_hdmi_monitor(radeon_connector->edid) && + drm_detect_hdmi_monitor(radeon_connector_edid(connector)) && is_hdtv_mode(mode)))) { if (radeon_encoder->underscan_hborder != 0) radeon_crtc->h_border = radeon_encoder->underscan_hborder; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 959f0866d993..f9d17b29b343 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -82,9 +82,11 @@ * 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN), * CIK: 1D and linear 
tiling modes contain valid PIPE_CONFIG * 2.39.0 - Add INFO query for number of active CUs + * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting + * CS to GPU on >= r600 */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 39 +#define KMS_DRIVER_MINOR 40 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -174,9 +176,12 @@ int radeon_dpm = -1; int radeon_aspm = -1; int radeon_runtime_pm = -1; int radeon_hard_reset = 0; -int radeon_vm_size = 4; -int radeon_vm_block_size = 9; +int radeon_vm_size = 8; +int radeon_vm_block_size = -1; int radeon_deep_color = 0; +int radeon_use_pflipirq = 2; +int radeon_bapm = -1; +int radeon_backlight = -1; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -247,12 +252,21 @@ module_param_named(hard_reset, radeon_hard_reset, int, 0444); MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)"); module_param_named(vm_size, radeon_vm_size, int, 0444); -MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); +MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)"); module_param_named(vm_block_size, radeon_vm_block_size, int, 0444); MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))"); module_param_named(deep_color, radeon_deep_color, int, 0444); +MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))"); +module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); + +MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(bapm, radeon_bapm, int, 0444); + +MODULE_PARM_DESC(backlight, "backlight support (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(backlight, radeon_backlight, int, 0444); + static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; @@ -430,6 +444,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev) ret = radeon_suspend_kms(drm_dev, false, false); pci_save_state(pdev); pci_disable_device(pdev); + pci_ignore_hotplug(pdev); pci_set_power_state(pdev, PCI_D3cold); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index bd4959ca23aa..15edf23b465c 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -158,10 +158,43 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8 return ret; } +static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, + struct drm_connector *connector) +{ + struct drm_device *dev = radeon_encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; + bool use_bl = false; + + if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))) + return; + + if (radeon_backlight == 0) { + return; + } else if (radeon_backlight == 1) { + use_bl = true; + } else if (radeon_backlight == -1) { + /* Quirks */ + /* Amilo Xi 2550 only works with acpi bl */ + if ((rdev->pdev->device == 0x9583) && + (rdev->pdev->subsystem_vendor == 0x1734) && + (rdev->pdev->subsystem_device == 0x1107)) + use_bl = false; + else + use_bl = true; + } + + if (use_bl) { + if (rdev->is_atom_bios) + radeon_atom_backlight_init(radeon_encoder, connector); + else + radeon_legacy_backlight_init(radeon_encoder, 
connector); + rdev->mode_info.bl_encoder = radeon_encoder; + } +} + void radeon_link_encoder_connector(struct drm_device *dev) { - struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; struct drm_encoder *encoder; @@ -174,13 +207,8 @@ radeon_link_encoder_connector(struct drm_device *dev) radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->devices & radeon_connector->devices) { drm_mode_connector_attach_encoder(connector, encoder); - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (rdev->is_atom_bios) - radeon_atom_backlight_init(radeon_encoder, connector); - else - radeon_legacy_backlight_init(radeon_encoder, connector); - rdev->mode_info.bl_encoder = radeon_encoder; - } + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + radeon_encoder_add_backlight(radeon_encoder, connector); } } } @@ -343,7 +371,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, case DRM_MODE_CONNECTOR_HDMIB: if (radeon_connector->use_digital) { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { if (pixel_clock > 340000) return true; else @@ -365,7 +393,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, return false; else { /* HDMI 1.3 supports up to 340 Mhz over single link */ - if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) { if (pixel_clock > 340000) return true; else diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 665ced3b7313..94b0f2aa3d7c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -127,8 +127,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, - false, true, - &gobj); + 0, true, &gobj); if (ret) { printk(KERN_ERR "failed to allocate framebuffer (%d)\n", aligned_size); @@ -331,7 +330,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb return 0; } -static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { +static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = { .gamma_set = radeon_crtc_fb_gamma_set, .gamma_get = radeon_crtc_fb_gamma_get, .fb_probe = radeonfb_create, @@ -353,7 +352,9 @@ int radeon_fbdev_init(struct radeon_device *rdev) rfbdev->rdev = rdev; rdev->mode_info.rfbdev = rfbdev; - rfbdev->helper.funcs = &radeon_fb_helper_funcs; + + drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper, + &radeon_fb_helper_funcs); ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, rdev->num_crtc, diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 2e723651069b..a053a0779aac 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) if (rdev->gart.robj == NULL) { r = radeon_bo_create(rdev, rdev->gart.table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - NULL, &rdev->gart.robj); + 0, NULL, &rdev->gart.robj); if (r) { return r; } @@ -243,7 +243,8 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, page_base = rdev->gart.pages_addr[p]; for (j = 0; j < 
(PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { if (rdev->gart.ptr) { - radeon_gart_set_page(rdev, t, page_base); + radeon_gart_set_page(rdev, t, page_base, + RADEON_GART_PAGE_DUMMY); } page_base += RADEON_GPU_PAGE_SIZE; } @@ -261,13 +262,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, * @pages: number of pages to bind * @pagelist: pages to bind * @dma_addr: DMA addresses of pages + * @flags: RADEON_GART_PAGE_* flags * * Binds the requested pages to the gart page table * (all asics). * Returns 0 for success, -EINVAL for failure. */ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, - int pages, struct page **pagelist, dma_addr_t *dma_addr) + int pages, struct page **pagelist, dma_addr_t *dma_addr, + uint32_t flags) { unsigned t; unsigned p; @@ -287,7 +290,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, if (rdev->gart.ptr) { page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base); + radeon_gart_set_page(rdev, t, page_base, flags); page_base += RADEON_GPU_PAGE_SIZE; } } @@ -298,33 +301,6 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, } /** - * radeon_gart_restore - bind all pages in the gart page table - * - * @rdev: radeon_device pointer - * - * Binds all pages in the gart page table (all asics). - * Used to rebuild the gart table on device startup or resume. - */ -void radeon_gart_restore(struct radeon_device *rdev) -{ - int i, j, t; - u64 page_base; - - if (!rdev->gart.ptr) { - return; - } - for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) { - page_base = rdev->gart.pages_addr[i]; - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base); - page_base += RADEON_GPU_PAGE_SIZE; - } - } - mb(); - radeon_gart_tlb_flush(rdev); -} - -/** * radeon_gart_init - init the driver info for managing the gart * * @rdev: radeon_device pointer diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d09650c1d720..bfd7e1b0ff3f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -40,9 +40,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) } } -int radeon_gem_object_create(struct radeon_device *rdev, int size, +int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, int alignment, int initial_domain, - bool discardable, bool kernel, + u32 flags, bool kernel, struct drm_gem_object **obj) { struct radeon_bo *robj; @@ -55,23 +55,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, alignment = PAGE_SIZE; } - /* maximun bo size is the minimun btw visible vram and gtt size */ - max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); + /* Maximum bo size is the unpinned gtt size since we use the gtt to + * handle vram to system pool migrations. 
+ */ + max_size = rdev->mc.gtt_size - rdev->gart_pin_size; if (size > max_size) { - printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n", - __func__, __LINE__, size >> 20, max_size >> 20); + DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n", + size >> 20, max_size >> 20); return -ENOMEM; } retry: - r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj); + r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, + flags, NULL, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { initial_domain |= RADEON_GEM_DOMAIN_GTT; goto retry; } - DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", + DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n", size, initial_domain, alignment, r); } return r; @@ -208,18 +211,15 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; struct ttm_mem_type_manager *man; - unsigned i; man = &rdev->mman.bdev.man[TTM_PL_VRAM]; args->vram_size = rdev->mc.real_vram_size; args->vram_visible = (u64)man->size << PAGE_SHIFT; - if (rdev->stollen_vga_memory) - args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); - args->vram_visible -= radeon_fbdev_total_size(rdev); - args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024; - for(i = 0; i < RADEON_NUM_RINGS; ++i) - args->gart_size -= rdev->ring[i].ring_size; + args->vram_visible -= rdev->vram_pin_size; + args->gart_size = rdev->mc.gtt_size; + args->gart_size -= rdev->gart_pin_size; + return 0; } @@ -252,8 +252,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ args->size = roundup(args->size, PAGE_SIZE); r = radeon_gem_object_create(rdev, args->size, args->alignment, - args->initial_domain, false, - false, &gobj); + args->initial_domain, args->flags, + false, &gobj); if (r) { up_read(&rdev->exclusive_lock); r = radeon_gem_handle_lockup(rdev, r); @@ -358,16 +358,18 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gobj; struct radeon_bo *robj; int r; + uint32_t cur_placement = 0; gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) { return -ENOENT; } robj = gem_to_radeon_bo(gobj); - r = radeon_bo_wait(robj, NULL, false); - /* callback hw specific functions if any */ - if (rdev->asic->ioctl_wait_idle) - robj->rdev->asic->ioctl_wait_idle(rdev, robj); + r = radeon_bo_wait(robj, &cur_placement, false); + /* Flush HDP cache via MMIO if necessary */ + if (rdev->asic->mmio_hdp_flush && + radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) + robj->rdev->asic->mmio_hdp_flush(rdev); drm_gem_object_unreference_unlocked(gobj); r = radeon_gem_handle_lockup(rdev, r); return r; @@ -461,11 +463,6 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL; } - if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) { - dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n"); - args->operation = RADEON_VA_RESULT_ERROR; - return -EINVAL; - } switch (args->operation) { case RADEON_VA_MAP: @@ -499,9 +496,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case RADEON_VA_MAP: - if (bo_va->soffset) { + if (bo_va->it.start) { args->operation = RADEON_VA_RESULT_VA_EXIST; - args->offset = bo_va->soffset; + args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE; 
goto out; } r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); @@ -572,9 +569,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, args->size = ALIGN(args->size, PAGE_SIZE); r = radeon_gem_object_create(rdev, args->size, 0, - RADEON_GEM_DOMAIN_VRAM, - false, ttm_bo_type_device, - &gobj); + RADEON_GEM_DOMAIN_VRAM, 0, + false, &gobj); if (r) return -ENOMEM; diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c new file mode 100644 index 000000000000..5bf2c0a05827 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_ib.c @@ -0,0 +1,320 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + * Christian König + */ +#include <drm/drmP.h> +#include "radeon.h" + +/* + * IB + * IBs (Indirect Buffers) and areas of GPU accessible memory where + * commands are stored. You can put a pointer to the IB in the + * command ring and the hw will fetch the commands from the IB + * and execute them. Generally userspace acceleration drivers + * produce command buffers which are send to the kernel and + * put in IBs for execution by the requested ring. + */ +static int radeon_debugfs_sa_init(struct radeon_device *rdev); + +/** + * radeon_ib_get - request an IB (Indirect Buffer) + * + * @rdev: radeon_device pointer + * @ring: ring index the IB is associated with + * @ib: IB object returned + * @size: requested IB size + * + * Request an IB (all asics). IBs are allocated using the + * suballocator. + * Returns 0 on success, error on failure. 
+ */ +int radeon_ib_get(struct radeon_device *rdev, int ring, + struct radeon_ib *ib, struct radeon_vm *vm, + unsigned size) +{ + int r; + + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256); + if (r) { + dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); + return r; + } + + r = radeon_semaphore_create(rdev, &ib->semaphore); + if (r) { + return r; + } + + ib->ring = ring; + ib->fence = NULL; + ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); + ib->vm = vm; + if (vm) { + /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address + * space and soffset is the offset inside the pool bo + */ + ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET; + } else { + ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); + } + ib->is_const_ib = false; + + return 0; +} + +/** + * radeon_ib_free - free an IB (Indirect Buffer) + * + * @rdev: radeon_device pointer + * @ib: IB object to free + * + * Free an IB (all asics). + */ +void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) +{ + radeon_semaphore_free(rdev, &ib->semaphore, ib->fence); + radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); + radeon_fence_unref(&ib->fence); +} + +/** + * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring + * + * @rdev: radeon_device pointer + * @ib: IB object to schedule + * @const_ib: Const IB to schedule (SI only) + * @hdp_flush: Whether or not to perform an HDP cache flush + * + * Schedule an IB on the associated ring (all asics). + * Returns 0 on success, error on failure. + * + * On SI, there are two parallel engines fed from the primary ring, + * the CE (Constant Engine) and the DE (Drawing Engine). Since + * resource descriptors have moved to memory, the CE allows you to + * prime the caches while the DE is updating register state so that + * the resource descriptors will be already in cache when the draw is + * processed. To accomplish this, the userspace driver submits two + * IBs, one for the CE and one for the DE. If there is a CE IB (called + * a CONST_IB), it will be put on the ring prior to the DE IB. Prior + * to SI there was just a DE IB. + */ +int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, + struct radeon_ib *const_ib, bool hdp_flush) +{ + struct radeon_ring *ring = &rdev->ring[ib->ring]; + int r = 0; + + if (!ib->length_dw || !ring->ready) { + /* TODO: Nothings in the ib we should report. 
*/ + dev_err(rdev->dev, "couldn't schedule ib\n"); + return -EINVAL; + } + + /* 64 dwords should be enough for fence too */ + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); + if (r) { + dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); + return r; + } + + /* grab a vm id if necessary */ + if (ib->vm) { + struct radeon_fence *vm_id_fence; + vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); + radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); + } + + /* sync with other rings */ + r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); + if (r) { + dev_err(rdev->dev, "failed to sync rings (%d)\n", r); + radeon_ring_unlock_undo(rdev, ring); + return r; + } + + if (ib->vm) + radeon_vm_flush(rdev, ib->vm, ib->ring); + + if (const_ib) { + radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); + radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); + } + radeon_ring_ib_execute(rdev, ib->ring, ib); + r = radeon_fence_emit(rdev, &ib->fence, ib->ring); + if (r) { + dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r); + radeon_ring_unlock_undo(rdev, ring); + return r; + } + if (const_ib) { + const_ib->fence = radeon_fence_ref(ib->fence); + } + + if (ib->vm) + radeon_vm_fence(rdev, ib->vm, ib->fence); + + radeon_ring_unlock_commit(rdev, ring, hdp_flush); + return 0; +} + +/** + * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool + * + * @rdev: radeon_device pointer + * + * Initialize the suballocator to manage a pool of memory + * for use as IBs (all asics). + * Returns 0 on success, error on failure. + */ +int radeon_ib_pool_init(struct radeon_device *rdev) +{ + int r; + + if (rdev->ib_pool_ready) { + return 0; + } + + if (rdev->family >= CHIP_BONAIRE) { + r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, + RADEON_IB_POOL_SIZE*64*1024, + RADEON_GPU_PAGE_SIZE, + RADEON_GEM_DOMAIN_GTT, + RADEON_GEM_GTT_WC); + } else { + /* Before CIK, it's better to stick to cacheable GTT due + * to the command stream checking + */ + r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, + RADEON_IB_POOL_SIZE*64*1024, + RADEON_GPU_PAGE_SIZE, + RADEON_GEM_DOMAIN_GTT, 0); + } + if (r) { + return r; + } + + r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo); + if (r) { + return r; + } + + rdev->ib_pool_ready = true; + if (radeon_debugfs_sa_init(rdev)) { + dev_err(rdev->dev, "failed to register debugfs file for SA\n"); + } + return 0; +} + +/** + * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool + * + * @rdev: radeon_device pointer + * + * Tear down the suballocator managing the pool of memory + * for use as IBs (all asics). + */ +void radeon_ib_pool_fini(struct radeon_device *rdev) +{ + if (rdev->ib_pool_ready) { + radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo); + radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo); + rdev->ib_pool_ready = false; + } +} + +/** + * radeon_ib_ring_tests - test IBs on the rings + * + * @rdev: radeon_device pointer + * + * Test an IB (Indirect Buffer) on each ring. + * If the test fails, disable the ring. + * Returns 0 on success, error if the primary GFX ring + * IB test fails. 
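Taken together, radeon_ib_get(), radeon_ib_schedule() and radeon_ib_free() above are used roughly as follows. This is a hedged sketch rather than a caller from the patch; the single filler NOP packet and the blocking fence wait are illustrative only.

static int example_ib_submit(struct radeon_device *rdev, int ring)
{
	struct radeon_ib ib;
	int r;

	/* 256 bytes from the suballocator, no VM */
	r = radeon_ib_get(rdev, ring, &ib, NULL, 256);
	if (r)
		return r;

	ib.ptr[0] = rdev->ring[ring].nop;	/* harmless filler packet */
	ib.length_dw = 1;

	/* hdp_flush = true, as the CS ioctl path now passes */
	r = radeon_ib_schedule(rdev, &ib, NULL, true);
	if (!r)
		r = radeon_fence_wait(ib.fence, false);

	radeon_ib_free(rdev, &ib);
	return r;
}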
+ */ +int radeon_ib_ring_tests(struct radeon_device *rdev) +{ + unsigned i; + int r; + + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + struct radeon_ring *ring = &rdev->ring[i]; + + if (!ring->ready) + continue; + + r = radeon_ib_test(rdev, i, ring); + if (r) { + ring->ready = false; + rdev->needs_reset = false; + + if (i == RADEON_RING_TYPE_GFX_INDEX) { + /* oh, oh, that's really bad */ + DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r); + rdev->accel_working = false; + return r; + + } else { + /* still not good, but we can live with it */ + DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r); + } + } + } + return 0; +} + +/* + * Debugfs info + */ +#if defined(CONFIG_DEBUG_FS) + +static int radeon_debugfs_sa_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct radeon_device *rdev = dev->dev_private; + + radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m); + + return 0; + +} + +static struct drm_info_list radeon_debugfs_sa_list[] = { + {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL}, +}; + +#endif + +static int radeon_debugfs_sa_init(struct radeon_device *rdev) +{ +#if defined(CONFIG_DEBUG_FS) + return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1); +#else + return 0; +#endif +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index d25ae6acfd5a..eb7164d07985 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -254,7 +254,18 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file } break; case RADEON_INFO_ACCEL_WORKING2: - *value = rdev->accel_working; + if (rdev->family == CHIP_HAWAII) { + if (rdev->accel_working) { + if (rdev->new_fw) + *value = 3; + else + *value = 2; + } else { + *value = 0; + } + } else { + *value = rdev->accel_working; + } break; case RADEON_INFO_TILING_CONFIG: if (rdev->family >= CHIP_BONAIRE) diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 0592ddb0904b..e27608c29c11 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -685,10 +685,11 @@ extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); -extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); extern int radeon_get_monitor_bpc(struct drm_connector *connector); +extern struct edid *radeon_connector_edid(struct drm_connector *connector); + extern void radeon_connector_hotplug(struct drm_connector *connector); extern int radeon_dp_mode_valid_helper(struct drm_connector *connector, struct drm_display_mode *mode); @@ -738,7 +739,6 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux); -extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); diff --git a/drivers/gpu/drm/radeon/radeon_object.c 
b/drivers/gpu/drm/radeon/radeon_object.c index 6c717b257d6d..480c87d8edc5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -46,16 +46,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); * function are calling it. */ -static void radeon_bo_clear_va(struct radeon_bo *bo) -{ - struct radeon_bo_va *bo_va, *tmp; - - list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { - /* remove from all vm address space */ - radeon_vm_bo_rmv(bo->rdev, bo_va); - } -} - static void radeon_update_memory_usage(struct radeon_bo *bo, unsigned mem_type, int sign) { @@ -90,7 +80,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) list_del_init(&bo->list); mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_clear_surface_reg(bo); - radeon_bo_clear_va(bo); + WARN_ON(!list_empty(&bo->va)); drm_gem_object_release(&bo->gem_base); kfree(bo); } @@ -114,15 +104,23 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; if (domain & RADEON_GEM_DOMAIN_GTT) { - if (rbo->rdev->flags & RADEON_IS_AGP) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT; + if (rbo->flags & RADEON_GEM_GTT_UC) { + rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || + (rbo->rdev->flags & RADEON_IS_AGP)) { + rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_TT; } else { rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; } } if (domain & RADEON_GEM_DOMAIN_CPU) { - if (rbo->rdev->flags & RADEON_IS_AGP) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM; + if (rbo->flags & RADEON_GEM_GTT_UC) { + rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || + rbo->rdev->flags & RADEON_IS_AGP) { + rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_SYSTEM; } else { rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; } @@ -146,7 +144,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, - struct sg_table *sg, struct radeon_bo **bo_ptr) + u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr) { struct radeon_bo *bo; enum ttm_bo_type type; @@ -183,6 +181,12 @@ int radeon_bo_create(struct radeon_device *rdev, bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_CPU); + + bo->flags = flags; + /* PCI GART is always snooped */ + if (!(rdev->flags & RADEON_IS_PCIE)) + bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); + radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); @@ -232,6 +236,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo) ttm_bo_kunmap(&bo->kmap); } +struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo) +{ + if (bo == NULL) + return NULL; + + ttm_bo_reference(&bo->tbo); + return bo; +} + void radeon_bo_unref(struct radeon_bo **bo) { struct ttm_buffer_object *tbo; @@ -241,9 +254,7 @@ void radeon_bo_unref(struct radeon_bo **bo) return; rdev = (*bo)->rdev; tbo = &((*bo)->tbo); - down_read(&rdev->pm.mclk_lock); ttm_bo_unref(&tbo); - up_read(&rdev->pm.mclk_lock); if (tbo == NULL) *bo = NULL; } @@ -292,9 +303,13 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, bo->pin_count = 1; if (gpu_addr 
!= NULL) *gpu_addr = radeon_bo_gpu_offset(bo); - } - if (unlikely(r != 0)) + if (domain == RADEON_GEM_DOMAIN_VRAM) + bo->rdev->vram_pin_size += radeon_bo_size(bo); + else + bo->rdev->gart_pin_size += radeon_bo_size(bo); + } else { dev_err(bo->rdev->dev, "%p pin failed\n", bo); + } return r; } @@ -317,8 +332,14 @@ int radeon_bo_unpin(struct radeon_bo *bo) for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (unlikely(r != 0)) + if (likely(r == 0)) { + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) + bo->rdev->vram_pin_size -= radeon_bo_size(bo); + else + bo->rdev->gart_pin_size -= radeon_bo_size(bo); + } else { dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); + } return r; } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 5a873f31a171..98a47fdf3625 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -124,11 +124,12 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, extern int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, - bool kernel, u32 domain, + bool kernel, u32 domain, u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr); extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); extern void radeon_bo_kunmap(struct radeon_bo *bo); +extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo); extern void radeon_bo_unref(struct radeon_bo **bo); extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, @@ -170,7 +171,8 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo) extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, - unsigned size, u32 align, u32 domain); + unsigned size, u32 align, u32 domain, + u32 flags); extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager); extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index e447e390d09a..164898b0010c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; - if ((rdev->flags & RADEON_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return snprintf(buf, PAGE_SIZE, "off\n"); - return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BALANCED) ? 
"balanced" : "performance"); @@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; - /* Can't set dpm state when the card is off */ - if ((rdev->flags & RADEON_IS_PX) && - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) - return -EINVAL; - mutex_lock(&rdev->pm.mutex); if (strncmp("battery", buf, strlen("battery")) == 0) rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; @@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev, goto fail; } mutex_unlock(&rdev->pm.mutex); - radeon_pm_compute_clocks(rdev); + + /* Can't set dpm state when the card is off */ + if (!(rdev->flags & RADEON_IS_PX) || + (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) + radeon_pm_compute_clocks(rdev); + fail: return count; } @@ -1303,10 +1299,6 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_RS780: case CHIP_RS880: case CHIP_RV770: - case CHIP_BARTS: - case CHIP_TURKS: - case CHIP_CAICOS: - case CHIP_CAYMAN: /* DPM requires the RLC, RV770+ dGPU requires SMC */ if (!rdev->rlc_fw) rdev->pm.pm_method = PM_METHOD_PROFILE; @@ -1330,6 +1322,10 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_PALM: case CHIP_SUMO: case CHIP_SUMO2: + case CHIP_BARTS: + case CHIP_TURKS: + case CHIP_CAICOS: + case CHIP_CAYMAN: case CHIP_ARUBA: case CHIP_TAHITI: case CHIP_PITCAIRN: @@ -1400,9 +1396,7 @@ static void radeon_pm_fini_old(struct radeon_device *rdev) } radeon_hwmon_fini(rdev); - - if (rdev->pm.power_state) - kfree(rdev->pm.power_state); + kfree(rdev->pm.power_state); } static void radeon_pm_fini_dpm(struct radeon_device *rdev) @@ -1421,9 +1415,7 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev) radeon_dpm_fini(rdev); radeon_hwmon_fini(rdev); - - if (rdev->pm.power_state) - kfree(rdev->pm.power_state); + kfree(rdev->pm.power_state); } void radeon_pm_fini(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 28d71070c389..0b16f2cbcf17 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -65,7 +65,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, int ret; ret = radeon_bo_create(rdev, size, PAGE_SIZE, false, - RADEON_GEM_DOMAIN_GTT, sg, &bo); + RADEON_GEM_DOMAIN_GTT, 0, sg, &bo); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index f8050f5429e2..d65607902537 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -26,258 +26,8 @@ * Jerome Glisse * Christian König */ -#include <linux/seq_file.h> -#include <linux/slab.h> #include <drm/drmP.h> -#include <drm/radeon_drm.h> -#include "radeon_reg.h" #include "radeon.h" -#include "atom.h" - -/* - * IB - * IBs (Indirect Buffers) and areas of GPU accessible memory where - * commands are stored. You can put a pointer to the IB in the - * command ring and the hw will fetch the commands from the IB - * and execute them. Generally userspace acceleration drivers - * produce command buffers which are send to the kernel and - * put in IBs for execution by the requested ring. 
- */ -static int radeon_debugfs_sa_init(struct radeon_device *rdev); - -/** - * radeon_ib_get - request an IB (Indirect Buffer) - * - * @rdev: radeon_device pointer - * @ring: ring index the IB is associated with - * @ib: IB object returned - * @size: requested IB size - * - * Request an IB (all asics). IBs are allocated using the - * suballocator. - * Returns 0 on success, error on failure. - */ -int radeon_ib_get(struct radeon_device *rdev, int ring, - struct radeon_ib *ib, struct radeon_vm *vm, - unsigned size) -{ - int r; - - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256); - if (r) { - dev_err(rdev->dev, "failed to get a new IB (%d)\n", r); - return r; - } - - r = radeon_semaphore_create(rdev, &ib->semaphore); - if (r) { - return r; - } - - ib->ring = ring; - ib->fence = NULL; - ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); - ib->vm = vm; - if (vm) { - /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address - * space and soffset is the offset inside the pool bo - */ - ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET; - } else { - ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); - } - ib->is_const_ib = false; - - return 0; -} - -/** - * radeon_ib_free - free an IB (Indirect Buffer) - * - * @rdev: radeon_device pointer - * @ib: IB object to free - * - * Free an IB (all asics). - */ -void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) -{ - radeon_semaphore_free(rdev, &ib->semaphore, ib->fence); - radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); - radeon_fence_unref(&ib->fence); -} - -/** - * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring - * - * @rdev: radeon_device pointer - * @ib: IB object to schedule - * @const_ib: Const IB to schedule (SI only) - * - * Schedule an IB on the associated ring (all asics). - * Returns 0 on success, error on failure. - * - * On SI, there are two parallel engines fed from the primary ring, - * the CE (Constant Engine) and the DE (Drawing Engine). Since - * resource descriptors have moved to memory, the CE allows you to - * prime the caches while the DE is updating register state so that - * the resource descriptors will be already in cache when the draw is - * processed. To accomplish this, the userspace driver submits two - * IBs, one for the CE and one for the DE. If there is a CE IB (called - * a CONST_IB), it will be put on the ring prior to the DE IB. Prior - * to SI there was just a DE IB. - */ -int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, - struct radeon_ib *const_ib) -{ - struct radeon_ring *ring = &rdev->ring[ib->ring]; - int r = 0; - - if (!ib->length_dw || !ring->ready) { - /* TODO: Nothings in the ib we should report. 
*/ - dev_err(rdev->dev, "couldn't schedule ib\n"); - return -EINVAL; - } - - /* 64 dwords should be enough for fence too */ - r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); - if (r) { - dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); - return r; - } - - /* grab a vm id if necessary */ - if (ib->vm) { - struct radeon_fence *vm_id_fence; - vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); - radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); - } - - /* sync with other rings */ - r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); - if (r) { - dev_err(rdev->dev, "failed to sync rings (%d)\n", r); - radeon_ring_unlock_undo(rdev, ring); - return r; - } - - if (ib->vm) - radeon_vm_flush(rdev, ib->vm, ib->ring); - - if (const_ib) { - radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); - radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); - } - radeon_ring_ib_execute(rdev, ib->ring, ib); - r = radeon_fence_emit(rdev, &ib->fence, ib->ring); - if (r) { - dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r); - radeon_ring_unlock_undo(rdev, ring); - return r; - } - if (const_ib) { - const_ib->fence = radeon_fence_ref(ib->fence); - } - - if (ib->vm) - radeon_vm_fence(rdev, ib->vm, ib->fence); - - radeon_ring_unlock_commit(rdev, ring); - return 0; -} - -/** - * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool - * - * @rdev: radeon_device pointer - * - * Initialize the suballocator to manage a pool of memory - * for use as IBs (all asics). - * Returns 0 on success, error on failure. - */ -int radeon_ib_pool_init(struct radeon_device *rdev) -{ - int r; - - if (rdev->ib_pool_ready) { - return 0; - } - r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, - RADEON_IB_POOL_SIZE*64*1024, - RADEON_GPU_PAGE_SIZE, - RADEON_GEM_DOMAIN_GTT); - if (r) { - return r; - } - - r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo); - if (r) { - return r; - } - - rdev->ib_pool_ready = true; - if (radeon_debugfs_sa_init(rdev)) { - dev_err(rdev->dev, "failed to register debugfs file for SA\n"); - } - return 0; -} - -/** - * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool - * - * @rdev: radeon_device pointer - * - * Tear down the suballocator managing the pool of memory - * for use as IBs (all asics). - */ -void radeon_ib_pool_fini(struct radeon_device *rdev) -{ - if (rdev->ib_pool_ready) { - radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo); - radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo); - rdev->ib_pool_ready = false; - } -} - -/** - * radeon_ib_ring_tests - test IBs on the rings - * - * @rdev: radeon_device pointer - * - * Test an IB (Indirect Buffer) on each ring. - * If the test fails, disable the ring. - * Returns 0 on success, error if the primary GFX ring - * IB test fails. 
- */ -int radeon_ib_ring_tests(struct radeon_device *rdev) -{ - unsigned i; - int r; - - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - struct radeon_ring *ring = &rdev->ring[i]; - - if (!ring->ready) - continue; - - r = radeon_ib_test(rdev, i, ring); - if (r) { - ring->ready = false; - rdev->needs_reset = false; - - if (i == RADEON_RING_TYPE_GFX_INDEX) { - /* oh, oh, that's really bad */ - DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r); - rdev->accel_working = false; - return r; - - } else { - /* still not good, but we can live with it */ - DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r); - } - } - } - return 0; -} /* * Rings @@ -427,17 +177,29 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig * * @rdev: radeon_device pointer * @ring: radeon_ring structure holding ring information + * @hdp_flush: Whether or not to perform an HDP cache flush * * Update the wptr (write pointer) to tell the GPU to * execute new commands on the ring buffer (all asics). */ -void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) +void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring, + bool hdp_flush) { + /* If we are emitting the HDP flush via the ring buffer, we need to + * do it before padding. + */ + if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush) + rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring); /* We pad to match fetch size */ while (ring->wptr & ring->align_mask) { radeon_ring_write(ring, ring->nop); } mb(); + /* If we are emitting the HDP flush via MMIO, we need to do it after + * all CPU writes to VRAM finished. + */ + if (hdp_flush && rdev->asic->mmio_hdp_flush) + rdev->asic->mmio_hdp_flush(rdev); radeon_ring_set_wptr(rdev, ring); } @@ -447,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) * * @rdev: radeon_device pointer * @ring: radeon_ring structure holding ring information + * @hdp_flush: Whether or not to perform an HDP cache flush * * Call radeon_ring_commit() then unlock the ring (all asics). 
*/ -void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) +void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring, + bool hdp_flush) { - radeon_ring_commit(rdev, ring); + radeon_ring_commit(rdev, ring, hdp_flush); mutex_unlock(&rdev->ring_lock); } @@ -612,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, data[i]); } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); kfree(data); return 0; } @@ -640,7 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig /* Allocate ring buffer */ if (ring->ring_obj == NULL) { r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, + RADEON_GEM_DOMAIN_GTT, 0, NULL, &ring->ring_obj); if (r) { dev_err(rdev->dev, "(%d) ring create failed\n", r); @@ -791,22 +555,6 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = { {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index}, }; -static int radeon_debugfs_sa_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct radeon_device *rdev = dev->dev_private; - - radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m); - - return 0; - -} - -static struct drm_info_list radeon_debugfs_sa_list[] = { - {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL}, -}; - #endif static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring) @@ -828,12 +576,3 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri #endif return 0; } - -static int radeon_debugfs_sa_init(struct radeon_device *rdev) -{ -#if defined(CONFIG_DEBUG_FS) - return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1); -#else - return 0; -#endif -} diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index adcf3e2f07da..b84f97c8718c 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager); int radeon_sa_bo_manager_init(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, - unsigned size, u32 align, u32 domain) + unsigned size, u32 align, u32 domain, u32 flags) { int i, r; @@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev, } r = radeon_bo_create(rdev, size, align, true, - domain, NULL, &sa_manager->bo); + domain, flags, NULL, &sa_manager->bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index dbd6bcde92de..abd6753a570a 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -34,7 +34,7 @@ int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore) { - uint32_t *cpu_addr; + uint64_t *cpu_addr; int i, r; *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); @@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, continue; } - radeon_ring_commit(rdev, &rdev->ring[i]); + radeon_ring_commit(rdev, &rdev->ring[i], false); radeon_fence_note_sync(fence, ring); semaphore->gpu_addr += 8; diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 3a13e0d1055c..17bc3dced9f1 100644 --- 
a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -56,13 +56,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) /* Number of tests = * (Total GTT - IB pool - writeback page - ring buffers) / test size */ - n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024; - for (i = 0; i < RADEON_NUM_RINGS; ++i) - n -= rdev->ring[i].ring_size; - if (rdev->wb.wb_obj) - n -= RADEON_GPU_PAGE_SIZE; - if (rdev->ih.ring_obj) - n -= rdev->ih.ring_size; + n = rdev->mc.gtt_size - rdev->gart_pin_size; n /= size; gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); @@ -73,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) } r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - NULL, &vram_obj); + 0, NULL, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -93,7 +87,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) struct radeon_fence *fence = NULL; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i); + RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; @@ -294,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev, return r; } radeon_fence_emit(rdev, fence, ring->idx); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); } return 0; } @@ -319,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringA); + radeon_ring_unlock_commit(rdev, ringA, false); r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1); if (r) @@ -331,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringA); + radeon_ring_unlock_commit(rdev, ringA, false); r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2); if (r) @@ -350,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringB); + radeon_ring_unlock_commit(rdev, ringB, false); r = radeon_fence_wait(fence1, false); if (r) { @@ -371,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringB); + radeon_ring_unlock_commit(rdev, ringB, false); r = radeon_fence_wait(fence2, false); if (r) { @@ -414,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringA); + radeon_ring_unlock_commit(rdev, ringA, false); r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA); if (r) @@ -426,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringB); + radeon_ring_unlock_commit(rdev, ringB, false); r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB); if (r) goto out_cleanup; @@ -448,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringC); + 
radeon_ring_unlock_commit(rdev, ringC, false); for (i = 0; i < 30; ++i) { mdelay(100); @@ -474,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, goto out_cleanup; } radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); - radeon_ring_unlock_commit(rdev, ringC); + radeon_ring_unlock_commit(rdev, ringC, false); mdelay(1000); diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index f749f2c3bbdb..9db74a96ef61 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -72,8 +72,8 @@ TRACE_EVENT(radeon_vm_bo_update, ), TP_fast_assign( - __entry->soffset = bo_va->soffset; - __entry->eoffset = bo_va->eoffset; + __entry->soffset = bo_va->it.start; + __entry->eoffset = bo_va->it.last + 1; __entry->flags = bo_va->flags; ), TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", @@ -104,6 +104,24 @@ TRACE_EVENT(radeon_vm_set_page, __entry->flags, __entry->count) ); +TRACE_EVENT(radeon_vm_flush, + TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id), + TP_ARGS(pd_addr, ring, id), + TP_STRUCT__entry( + __field(u64, pd_addr) + __field(u32, ring) + __field(u32, id) + ), + + TP_fast_assign( + __entry->pd_addr = pd_addr; + __entry->ring = ring; + __entry->id = id; + ), + TP_printk("pd_addr=%010Lx, ring=%u, id=%u", + __entry->pd_addr, __entry->ring, __entry->id) +); + DECLARE_EVENT_CLASS(radeon_fence_request, TP_PROTO(struct drm_device *dev, int ring, u32 seqno), diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c8a8a5144ec1..72afe82a95c9 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -521,6 +521,8 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct radeon_ttm_tt *gtt = (void*)ttm; + uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ | + RADEON_GART_PAGE_WRITE; int r; gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); @@ -528,8 +530,10 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm, WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } - r = radeon_gart_bind(gtt->rdev, gtt->offset, - ttm->num_pages, ttm->pages, gtt->ttm.dma_address); + if (ttm->caching_state == tt_cached) + flags |= RADEON_GART_PAGE_SNOOP; + r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); if (r) { DRM_ERROR("failed to bind %lu pages at 0x%08X\n", ttm->num_pages, (unsigned)gtt->offset); @@ -726,7 +730,7 @@ int radeon_ttm_init(struct radeon_device *rdev) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->stollen_vga_memory); if (r) { return r; diff --git a/drivers/gpu/drm/radeon/radeon_ucode.c b/drivers/gpu/drm/radeon/radeon_ucode.c new file mode 100644 index 000000000000..6beec680390c --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_ucode.c @@ -0,0 +1,167 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <drm/drmP.h> +#include "radeon.h" +#include "radeon_ucode.h" + +static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr) +{ + DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes)); + DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes)); + DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major)); + DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor)); + DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major)); + DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor)); + DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version)); + DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes)); + DRM_DEBUG("ucode_array_offset_bytes: %u\n", + le32_to_cpu(hdr->ucode_array_offset_bytes)); + DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32)); +} + +void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr) +{ + uint16_t version_major = le16_to_cpu(hdr->header_version_major); + uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); + + DRM_DEBUG("MC\n"); + radeon_ucode_print_common_hdr(hdr); + + if (version_major == 1) { + const struct mc_firmware_header_v1_0 *mc_hdr = + container_of(hdr, struct mc_firmware_header_v1_0, header); + + DRM_DEBUG("io_debug_size_bytes: %u\n", + le32_to_cpu(mc_hdr->io_debug_size_bytes)); + DRM_DEBUG("io_debug_array_offset_bytes: %u\n", + le32_to_cpu(mc_hdr->io_debug_array_offset_bytes)); + } else { + DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor); + } +} + +void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr) +{ + uint16_t version_major = le16_to_cpu(hdr->header_version_major); + uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); + + DRM_DEBUG("SMC\n"); + radeon_ucode_print_common_hdr(hdr); + + if (version_major == 1) { + const struct smc_firmware_header_v1_0 *smc_hdr = + container_of(hdr, struct smc_firmware_header_v1_0, header); + + DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr)); + } else { + DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor); + } +} + +void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr) +{ + uint16_t version_major = le16_to_cpu(hdr->header_version_major); 
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); + + DRM_DEBUG("GFX\n"); + radeon_ucode_print_common_hdr(hdr); + + if (version_major == 1) { + const struct gfx_firmware_header_v1_0 *gfx_hdr = + container_of(hdr, struct gfx_firmware_header_v1_0, header); + + DRM_DEBUG("ucode_feature_version: %u\n", + le32_to_cpu(gfx_hdr->ucode_feature_version)); + DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset)); + DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size)); + } else { + DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor); + } +} + +void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr) +{ + uint16_t version_major = le16_to_cpu(hdr->header_version_major); + uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); + + DRM_DEBUG("RLC\n"); + radeon_ucode_print_common_hdr(hdr); + + if (version_major == 1) { + const struct rlc_firmware_header_v1_0 *rlc_hdr = + container_of(hdr, struct rlc_firmware_header_v1_0, header); + + DRM_DEBUG("ucode_feature_version: %u\n", + le32_to_cpu(rlc_hdr->ucode_feature_version)); + DRM_DEBUG("save_and_restore_offset: %u\n", + le32_to_cpu(rlc_hdr->save_and_restore_offset)); + DRM_DEBUG("clear_state_descriptor_offset: %u\n", + le32_to_cpu(rlc_hdr->clear_state_descriptor_offset)); + DRM_DEBUG("avail_scratch_ram_locations: %u\n", + le32_to_cpu(rlc_hdr->avail_scratch_ram_locations)); + DRM_DEBUG("master_pkt_description_offset: %u\n", + le32_to_cpu(rlc_hdr->master_pkt_description_offset)); + } else { + DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor); + } +} + +void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr) +{ + uint16_t version_major = le16_to_cpu(hdr->header_version_major); + uint16_t version_minor = le16_to_cpu(hdr->header_version_minor); + + DRM_DEBUG("SDMA\n"); + radeon_ucode_print_common_hdr(hdr); + + if (version_major == 1) { + const struct sdma_firmware_header_v1_0 *sdma_hdr = + container_of(hdr, struct sdma_firmware_header_v1_0, header); + + DRM_DEBUG("ucode_feature_version: %u\n", + le32_to_cpu(sdma_hdr->ucode_feature_version)); + DRM_DEBUG("ucode_change_version: %u\n", + le32_to_cpu(sdma_hdr->ucode_change_version)); + DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset)); + DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size)); + } else { + DRM_ERROR("Unknown SDMA ucode version: %u.%u\n", + version_major, version_minor); + } +} + +int radeon_ucode_validate(const struct firmware *fw) +{ + const struct common_firmware_header *hdr = + (const struct common_firmware_header *)fw->data; + + if (fw->size == le32_to_cpu(hdr->size_bytes)) + return 0; + + return -EINVAL; +} + diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index 4e7c3269b183..dc4576e4d8ad 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h @@ -153,4 +153,75 @@ #define HAWAII_SMC_UCODE_START 0x20000 #define HAWAII_SMC_UCODE_SIZE 0x1FDEC +struct common_firmware_header { + uint32_t size_bytes; /* size of the entire header+image(s) in bytes */ + uint32_t header_size_bytes; /* size of just the header in bytes */ + uint16_t header_version_major; /* header version */ + uint16_t header_version_minor; /* header version */ + uint16_t ip_version_major; /* IP version */ + uint16_t ip_version_minor; /* IP version */ + uint32_t ucode_version; + uint32_t ucode_size_bytes; /* size of ucode in bytes */ + uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header 
*/ + uint32_t crc32; /* crc32 checksum of the payload */ +}; + +/* version_major=1, version_minor=0 */ +struct mc_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t io_debug_size_bytes; /* size of debug array in dwords */ + uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */ +}; + +/* version_major=1, version_minor=0 */ +struct smc_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t ucode_start_addr; +}; + +/* version_major=1, version_minor=0 */ +struct gfx_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t ucode_feature_version; + uint32_t jt_offset; /* jt location */ + uint32_t jt_size; /* size of jt */ +}; + +/* version_major=1, version_minor=0 */ +struct rlc_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t ucode_feature_version; + uint32_t save_and_restore_offset; + uint32_t clear_state_descriptor_offset; + uint32_t avail_scratch_ram_locations; + uint32_t master_pkt_description_offset; +}; + +/* version_major=1, version_minor=0 */ +struct sdma_firmware_header_v1_0 { + struct common_firmware_header header; + uint32_t ucode_feature_version; + uint32_t ucode_change_version; + uint32_t jt_offset; /* jt location */ + uint32_t jt_size; /* size of jt */ +}; + +/* header is fixed size */ +union radeon_firmware_header { + struct common_firmware_header common; + struct mc_firmware_header_v1_0 mc; + struct smc_firmware_header_v1_0 smc; + struct gfx_firmware_header_v1_0 gfx; + struct rlc_firmware_header_v1_0 rlc; + struct sdma_firmware_header_v1_0 sdma; + uint8_t raw[0x100]; +}; + +void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr); +void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr); +void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr); +void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr); +void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr); +int radeon_ucode_validate(const struct firmware *fw); + #endif diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index a4ad270e8261..341848a14376 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -117,7 +117,7 @@ int radeon_uvd_init(struct radeon_device *rdev) bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); return r; @@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, ib.ptr[i] = PACKET2(0); ib.length_dw = 16; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) goto err; ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); @@ -674,7 +674,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, int r, i; r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); if (r) return r; @@ -720,7 +720,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, int r, i; r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); if (r) return r; diff --git a/drivers/gpu/drm/radeon/radeon_vce.c 
b/drivers/gpu/drm/radeon/radeon_vce.c index aa21c31a846c..c7190aadbd89 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev) size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); return r; @@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, for (i = ib.length_dw; i < ib_size_dw; ++i) ib.ptr[i] = 0x0; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); } @@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, for (i = ib.length_dw; i < ib_size_dw; ++i) ib.ptr[i] = 0x0; - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); } @@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } radeon_ring_write(ring, VCE_CMD_END); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { if (vce_v1_0_get_rptr(rdev, ring) != rptr) diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 725d3669014f..088ffdc2f577 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -238,8 +238,8 @@ void radeon_vm_flush(struct radeon_device *rdev, uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); /* if we can't remember our last VM flush then flush now! */ - /* XXX figure out why we have to flush all the time */ - if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) { + if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) { + trace_radeon_vm_flush(pd_addr, ring, vm->id); vm->pd_gpu_addr = pd_addr; radeon_ring_vm_flush(rdev, ring, vm); } @@ -325,17 +325,15 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, } bo_va->vm = vm; bo_va->bo = bo; - bo_va->soffset = 0; - bo_va->eoffset = 0; + bo_va->it.start = 0; + bo_va->it.last = 0; bo_va->flags = 0; - bo_va->valid = false; + bo_va->addr = 0; bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->bo_list); - INIT_LIST_HEAD(&bo_va->vm_list); INIT_LIST_HEAD(&bo_va->vm_status); mutex_lock(&vm->mutex); - list_add(&bo_va->vm_list, &vm->va); list_add_tail(&bo_va->bo_list, &bo->va); mutex_unlock(&vm->mutex); @@ -343,6 +341,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, } /** + * radeon_vm_set_pages - helper to call the right asic function + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: hw access flags + * + * Traces the parameters and calls the right asic functions + * to setup the page table using the DMA. 
+ */ +static void radeon_vm_set_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + trace_radeon_vm_set_page(pe, addr, count, incr, flags); + + if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) { + uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; + radeon_asic_vm_copy_pages(rdev, ib, pe, src, count); + + } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) { + radeon_asic_vm_write_pages(rdev, ib, pe, addr, + count, incr, flags); + + } else { + radeon_asic_vm_set_pages(rdev, ib, pe, addr, + count, incr, flags); + } +} + +/** * radeon_vm_clear_bo - initially clear the page dir/table * * @rdev: radeon_device pointer @@ -376,16 +410,17 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, addr = radeon_bo_gpu_offset(bo); entries = radeon_bo_size(bo) / 8; - r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, - NULL, entries * 2 + 64); + r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256); if (r) goto error; ib.length_dw = 0; - radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0); + radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0); + radeon_asic_vm_pad_ib(rdev, &ib); + WARN_ON(ib.length_dw > 64); - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) goto error; @@ -419,11 +454,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, uint32_t flags) { uint64_t size = radeon_bo_size(bo_va->bo); - uint64_t eoffset, last_offset = 0; struct radeon_vm *vm = bo_va->vm; - struct radeon_bo_va *tmp; - struct list_head *head; unsigned last_pfn, pt_idx; + uint64_t eoffset; int r; if (soffset) { @@ -445,51 +478,53 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, } mutex_lock(&vm->mutex); - head = &vm->va; - last_offset = 0; - list_for_each_entry(tmp, &vm->va, vm_list) { - if (bo_va == tmp) { - /* skip over currently modified bo */ - continue; + if (bo_va->it.start || bo_va->it.last) { + if (bo_va->addr) { + /* add a clone of the bo_va to clear the old address */ + struct radeon_bo_va *tmp; + tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); + if (!tmp) { + mutex_unlock(&vm->mutex); + return -ENOMEM; + } + tmp->it.start = bo_va->it.start; + tmp->it.last = bo_va->it.last; + tmp->vm = vm; + tmp->addr = bo_va->addr; + tmp->bo = radeon_bo_ref(bo_va->bo); + list_add(&tmp->vm_status, &vm->freed); } - if (soffset >= last_offset && eoffset <= tmp->soffset) { - /* bo can be added before this one */ - break; - } - if (eoffset > tmp->soffset && soffset < tmp->eoffset) { - /* bo and tmp overlap, invalid offset */ - dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", - bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, - (unsigned)tmp->soffset, (unsigned)tmp->eoffset); - mutex_unlock(&vm->mutex); - return -EINVAL; - } - last_offset = tmp->eoffset; - head = &tmp->vm_list; + interval_tree_remove(&bo_va->it, &vm->va); + bo_va->it.start = 0; + bo_va->it.last = 0; } - if (bo_va->soffset) { - /* add a clone of the bo_va to clear the old address */ - tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); - if (!tmp) { + soffset /= RADEON_GPU_PAGE_SIZE; + eoffset /= RADEON_GPU_PAGE_SIZE; + if (soffset || eoffset) { + struct interval_tree_node *it; + it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); + if (it) { + struct radeon_bo_va *tmp; + tmp = container_of(it, struct radeon_bo_va, it); + /* bo and tmp overlap, invalid offset */ + dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " + "(bo %p 
0x%010lx 0x%010lx)\n", bo_va->bo, + soffset, tmp->bo, tmp->it.start, tmp->it.last); mutex_unlock(&vm->mutex); - return -ENOMEM; + return -EINVAL; } - tmp->soffset = bo_va->soffset; - tmp->eoffset = bo_va->eoffset; - tmp->vm = vm; - list_add(&tmp->vm_status, &vm->freed); + bo_va->it.start = soffset; + bo_va->it.last = eoffset - 1; + interval_tree_insert(&bo_va->it, &vm->va); } - bo_va->soffset = soffset; - bo_va->eoffset = eoffset; bo_va->flags = flags; - bo_va->valid = false; - list_move(&bo_va->vm_list, head); + bo_va->addr = 0; - soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size; - eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size; + soffset >>= radeon_vm_block_size; + eoffset >>= radeon_vm_block_size; BUG_ON(eoffset >= radeon_vm_num_pdes(rdev)); @@ -510,7 +545,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, NULL, &pt); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt); if (r) return r; @@ -611,7 +646,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, ndw = 64; /* assume the worst case */ - ndw += vm->max_pde_used * 16; + ndw += vm->max_pde_used * 6; /* update too big for an IB */ if (ndw > 0xfffff) @@ -640,9 +675,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, ((last_pt + incr * count) != pt)) { if (count) { - radeon_asic_vm_set_page(rdev, &ib, last_pde, - last_pt, count, incr, - R600_PTE_VALID); + radeon_vm_set_pages(rdev, &ib, last_pde, + last_pt, count, incr, + R600_PTE_VALID); } count = 1; @@ -654,13 +689,15 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, } if (count) - radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count, - incr, R600_PTE_VALID); + radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count, + incr, R600_PTE_VALID); if (ib.length_dw != 0) { + radeon_asic_vm_pad_ib(rdev, &ib); radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); - r = radeon_ib_schedule(rdev, &ib, NULL); + WARN_ON(ib.length_dw > ndw); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { radeon_ib_free(rdev, &ib); return r; @@ -725,30 +762,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev, (frag_start >= frag_end)) { count = (pe_end - pe_start) / 8; - radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count, - RADEON_GPU_PAGE_SIZE, flags); + radeon_vm_set_pages(rdev, ib, pe_start, addr, count, + RADEON_GPU_PAGE_SIZE, flags); return; } /* handle the 4K area at the beginning */ if (pe_start != frag_start) { count = (frag_start - pe_start) / 8; - radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count, - RADEON_GPU_PAGE_SIZE, flags); + radeon_vm_set_pages(rdev, ib, pe_start, addr, count, + RADEON_GPU_PAGE_SIZE, flags); addr += RADEON_GPU_PAGE_SIZE * count; } /* handle the area in the middle */ count = (frag_end - frag_start) / 8; - radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count, - RADEON_GPU_PAGE_SIZE, flags | frag_flags); + radeon_vm_set_pages(rdev, ib, frag_start, addr, count, + RADEON_GPU_PAGE_SIZE, flags | frag_flags); /* handle the 4K area at the end */ if (frag_end != pe_end) { addr += RADEON_GPU_PAGE_SIZE * count; count = (pe_end - frag_end) / 8; - radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count, - RADEON_GPU_PAGE_SIZE, flags); + radeon_vm_set_pages(rdev, ib, frag_end, addr, count, + RADEON_GPU_PAGE_SIZE, flags); } } @@ -777,9 +814,6 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, 
unsigned count = 0; uint64_t addr; - start = start / RADEON_GPU_PAGE_SIZE; - end = end / RADEON_GPU_PAGE_SIZE; - /* walk over the address space and update the page tables */ for (addr = start; addr < end; ) { uint64_t pt_idx = addr >> radeon_vm_block_size; @@ -842,55 +876,73 @@ int radeon_vm_bo_update(struct radeon_device *rdev, { struct radeon_vm *vm = bo_va->vm; struct radeon_ib ib; - unsigned nptes, ndw; + unsigned nptes, ncmds, ndw; uint64_t addr; + uint32_t flags; int r; - - if (!bo_va->soffset) { + if (!bo_va->it.start) { dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", bo_va->bo, vm); return -EINVAL; } - if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) - return 0; + list_del_init(&bo_va->vm_status); bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; + bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; if (mem) { addr = mem->start << PAGE_SHIFT; if (mem->mem_type != TTM_PL_SYSTEM) { bo_va->flags |= RADEON_VM_PAGE_VALID; - bo_va->valid = true; } if (mem->mem_type == TTM_PL_TT) { bo_va->flags |= RADEON_VM_PAGE_SYSTEM; + if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC))) + bo_va->flags |= RADEON_VM_PAGE_SNOOPED; + } else { addr += rdev->vm_manager.vram_base_offset; } } else { addr = 0; - bo_va->valid = false; } + if (addr == bo_va->addr) + return 0; + bo_va->addr = addr; + trace_radeon_vm_bo_update(bo_va); - nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE; + nptes = bo_va->it.last - bo_va->it.start + 1; + + /* reserve space for one command every (1 << BLOCK_SIZE) entries + or 2k dwords (whatever is smaller) */ + ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1; /* padding, etc. */ ndw = 64; - if (radeon_vm_block_size > 11) - /* reserve space for one header for every 2k dwords */ - ndw += (nptes >> 11) * 4; - else - /* reserve space for one header for - every (1 << BLOCK_SIZE) entries */ - ndw += (nptes >> radeon_vm_block_size) * 4; + flags = radeon_vm_page_flags(bo_va->flags); + if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) { + /* only copy commands needed */ + ndw += ncmds * 7; - /* reserve space for pte addresses */ - ndw += nptes * 2; + } else if (flags & R600_PTE_SYSTEM) { + /* header for write data commands */ + ndw += ncmds * 4; + + /* body of write data command */ + ndw += nptes * 2; + + } else { + /* set page commands needed */ + ndw += ncmds * 10; + + /* two extra commands for begin/end of fragment */ + ndw += 2 * 10; + } /* update too big for an IB */ if (ndw > 0xfffff) @@ -901,11 +953,15 @@ int radeon_vm_bo_update(struct radeon_device *rdev, return r; ib.length_dw = 0; - radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, - addr, radeon_vm_page_flags(bo_va->flags)); + radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, + bo_va->it.last + 1, addr, + radeon_vm_page_flags(bo_va->flags)); + + radeon_asic_vm_pad_ib(rdev, &ib); + WARN_ON(ib.length_dw > ndw); radeon_semaphore_sync_to(ib.semaphore, vm->fence); - r = radeon_ib_schedule(rdev, &ib, NULL); + r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { radeon_ib_free(rdev, &ib); return r; @@ -936,8 +992,8 @@ int radeon_vm_clear_freed(struct radeon_device *rdev, int r; list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { - list_del(&bo_va->vm_status); r = radeon_vm_bo_update(rdev, bo_va, NULL); + radeon_bo_unref(&bo_va->bo); kfree(bo_va); if (r) return r; @@ -947,6 +1003,31 @@ int radeon_vm_clear_freed(struct radeon_device *rdev, } /** + * radeon_vm_clear_invalids - clear invalidated BOs in the PT + * + * @rdev: 
radeon_device pointer + * @vm: requested vm + * + * Make sure all invalidated BOs are cleared in the PT. + * Returns 0 for success. + * + * PTs have to be reserved and mutex must be locked! + */ +int radeon_vm_clear_invalids(struct radeon_device *rdev, + struct radeon_vm *vm) +{ + struct radeon_bo_va *bo_va, *tmp; + int r; + + list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) { + r = radeon_vm_bo_update(rdev, bo_va, NULL); + if (r) + return r; + } + return 0; +} + +/** * radeon_vm_bo_rmv - remove a bo to a specific vm * * @rdev: radeon_device pointer @@ -964,10 +1045,11 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev, list_del(&bo_va->bo_list); mutex_lock(&vm->mutex); - list_del(&bo_va->vm_list); + interval_tree_remove(&bo_va->it, &vm->va); + list_del(&bo_va->vm_status); - if (bo_va->soffset) { - bo_va->bo = NULL; + if (bo_va->addr) { + bo_va->bo = radeon_bo_ref(bo_va->bo); list_add(&bo_va->vm_status, &vm->freed); } else { kfree(bo_va); @@ -991,7 +1073,12 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo_va *bo_va; list_for_each_entry(bo_va, &bo->va, bo_list) { - bo_va->valid = false; + if (bo_va->addr) { + mutex_lock(&bo_va->vm->mutex); + list_del(&bo_va->vm_status); + list_add(&bo_va->vm_status, &bo_va->vm->invalidated); + mutex_unlock(&bo_va->vm->mutex); + } } } @@ -1016,7 +1103,8 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) vm->last_flush = NULL; vm->last_id_use = NULL; mutex_init(&vm->mutex); - INIT_LIST_HEAD(&vm->va); + vm->va = RB_ROOT; + INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->freed); pd_size = radeon_vm_directory_size(rdev); @@ -1031,7 +1119,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) } r = radeon_bo_create(rdev, pd_size, align, true, - RADEON_GEM_DOMAIN_VRAM, NULL, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, &vm->page_directory); if (r) return r; @@ -1060,11 +1148,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) struct radeon_bo_va *bo_va, *tmp; int i, r; - if (!list_empty(&vm->va)) { + if (!RB_EMPTY_ROOT(&vm->va)) { dev_err(rdev->dev, "still active bo inside vm\n"); } - list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) { - list_del_init(&bo_va->vm_list); + rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) { + interval_tree_remove(&bo_va->it, &vm->va); r = radeon_bo_reserve(bo_va->bo, false); if (!r) { list_del_init(&bo_va->bo_list); @@ -1072,8 +1160,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) kfree(bo_va); } } - list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) + list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { + radeon_bo_unref(&bo_va->bo); kfree(bo_va); + } for (i = 0; i < radeon_vm_num_pdes(rdev); i++) radeon_bo_unref(&vm->page_tables[i].bo); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index a0f96decece3..c5799f16aa4b 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -109,7 +109,6 @@ int rs400_gart_enable(struct radeon_device *rdev) uint32_t size_reg; uint32_t tmp; - radeon_gart_restore(rdev); tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); @@ -209,17 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev) radeon_gart_table_ram_free(rdev); } +#define RS400_PTE_UNSNOOPED (1 << 0) #define RS400_PTE_WRITEABLE (1 << 2) #define RS400_PTE_READABLE (1 << 3) -void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t 
addr) +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr, uint32_t flags) { uint32_t entry; u32 *gtt = rdev->gart.ptr; entry = (lower_32_bits(addr) & PAGE_MASK) | - ((upper_32_bits(addr) & 0xff) << 4) | - RS400_PTE_WRITEABLE | RS400_PTE_READABLE; + ((upper_32_bits(addr) & 0xff) << 4); + if (flags & RADEON_GART_PAGE_READ) + entry |= RS400_PTE_READABLE; + if (flags & RADEON_GART_PAGE_WRITE) + entry |= RS400_PTE_WRITEABLE; + if (!(flags & RADEON_GART_PAGE_SNOOP)) + entry |= RS400_PTE_UNSNOOPED; entry = cpu_to_le32(entry); gtt[i] = entry; } diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index d1a35cb1c91d..5f6db4629aaa 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -555,7 +555,6 @@ static int rs600_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Enable bus master */ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; WREG32(RADEON_BUS_CNTL, tmp); @@ -626,15 +625,21 @@ static void rs600_gart_fini(struct radeon_device *rdev) radeon_gart_table_vram_free(rdev); } -void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr) +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t addr, uint32_t flags) { void __iomem *ptr = (void *)rdev->gart.ptr; addr = addr & 0xFFFFFFFFFFFFF000ULL; - if (addr == rdev->dummy_page.addr) - addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED; - else - addr |= R600_PTE_GART; + addr |= R600_PTE_SYSTEM; + if (flags & RADEON_GART_PAGE_VALID) + addr |= R600_PTE_VALID; + if (flags & RADEON_GART_PAGE_READ) + addr |= R600_PTE_READABLE; + if (flags & RADEON_GART_PAGE_WRITE) + addr |= R600_PTE_WRITEABLE; + if (flags & RADEON_GART_PAGE_SNOOP) + addr |= R600_PTE_SNOOPED; writeq(addr, ptr + (i * 8)); } diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 3e21e869015f..8a477bf1fdb3 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); radeon_ring_write(ring, PACKET0(0x20C8, 0)); radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); } int rv515_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index da8703d8d455..d9f5ce715c9b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -900,7 +900,6 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | @@ -1178,7 +1177,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) u32 hdp_host_path_cntl; u32 sq_dyn_gpr_size_simd_ab_0; u32 gb_tiling_config = 0; - u32 cc_rb_backend_disable = 0; u32 cc_gc_shader_pipe_config = 0; u32 mc_arb_ramcfg; u32 db_debug4, tmp; @@ -1312,21 +1310,7 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(SPI_CONFIG_CNTL, 0); } - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; - tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16); - if (tmp < rdev->config.rv770.max_backends) { - rdev->config.rv770.max_backends = tmp; - } - 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; - tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK); - if (tmp < rdev->config.rv770.max_pipes) { - rdev->config.rv770.max_pipes = tmp; - } - tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); - if (tmp < rdev->config.rv770.max_simds) { - rdev->config.rv770.max_simds = tmp; - } tmp = rdev->config.rv770.max_simds - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); rdev->config.rv770.active_simds = tmp; @@ -1349,6 +1333,14 @@ static void rv770_gpu_init(struct radeon_device *rdev) rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK; + tmp = 0; + for (i = 0; i < rdev->config.rv770.max_backends; i++) + tmp |= (1 << i); + /* if all the backends are disabled, fix it up here */ + if ((disabled_rb_mask & tmp) == tmp) { + for (i = 0; i < rdev->config.rv770.max_backends; i++) + disabled_rb_mask &= ~(1 << i); + } tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends, R7XX_MAX_BACKENDS, disabled_rb_mask); diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index bbf2e076ee45..74426ac2bb5c 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev, return r; } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_semaphore_free(rdev, &sem, *fence); return r; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 9e854fd016da..3a0b973e8a96 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -42,6 +42,14 @@ MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); MODULE_FIRMWARE("radeon/TAHITI_mc2.bin"); MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); MODULE_FIRMWARE("radeon/TAHITI_smc.bin"); + +MODULE_FIRMWARE("radeon/tahiti_pfp.bin"); +MODULE_FIRMWARE("radeon/tahiti_me.bin"); +MODULE_FIRMWARE("radeon/tahiti_ce.bin"); +MODULE_FIRMWARE("radeon/tahiti_mc.bin"); +MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); +MODULE_FIRMWARE("radeon/tahiti_smc.bin"); + MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); @@ -49,6 +57,14 @@ MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin"); + +MODULE_FIRMWARE("radeon/pitcairn_pfp.bin"); +MODULE_FIRMWARE("radeon/pitcairn_me.bin"); +MODULE_FIRMWARE("radeon/pitcairn_ce.bin"); +MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); +MODULE_FIRMWARE("radeon/pitcairn_rlc.bin"); +MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); + MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); MODULE_FIRMWARE("radeon/VERDE_me.bin"); MODULE_FIRMWARE("radeon/VERDE_ce.bin"); @@ -56,6 +72,14 @@ MODULE_FIRMWARE("radeon/VERDE_mc.bin"); MODULE_FIRMWARE("radeon/VERDE_mc2.bin"); MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); MODULE_FIRMWARE("radeon/VERDE_smc.bin"); + +MODULE_FIRMWARE("radeon/verde_pfp.bin"); +MODULE_FIRMWARE("radeon/verde_me.bin"); +MODULE_FIRMWARE("radeon/verde_ce.bin"); +MODULE_FIRMWARE("radeon/verde_mc.bin"); +MODULE_FIRMWARE("radeon/verde_rlc.bin"); +MODULE_FIRMWARE("radeon/verde_smc.bin"); + MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); 
MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); @@ -63,6 +87,14 @@ MODULE_FIRMWARE("radeon/OLAND_mc.bin"); MODULE_FIRMWARE("radeon/OLAND_mc2.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); MODULE_FIRMWARE("radeon/OLAND_smc.bin"); + +MODULE_FIRMWARE("radeon/oland_pfp.bin"); +MODULE_FIRMWARE("radeon/oland_me.bin"); +MODULE_FIRMWARE("radeon/oland_ce.bin"); +MODULE_FIRMWARE("radeon/oland_mc.bin"); +MODULE_FIRMWARE("radeon/oland_rlc.bin"); +MODULE_FIRMWARE("radeon/oland_smc.bin"); + MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); MODULE_FIRMWARE("radeon/HAINAN_me.bin"); MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); @@ -71,6 +103,13 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin"); MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); +MODULE_FIRMWARE("radeon/hainan_pfp.bin"); +MODULE_FIRMWARE("radeon/hainan_me.bin"); +MODULE_FIRMWARE("radeon/hainan_ce.bin"); +MODULE_FIRMWARE("radeon/hainan_mc.bin"); +MODULE_FIRMWARE("radeon/hainan_rlc.bin"); +MODULE_FIRMWARE("radeon/hainan_smc.bin"); + static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); static void si_program_aspm(struct radeon_device *rdev); @@ -1470,38 +1509,54 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { /* ucode loading */ int si_mc_load_microcode(struct radeon_device *rdev) { - const __be32 *fw_data; + const __be32 *fw_data = NULL; + const __le32 *new_fw_data = NULL; u32 running, blackout = 0; - u32 *io_mc_regs; + u32 *io_mc_regs = NULL; + const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; - ucode_size = rdev->mc_fw->size / 4; + if (rdev->new_fw) { + const struct mc_firmware_header_v1_0 *hdr = + (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data; + + radeon_ucode_print_mc_hdr(&hdr->header); + regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); + new_io_mc_regs = (const __le32 *) + (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + new_fw_data = (const __le32 *) + (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + } else { + ucode_size = rdev->mc_fw->size / 4; - switch (rdev->family) { - case CHIP_TAHITI: - io_mc_regs = (u32 *)&tahiti_io_mc_regs; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_PITCAIRN: - io_mc_regs = (u32 *)&pitcairn_io_mc_regs; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_VERDE: - default: - io_mc_regs = (u32 *)&verde_io_mc_regs; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_OLAND: - io_mc_regs = (u32 *)&oland_io_mc_regs; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; - case CHIP_HAINAN: - io_mc_regs = (u32 *)&hainan_io_mc_regs; - regs_size = TAHITI_IO_MC_REGS_SIZE; - break; + switch (rdev->family) { + case CHIP_TAHITI: + io_mc_regs = (u32 *)&tahiti_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_PITCAIRN: + io_mc_regs = (u32 *)&pitcairn_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_VERDE: + default: + io_mc_regs = (u32 *)&verde_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_OLAND: + io_mc_regs = (u32 *)&oland_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + case CHIP_HAINAN: + io_mc_regs = (u32 *)&hainan_io_mc_regs; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; + } + fw_data = (const __be32 *)rdev->mc_fw->data; } running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; @@ -1518,13 
+1573,21 @@ int si_mc_load_microcode(struct radeon_device *rdev) /* load mc io regs */ for (i = 0; i < regs_size; i++) { - WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); - WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); + if (rdev->new_fw) { + WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++)); + WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++)); + } else { + WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); + WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); + } } /* load the MC ucode */ - fw_data = (const __be32 *)rdev->mc_fw->data; - for (i = 0; i < ucode_size; i++) - WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); + for (i = 0; i < ucode_size; i++) { + if (rdev->new_fw) + WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++)); + else + WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); + } /* put the engine back into the active state */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); @@ -1553,18 +1616,19 @@ int si_mc_load_microcode(struct radeon_device *rdev) static int si_init_microcode(struct radeon_device *rdev) { const char *chip_name; - const char *rlc_chip_name; + const char *new_chip_name; size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; size_t smc_req_size, mc2_req_size; char fw_name[30]; int err; + int new_fw = 0; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; - rlc_chip_name = "TAHITI"; + new_chip_name = "tahiti"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; @@ -1575,7 +1639,7 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_PITCAIRN: chip_name = "PITCAIRN"; - rlc_chip_name = "PITCAIRN"; + new_chip_name = "pitcairn"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; @@ -1586,7 +1650,7 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_VERDE: chip_name = "VERDE"; - rlc_chip_name = "VERDE"; + new_chip_name = "verde"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; @@ -1597,7 +1661,7 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_OLAND: chip_name = "OLAND"; - rlc_chip_name = "OLAND"; + new_chip_name = "oland"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; @@ -1607,7 +1671,7 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_HAINAN: chip_name = "HAINAN"; - rlc_chip_name = "HAINAN"; + new_chip_name = "hainan"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; @@ -1618,86 +1682,178 @@ static int si_init_microcode(struct radeon_device *rdev) default: BUG(); } - DRM_INFO("Loading %s Microcode\n", chip_name); + DRM_INFO("Loading %s Microcode\n", new_chip_name); - snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->pfp_fw->size != pfp_req_size) { - printk(KERN_ERR - "si_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->pfp_fw->size, fw_name); - err = -EINVAL; - goto out; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->pfp_fw->size != pfp_req_size) { + printk(KERN_ERR + 
"si_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->pfp_fw->size, fw_name); + err = -EINVAL; + goto out; + } + } else { + err = radeon_ucode_validate(rdev->pfp_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name); err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->me_fw->size != me_req_size) { - printk(KERN_ERR - "si_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->me_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->me_fw->size != me_req_size) { + printk(KERN_ERR + "si_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->me_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->me_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name); err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->ce_fw->size != ce_req_size) { - printk(KERN_ERR - "si_cp: Bogus length %zu in firmware \"%s\"\n", - rdev->ce_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); + err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->ce_fw->size != ce_req_size) { + printk(KERN_ERR + "si_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->ce_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->ce_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name); err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->rlc_fw->size != rlc_req_size) { - printk(KERN_ERR - "si_rlc: Bogus length %zu in firmware \"%s\"\n", - rdev->rlc_fw->size, fw_name); - err = -EINVAL; + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); + err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); + if (err) + goto out; + if (rdev->rlc_fw->size != rlc_req_size) { + printk(KERN_ERR + "si_rlc: Bogus length %zu in firmware \"%s\"\n", + rdev->rlc_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->rlc_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) { - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); - if (err) + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = 
request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if ((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)) { + printk(KERN_ERR + "si_mc: Bogus length %zu in firmware \"%s\"\n", + rdev->mc_fw->size, fw_name); + err = -EINVAL; + } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); + } else { + err = radeon_ucode_validate(rdev->mc_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); goto out; + } else { + new_fw++; + } } - if ((rdev->mc_fw->size != mc_req_size) && - (rdev->mc_fw->size != mc2_req_size)) { - printk(KERN_ERR - "si_mc: Bogus length %zu in firmware \"%s\"\n", - rdev->mc_fw->size, fw_name); - err = -EINVAL; - } - DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); - snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) { - printk(KERN_ERR - "smc: error loading firmware \"%s\"\n", - fw_name); - release_firmware(rdev->smc_fw); - rdev->smc_fw = NULL; - err = 0; - } else if (rdev->smc_fw->size != smc_req_size) { - printk(KERN_ERR - "si_smc: Bogus length %zu in firmware \"%s\"\n", - rdev->smc_fw->size, fw_name); - err = -EINVAL; + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); + err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + err = 0; + } else if (rdev->smc_fw->size != smc_req_size) { + printk(KERN_ERR + "si_smc: Bogus length %zu in firmware \"%s\"\n", + rdev->smc_fw->size, fw_name); + err = -EINVAL; + } + } else { + err = radeon_ucode_validate(rdev->smc_fw); + if (err) { + printk(KERN_ERR + "si_cp: validation failed for firmware \"%s\"\n", + fw_name); + goto out; + } else { + new_fw++; + } } + if (new_fw == 0) { + rdev->new_fw = false; + } else if (new_fw < 6) { + printk(KERN_ERR "si_fw: mixing new and old firmware!\n"); + err = -EINVAL; + } else { + rdev->new_fw = true; + } out: if (err) { if (err != -EINVAL) @@ -2901,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev) u32 sx_debug_1; u32 hdp_host_path_cntl; u32 tmp; - int i, j, k; + int i, j; switch (rdev->family) { case CHIP_TAHITI: @@ -3099,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.max_sh_per_se, rdev->config.si.max_cu_per_sh); + rdev->config.si.active_cus = 0; for (i = 0; i < rdev->config.si.max_shader_engines; i++) { for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { - for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { - rdev->config.si.active_cus += - hweight32(si_get_cu_active_bitmap(rdev, i, j)); - } + rdev->config.si.active_cus += + hweight32(si_get_cu_active_bitmap(rdev, i, j)); } } @@ -3282,34 +3437,77 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable) static int si_cp_load_microcode(struct radeon_device *rdev) { - const __be32 *fw_data; int i; - if (!rdev->me_fw || !rdev->pfp_fw) + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw) return -EINVAL; si_cp_enable(rdev, false); - /* PFP */ - fw_data = (const __be32 *)rdev->pfp_fw->data; - WREG32(CP_PFP_UCODE_ADDR, 0); - for (i = 0; i < SI_PFP_UCODE_SIZE; i++) - WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_PFP_UCODE_ADDR, 0); - - /* CE */ - fw_data = (const __be32 *)rdev->ce_fw->data; - WREG32(CP_CE_UCODE_ADDR, 0); - for (i = 0; i < 
SI_CE_UCODE_SIZE; i++) - WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_CE_UCODE_ADDR, 0); - - /* ME */ - fw_data = (const __be32 *)rdev->me_fw->data; - WREG32(CP_ME_RAM_WADDR, 0); - for (i = 0; i < SI_PM4_UCODE_SIZE; i++) - WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); - WREG32(CP_ME_RAM_WADDR, 0); + if (rdev->new_fw) { + const struct gfx_firmware_header_v1_0 *pfp_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data; + const struct gfx_firmware_header_v1_0 *ce_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data; + const struct gfx_firmware_header_v1_0 *me_hdr = + (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data; + const __le32 *fw_data; + u32 fw_size; + + radeon_ucode_print_gfx_hdr(&pfp_hdr->header); + radeon_ucode_print_gfx_hdr(&ce_hdr->header); + radeon_ucode_print_gfx_hdr(&me_hdr->header); + + /* PFP */ + fw_data = (const __le32 *) + (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + /* CE */ + fw_data = (const __le32 *) + (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_CE_UCODE_ADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_CE_UCODE_ADDR, 0); + + /* ME */ + fw_data = (const __be32 *) + (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < fw_size; i++) + WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); + WREG32(CP_ME_RAM_WADDR, 0); + } else { + const __be32 *fw_data; + + /* PFP */ + fw_data = (const __be32 *)rdev->pfp_fw->data; + WREG32(CP_PFP_UCODE_ADDR, 0); + for (i = 0; i < SI_PFP_UCODE_SIZE; i++) + WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_PFP_UCODE_ADDR, 0); + + /* CE */ + fw_data = (const __be32 *)rdev->ce_fw->data; + WREG32(CP_CE_UCODE_ADDR, 0); + for (i = 0; i < SI_CE_UCODE_SIZE; i++) + WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_CE_UCODE_ADDR, 0); + + /* ME */ + fw_data = (const __be32 *)rdev->me_fw->data; + WREG32(CP_ME_RAM_WADDR, 0); + for (i = 0; i < SI_PM4_UCODE_SIZE; i++) + WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); + WREG32(CP_ME_RAM_WADDR, 0); + } WREG32(CP_PFP_UCODE_ADDR, 0); WREG32(CP_CE_UCODE_ADDR, 0); @@ -3342,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); radeon_ring_write(ring, 0xc000); radeon_ring_write(ring, 0xe000); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); si_cp_enable(rdev, true); @@ -3371,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { ring = &rdev->ring[i]; @@ -3381,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev) radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); radeon_ring_write(ring, 0); - radeon_ring_unlock_commit(rdev, ring); + 
radeon_ring_unlock_commit(rdev, ring, false); } return 0; @@ -4048,7 +4246,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - radeon_gart_restore(rdev); /* Setup TLB control */ WREG32(MC_VM_MX_L1_TLB_CNTL, (0xA << 7) | @@ -4093,10 +4290,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); else WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), - rdev->gart.table_addr >> 12); + rdev->vm_manager.saved_table_addr[i]); } /* enable context1-15 */ @@ -4128,6 +4325,17 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) static void si_pcie_gart_disable(struct radeon_device *rdev) { + unsigned i; + + for (i = 1; i < 16; ++i) { + uint32_t reg; + if (i < 8) + reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); + else + reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); + rdev->vm_manager.saved_table_addr[i] = RREG32(reg); + } + /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0); @@ -4815,7 +5023,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) /* write new base address */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); if (vm->id < 8) { @@ -4830,7 +5038,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) /* flush hdp cache */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); radeon_ring_write(ring, 0); @@ -4838,7 +5046,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) /* bits 0-15 are the VM contexts0-15 */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 0); @@ -5592,7 +5800,6 @@ static void si_enable_lbpw(struct radeon_device *rdev, bool enable) static int si_rlc_resume(struct radeon_device *rdev) { u32 i; - const __be32 *fw_data; if (!rdev->rlc_fw) return -EINVAL; @@ -5615,10 +5822,26 @@ static int si_rlc_resume(struct radeon_device *rdev) WREG32(RLC_MC_CNTL, 0); WREG32(RLC_UCODE_CNTL, 0); - fw_data = (const __be32 *)rdev->rlc_fw->data; - for (i = 0; i < SI_RLC_UCODE_SIZE; i++) { - WREG32(RLC_UCODE_ADDR, i); - WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + if (rdev->new_fw) { + const struct rlc_firmware_header_v1_0 *hdr = + (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data; + u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + const __le32 *fw_data = (const __le32 *) + (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + radeon_ucode_print_rlc_hdr(&hdr->header); + + for (i = 0; i < fw_size; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++)); + } + } else { + const __be32 *fw_data = + (const __be32 *)rdev->rlc_fw->data; + for (i = 0; i < SI_RLC_UCODE_SIZE; i++) { + WREG32(RLC_UCODE_ADDR, i); + WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); + } } WREG32(RLC_UCODE_ADDR, 0); @@ 
-6093,17 +6316,17 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) wptr = RREG32(IH_RB_WPTR); if (wptr & RB_OVERFLOW) { + wptr &= ~RB_OVERFLOW; /* When a ring buffer overflow happen start parsing interrupt * from the last not overwritten vector (wptr + 16). Hopefully * this should allow us to catchup. */ - dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", - wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); + dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", + wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); - wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -6318,7 +6541,8 @@ restart_ih: case 16: /* D5 page flip */ case 18: /* D6 page flip */ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); - radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + if (radeon_use_pflipirq > 0) + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); break; case 42: /* HPD hotplug */ switch (src_data) { @@ -6440,13 +6664,13 @@ restart_ih: /* wptr/rptr are in bytes! */ rptr += 16; rptr &= rdev->ih.ptr_mask; + WREG32(IH_RB_RPTR, rptr); } if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; - WREG32(IH_RB_RPTR, rdev->ih.rptr); atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ @@ -6964,6 +7188,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev) int ret, i; u16 tmp16; + if (pci_is_root_bus(rdev->pdev->bus)) + return; + if (radeon_pcie_gen2 == 0) return; @@ -7241,7 +7468,8 @@ static void si_program_aspm(struct radeon_device *rdev) if (orig != data) WREG32_PIF_PHY1(PB1_PIF_CNTL, data); - if (!disable_clkreq) { + if (!disable_clkreq && + !pci_is_root_bus(rdev->pdev->bus)) { struct pci_dev *root = rdev->pdev->bus->self; u32 lnkcap; diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index e24c94b6d14d..7c22baaf94db 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -56,7 +56,41 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) } /** - * si_dma_vm_set_page - update the page tables using the DMA + * si_dma_vm_copy_pages - update PTEs by copying them from the GART + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr where to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using the DMA (SI). 
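 * Each DMA_PACKET_COPY moves at most 0xFFFF8 bytes, so the body below
 * loops, advancing pe and src and decrementing count by bytes / 8 (one
 * PTE is 8 bytes). For example, with count = 200000 PTEs the first
 * packet copies min(200000 * 8, 0xFFFF8) = 1048568 bytes, i.e. 131071
 * PTEs, leaving 68929 for the next iteration.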
+ */ +void si_dma_vm_copy_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, uint64_t src, + unsigned count) +{ + while (count) { + unsigned bytes = count * 8; + if (bytes > 0xFFFF8) + bytes = 0xFFFF8; + + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, + 1, 0, 0, bytes); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; + + pe += bytes; + src += bytes; + count -= bytes / 8; + } +} + +/** + * si_dma_vm_write_pages - update PTEs by writing them manually * * @rdev: radeon_device pointer * @ib: indirect buffer to fill with commands @@ -66,83 +100,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) * @incr: increase next addr by incr bytes * @flags: access flags * - * Update the page tables using the DMA (SI). + * Update PTEs by writing them manually using the DMA (SI). */ -void si_dma_vm_set_page(struct radeon_device *rdev, - struct radeon_ib *ib, - uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +void si_dma_vm_write_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) { uint64_t value; unsigned ndw; - trace_radeon_vm_set_page(pe, addr, count, incr, flags); - - if (flags == R600_PTE_GART) { - uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8; - while (count) { - unsigned bytes = count * 8; - if (bytes > 0xFFFF8) - bytes = 0xFFFF8; - - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, - 1, 0, 0, bytes); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; - - pe += bytes; - src += bytes; - count -= bytes / 8; - } - } else if (flags & R600_PTE_SYSTEM) { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + /* for non-physically contiguous pages (system) */ + ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); value &= 0xFFFFFFFFFFFFF000ULL; - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } - } - } else { - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - if (flags & R600_PTE_VALID) + } else if (flags & R600_PTE_VALID) { value = addr; - else + } else { value = 0; - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ + } + addr += incr; + value |= flags; + ib->ptr[ib->length_dw++] = value; ib->ptr[ib->length_dw++] = upper_32_bits(value); - 
ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - pe += ndw * 4; - addr += (ndw / 2) * incr; - count -= ndw / 2; } } - while (ib->length_dw & 0x7) - ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); +} + +/** + * si_dma_vm_set_pages - update the page tables using the DMA + * + * @rdev: radeon_device pointer + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: access flags + * + * Update the page tables using the DMA (SI). + */ +void si_dma_vm_set_pages(struct radeon_device *rdev, + struct radeon_ib *ib, + uint64_t pe, + uint64_t addr, unsigned count, + uint32_t incr, uint32_t flags) +{ + uint64_t value; + unsigned ndw; + + while (count) { + ndw = count * 2; + if (ndw > 0xFFFFE) + ndw = 0xFFFFE; + + if (flags & R600_PTE_VALID) + value = addr; + else + value = 0; + + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); + ib->ptr[ib->length_dw++] = pe; /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = value; /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(value); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + pe += ndw * 4; + addr += (ndw / 2) * incr; + count -= ndw / 2; + } } void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) @@ -235,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev, return r; } - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); radeon_semaphore_free(rdev, &sem, *fence); return r; diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 58918868f894..70e61ffeace2 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -3812,6 +3812,27 @@ void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, voltage_table->count = max_voltage_steps; } +static int si_get_svi2_voltage_table(struct radeon_device *rdev, + struct radeon_clock_voltage_dependency_table *voltage_dependency_table, + struct atom_voltage_table *voltage_table) +{ + u32 i; + + if (voltage_dependency_table == NULL) + return -EINVAL; + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + + voltage_table->count = voltage_dependency_table->count; + for (i = 0; i < voltage_table->count; i++) { + voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + static int si_construct_voltage_tables(struct radeon_device *rdev) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); @@ -3819,15 +3840,25 @@ static int si_construct_voltage_tables(struct radeon_device *rdev) struct si_power_info *si_pi = si_get_pi(rdev); int ret; - ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, - VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table); - if (ret) - return ret; + if (pi->voltage_control) { + ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table); + if (ret) + return ret; - if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(rdev, - SISLANDS_MAX_NO_VREG_STEPS, - &eg_pi->vddc_voltage_table); + if (eg_pi->vddc_voltage_table.count > 
SISLANDS_MAX_NO_VREG_STEPS) + si_trim_voltage_table_to_fit_state_table(rdev, + SISLANDS_MAX_NO_VREG_STEPS, + &eg_pi->vddc_voltage_table); + } else if (si_pi->voltage_control_svi2) { + ret = si_get_svi2_voltage_table(rdev, + &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &eg_pi->vddc_voltage_table); + if (ret) + return ret; + } else { + return -EINVAL; + } if (eg_pi->vddci_control) { ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, @@ -3840,6 +3871,13 @@ static int si_construct_voltage_tables(struct radeon_device *rdev) SISLANDS_MAX_NO_VREG_STEPS, &eg_pi->vddci_voltage_table); } + if (si_pi->vddci_control_svi2) { + ret = si_get_svi2_voltage_table(rdev, + &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &eg_pi->vddci_voltage_table); + if (ret) + return ret; + } if (pi->mvdd_control) { ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC, @@ -3893,46 +3931,55 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev, struct si_power_info *si_pi = si_get_pi(rdev); u8 i; - if (eg_pi->vddc_voltage_table.count) { - si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table); - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); - - for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { - if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { - table->maxVDDCIndexInPPTable = i; - break; + if (si_pi->voltage_control_svi2) { + si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc, + si_pi->svc_gpio_id); + si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd, + si_pi->svd_gpio_id); + si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type, + 2); + } else { + if (eg_pi->vddc_voltage_table.count) { + si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table); + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = + cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + + for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { + if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { + table->maxVDDCIndexInPPTable = i; + break; + } } } - } - if (eg_pi->vddci_voltage_table.count) { - si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table); + if (eg_pi->vddci_voltage_table.count) { + si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table); - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); - } + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); + } - if (si_pi->mvdd_voltage_table.count) { - si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table); + if (si_pi->mvdd_voltage_table.count) { + si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table); - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = - cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); - } + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = + cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); + } - if (si_pi->vddc_phase_shed_control) { - if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table, - &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) { - si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table); + if (si_pi->vddc_phase_shed_control) { + if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table, + 
&rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) { + si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table); - table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = - cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); + table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = + cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); - si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, - (u32)si_pi->vddc_phase_shed_table.phase_delay); - } else { - si_pi->vddc_phase_shed_control = false; + si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, + (u32)si_pi->vddc_phase_shed_table.phase_delay); + } else { + si_pi->vddc_phase_shed_control = false; + } } } @@ -5798,16 +5845,17 @@ int si_dpm_enable(struct radeon_device *rdev) { struct rv7xx_power_info *pi = rv770_get_pi(rdev); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct si_power_info *si_pi = si_get_pi(rdev); struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; int ret; if (si_is_smc_running(rdev)) return -EINVAL; - if (pi->voltage_control) + if (pi->voltage_control || si_pi->voltage_control_svi2) si_enable_voltage_control(rdev, true); if (pi->mvdd_control) si_get_mvdd_configuration(rdev); - if (pi->voltage_control) { + if (pi->voltage_control || si_pi->voltage_control_svi2) { ret = si_construct_voltage_tables(rdev); if (ret) { DRM_ERROR("si_construct_voltage_tables failed\n"); @@ -6406,16 +6454,32 @@ int si_dpm_init(struct radeon_device *rdev) ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; pi->voltage_control = - radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT); + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_GPIO_LUT); + if (!pi->voltage_control) { + si_pi->voltage_control_svi2 = + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_SVID2); + if (si_pi->voltage_control_svi2) + radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, + &si_pi->svd_gpio_id, &si_pi->svc_gpio_id); + } pi->mvdd_control = - radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT); + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, + VOLTAGE_OBJ_GPIO_LUT); eg_pi->vddci_control = - radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT); + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, + VOLTAGE_OBJ_GPIO_LUT); + if (!eg_pi->vddci_control) + si_pi->vddci_control_svi2 = + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, + VOLTAGE_OBJ_SVID2); si_pi->vddc_phase_shed_control = - radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); + radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_PHASE_LUT); rv770_get_engine_memory_ss(rdev); diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h index 4ce5032cdf49..8b5c06a0832d 100644 --- a/drivers/gpu/drm/radeon/si_dpm.h +++ b/drivers/gpu/drm/radeon/si_dpm.h @@ -170,6 +170,8 @@ struct si_power_info { bool vddc_phase_shed_control; bool pspp_notify_required; bool sclk_deep_sleep_above_low; + bool voltage_control_svi2; + bool vddci_control_svi2; /* smc offsets */ u32 sram_end; u32 state_table_start; @@ -192,6 +194,9 @@ struct si_power_info { SMC_SIslands_MCRegisters smc_mc_reg_table; SISLANDS_SMC_STATETABLE smc_statetable; PP_SIslands_PAPMParameters papm_parm; + /* SVI2 */ + u8 svd_gpio_id; + u8 svc_gpio_id; }; #define 
SISLANDS_INITIAL_STATE_ARB_INDEX 0 diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c index e80efcf0c230..73dbc79c959d 100644 --- a/drivers/gpu/drm/radeon/si_smc.c +++ b/drivers/gpu/drm/radeon/si_smc.c @@ -219,36 +219,48 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) if (!rdev->smc_fw) return -EINVAL; - switch (rdev->family) { - case CHIP_TAHITI: - ucode_start_address = TAHITI_SMC_UCODE_START; - ucode_size = TAHITI_SMC_UCODE_SIZE; - break; - case CHIP_PITCAIRN: - ucode_start_address = PITCAIRN_SMC_UCODE_START; - ucode_size = PITCAIRN_SMC_UCODE_SIZE; - break; - case CHIP_VERDE: - ucode_start_address = VERDE_SMC_UCODE_START; - ucode_size = VERDE_SMC_UCODE_SIZE; - break; - case CHIP_OLAND: - ucode_start_address = OLAND_SMC_UCODE_START; - ucode_size = OLAND_SMC_UCODE_SIZE; - break; - case CHIP_HAINAN: - ucode_start_address = HAINAN_SMC_UCODE_START; - ucode_size = HAINAN_SMC_UCODE_SIZE; - break; - default: - DRM_ERROR("unknown asic in smc ucode loader\n"); - BUG(); + if (rdev->new_fw) { + const struct smc_firmware_header_v1_0 *hdr = + (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data; + + radeon_ucode_print_smc_hdr(&hdr->header); + + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + src = (const u8 *) + (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + } else { + switch (rdev->family) { + case CHIP_TAHITI: + ucode_start_address = TAHITI_SMC_UCODE_START; + ucode_size = TAHITI_SMC_UCODE_SIZE; + break; + case CHIP_PITCAIRN: + ucode_start_address = PITCAIRN_SMC_UCODE_START; + ucode_size = PITCAIRN_SMC_UCODE_SIZE; + break; + case CHIP_VERDE: + ucode_start_address = VERDE_SMC_UCODE_START; + ucode_size = VERDE_SMC_UCODE_SIZE; + break; + case CHIP_OLAND: + ucode_start_address = OLAND_SMC_UCODE_START; + ucode_size = OLAND_SMC_UCODE_SIZE; + break; + case CHIP_HAINAN: + ucode_start_address = HAINAN_SMC_UCODE_START; + ucode_size = HAINAN_SMC_UCODE_SIZE; + break; + default: + DRM_ERROR("unknown asic in smc ucode loader\n"); + BUG(); + } + src = (const u8 *)rdev->smc_fw->data; } if (ucode_size & 3) return -EINVAL; - src = (const u8 *)rdev->smc_fw->data; spin_lock_irqsave(&rdev->smc_idx_lock, flags); WREG32(SMC_IND_INDEX_0, ucode_start_address); WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h index 10e945a49479..623a0b1e2d9d 100644 --- a/drivers/gpu/drm/radeon/sislands_smc.h +++ b/drivers/gpu/drm/radeon/sislands_smc.h @@ -241,6 +241,9 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; #define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4 #define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC #define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100 +#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118 +#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c +#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 #define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 #define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 32e50be9c4ac..57f780053b3e 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev) for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) pi->at[i] = TRINITY_AT_DFLT; - /* There are stability issues reported on with - 
* bapm enabled when switching between AC and battery - * power. At the same time, some MSI boards hang - * if it's not enabled and dpm is enabled. Just enable - * it for MSI boards right now. - */ - if (rdev->pdev->subsystem_vendor == 0x1462) - pi->enable_bapm = true; - else + if (radeon_bapm == -1) { + /* There are stability issues reported on with + * bapm enabled when switching between AC and battery + * power. At the same time, some MSI boards hang + * if it's not enabled and dpm is enabled. Just enable + * it for MSI boards right now. + */ + if (rdev->pdev->subsystem_vendor == 0x1462) + pi->enable_bapm = true; + else + pi->enable_bapm = false; + } else if (radeon_bapm == 0) { pi->enable_bapm = false; + } else { + pi->enable_bapm = true; + } pi->enable_nbps_policy = true; pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index be42c8125203..cda391347286 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev) radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); radeon_ring_write(ring, 3); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); done: /* lower clocks again */ @@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) } radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); radeon_ring_write(ring, 0xDEADBEEF); - radeon_ring_unlock_commit(rdev, ring); + radeon_ring_unlock_commit(rdev, ring, false); for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(UVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 792fd1d20e86..fda64b7b73e8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -187,7 +187,7 @@ static struct drm_driver rcar_du_driver = { * Power management */ -#if CONFIG_PM_SLEEP +#ifdef CONFIG_PM_SLEEP static int rcar_du_pm_suspend(struct device *dev) { struct rcar_du_device *rcdu = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index a87edfac111f..76026104d000 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -135,7 +135,9 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, { struct rcar_du_device *rcdu = dev->dev_private; const struct rcar_du_format_info *format; + unsigned int max_pitch; unsigned int align; + unsigned int bpp; format = rcar_du_format_info(mode_cmd->pixel_format); if (format == NULL) { @@ -144,13 +146,20 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-EINVAL); } + /* + * The pitch and alignment constraints are expressed in pixels on the + * hardware side and in bytes in the DRM API. + */ + bpp = format->planes == 2 ? 
1 : format->bpp / 8; + max_pitch = 4096 * bpp; + if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B)) align = 128; else - align = 16 * format->bpp / 8; + align = 16 * bpp; if (mode_cmd->pitches[0] & (align - 1) || - mode_cmd->pitches[0] >= 8192) { + mode_cmd->pitches[0] >= max_pitch) { dev_dbg(dev->dev, "invalid pitch value %u\n", mode_cmd->pitches[0]); return ERR_PTR(-EINVAL); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index 289048d1c7b2..21426bd234eb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -64,7 +64,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -105,7 +105,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index ccfe64c7188f..8af3944d31b9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -32,7 +32,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = { static void rcar_du_vga_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -70,7 +70,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index faf176b2daf9..47875de89010 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -692,7 +692,7 @@ static void shmob_drm_connector_destroy(struct drm_connector *connector) struct shmob_drm_connector *scon = to_shmob_connector(connector); shmob_drm_backlight_exit(scon); - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -726,7 +726,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev, return ret; drm_connector_helper_add(connector, &connector_helper_funcs); - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret < 0) goto err_cleanup; @@ -749,7 +749,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev, err_backlight: shmob_drm_backlight_exit(&sdev->connector); err_sysfs: - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); err_cleanup: drm_connector_cleanup(connector); return ret; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 82c84c7fd4f6..ff4ba483b602 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -297,7 +297,7 @@ static struct drm_driver shmob_drm_driver = { * Power management */ -#if CONFIG_PM_SLEEP +#ifdef CONFIG_PM_SLEEP static int shmob_drm_pm_suspend(struct device *dev) { struct shmob_drm_device *sdev = dev_get_drvdata(dev); diff --git 
a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig new file mode 100644 index 000000000000..ae8850f3e63b --- /dev/null +++ b/drivers/gpu/drm/sti/Kconfig @@ -0,0 +1,15 @@ +config DRM_STI + tristate "DRM Support for STMicroelectronics SoC stiH41x Series" + depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) + select RESET_CONTROLLER + select DRM_KMS_HELPER + select DRM_GEM_CMA_HELPER + select DRM_KMS_CMA_HELPER + help + Choose this option to enable DRM on STM stiH41x chipset + +config DRM_STI_FBDEV + bool "DRM frame buffer device for STMicroelectronics SoC stiH41x Serie" + depends on DRM_STI + help + Choose this option to enable FBDEV on top of DRM for STM stiH41x chipset diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile new file mode 100644 index 000000000000..04ac2ceef27f --- /dev/null +++ b/drivers/gpu/drm/sti/Makefile @@ -0,0 +1,21 @@ +sticompositor-y := \ + sti_layer.o \ + sti_mixer.o \ + sti_gdp.o \ + sti_vid.o \ + sti_compositor.o \ + sti_drm_crtc.o \ + sti_drm_plane.o + +stihdmi-y := sti_hdmi.o \ + sti_hdmi_tx3g0c55phy.o \ + sti_hdmi_tx3g4c28phy.o \ + +obj-$(CONFIG_DRM_STI) = \ + sti_vtg.o \ + sti_vtac.o \ + stihdmi.o \ + sti_hda.o \ + sti_tvout.o \ + sticompositor.o \ + sti_drm_drv.o
\ No newline at end of file diff --git a/drivers/gpu/drm/sti/NOTES b/drivers/gpu/drm/sti/NOTES new file mode 100644 index 000000000000..57e257969198 --- /dev/null +++ b/drivers/gpu/drm/sti/NOTES @@ -0,0 +1,58 @@ +1. stiH display hardware IP +--------------------------- +The STMicroelectronics stiH SoCs use a common chain of HW display IP blocks: +- The High Quality Video Display Processor (HQVDP) gets video frames from a + video decoder and does high quality video processing, including scaling. + +- The Compositor is a multiplane, dual-mixer (Main & Aux) digital processor. It + has several inputs: + - The graphics planes are internally processed by the Generic Display + Pipeline (GDP). + - The video plug (VID) connects to the HQVDP output. + - The cursor handles ... a cursor. +- The TV OUT pre-formats (convert, clip, round) the compositor output data +- The HDMI / DVO / HD Analog / SD analog IP builds the video signals + - DVO (Digital Video Output) handles a 24bits parallel signal + - The HD analog signal is typically driven by a YCbCr cable, supporting up to + 1080i mode. + - The SD analog signal is typically used for legacy TV +- The VTG (Video Timing Generators) build Vsync signals used by the other HW IP +Note that some stiH drivers support only a subset of thee HW IP. + + .-------------. .-----------. .-----------. +GPU >-------------+GDP Main | | +---+ HDMI +--> HDMI +GPU >-------------+GDP mixer+---+ | :===========: +GPU >-------------+Cursor | | +---+ DVO +--> 24b// + ------- | COMPOSITOR | | TV OUT | :===========: + | | | | | +---+ HD analog +--> YCbCr +Vid >--+ HQVDP +--+VID Aux +---+ | :===========: +dec | | | mixer| | +---+ SD analog +--> CVBS + '-------' '-------------' '-----------' '-----------' + .-----------. + | main+--> Vsync + | VTG | + | aux+--> Vsync + '-----------' + +2. DRM / HW mapping +------------------- +These IP are mapped to the DRM objects as following: +- The CRTCs are mapped to the Compositor Main and Aux Mixers +- The Framebuffers and planes are mapped to the Compositor GDP (non video + buffers) and to HQVDP+VID (video buffers) +- The Cursor is mapped to the Compositor Cursor +- The Encoders are mapped to the TVOut +- The Bridges/Connectors are mapped to the HDMI / DVO / HD Analog / SD analog + +FB & planes Cursor CRTC Encoders Bridges/Connectors + | | | | | + | | | | | + | .-------------. | .-----------. .-----------. | + +------------> |GDP | Main | | | +-> | | HDMI | <-+ + +------------> |GDP v mixer|<+ | | | :===========: | + | |Cursor | | | +-> | | DVO | <-+ + | ------- | COMPOSITOR | | |TV OUT | | :===========: | + | | | | | | | +-> | | HD analog | <-+ + +-> | HQVDP | |VID Aux |<+ | | | :===========: | + | | | mixer| | +-> | | SD analog | <-+ + '-------' '-------------' '-----------' '-----------' diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c new file mode 100644 index 000000000000..390d93e9a06c --- /dev/null +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -0,0 +1,281 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
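 * (The DRM/HW mapping sketched in the NOTES file above shows up directly
 *  in this file: sti_compositor_init_subdev() fills compo->mixer[] with
 *  the Main/Aux mixers that back the DRM CRTCs and compo->layer[] with
 *  the GDP/VID blocks that back the DRM planes, using the per-SoC
 *  subdev_desc tables defined just below.)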
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/component.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <drm/drmP.h> + +#include "sti_compositor.h" +#include "sti_drm_crtc.h" +#include "sti_drm_drv.h" +#include "sti_drm_plane.h" +#include "sti_gdp.h" +#include "sti_vtg.h" + +/* + * stiH407 compositor properties + */ +struct sti_compositor_data stih407_compositor_data = { + .nb_subdev = 6, + .subdev_desc = { + {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100}, + {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, + {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, + {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, + {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, + {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00} + }, +}; + +/* + * stiH416 compositor properties + * Note: + * on stih416 MIXER_AUX has a different base address from MIXER_MAIN + * Moreover, GDPx is different for Main and Aux Mixer. So this subdev map does + * not fit for stiH416 if we want to enable the MIXER_AUX. + */ +struct sti_compositor_data stih416_compositor_data = { + .nb_subdev = 3, + .subdev_desc = { + {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100}, + {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, + {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00} + }, +}; + +static int sti_compositor_init_subdev(struct sti_compositor *compo, + struct sti_compositor_subdev_descriptor *desc, + unsigned int array_size) +{ + unsigned int i, mixer_id = 0, layer_id = 0; + + for (i = 0; i < array_size; i++) { + switch (desc[i].type) { + case STI_MIXER_MAIN_SUBDEV: + case STI_MIXER_AUX_SUBDEV: + compo->mixer[mixer_id++] = + sti_mixer_create(compo->dev, desc[i].id, + compo->regs + desc[i].offset); + break; + case STI_GPD_SUBDEV: + case STI_VID_SUBDEV: + compo->layer[layer_id++] = + sti_layer_create(compo->dev, desc[i].id, + compo->regs + desc[i].offset); + break; + /* case STI_CURSOR_SUBDEV : TODO */ + default: + DRM_ERROR("Unknow subdev compoment type\n"); + return 1; + } + + } + compo->nb_mixers = mixer_id; + compo->nb_layers = layer_id; + + return 0; +} + +static int sti_compositor_bind(struct device *dev, struct device *master, + void *data) +{ + struct sti_compositor *compo = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + unsigned int i, crtc = 0, plane = 0; + struct sti_drm_private *dev_priv = drm_dev->dev_private; + struct drm_plane *cursor = NULL; + struct drm_plane *primary = NULL; + + dev_priv->compo = compo; + + for (i = 0; i < compo->nb_layers; i++) { + if (compo->layer[i]) { + enum sti_layer_desc desc = compo->layer[i]->desc; + enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK; + enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY; + + if (compo->mixer[crtc]) + plane_type = DRM_PLANE_TYPE_PRIMARY; + + switch (type) { + case STI_CUR: + cursor = sti_drm_plane_init(drm_dev, + compo->layer[i], + (1 << crtc) - 1, + DRM_PLANE_TYPE_CURSOR); + break; + case STI_GDP: + case STI_VID: + primary = sti_drm_plane_init(drm_dev, + compo->layer[i], + (1 << crtc) - 1, plane_type); + plane++; + break; + case STI_BCK: + break; + } + + /* The first planes are reserved for primary planes*/ + if (compo->mixer[crtc]) { + sti_drm_crtc_init(drm_dev, compo->mixer[crtc], + primary, cursor); + crtc++; + cursor = NULL; + } + } + } + + drm_vblank_init(drm_dev, crtc); + /* Allow usage of vblank without having to call drm_irq_install */ + drm_dev->irq_enabled = 1; + + DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n", + crtc, plane); + DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not 
created yet\n"); + + return 0; +} + +static void sti_compositor_unbind(struct device *dev, struct device *master, + void *data) +{ + /* do nothing */ +} + +static const struct component_ops sti_compositor_ops = { + .bind = sti_compositor_bind, + .unbind = sti_compositor_unbind, +}; + +static const struct of_device_id compositor_of_match[] = { + { + .compatible = "st,stih416-compositor", + .data = &stih416_compositor_data, + }, { + .compatible = "st,stih407-compositor", + .data = &stih407_compositor_data, + }, { + /* end node */ + } +}; +MODULE_DEVICE_TABLE(of, compositor_of_match); + +static int sti_compositor_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *vtg_np; + struct sti_compositor *compo; + struct resource *res; + int err; + + compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); + if (!compo) { + DRM_ERROR("Failed to allocate compositor context\n"); + return -ENOMEM; + } + compo->dev = dev; + compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb; + + /* populate data structure depending on compatibility */ + BUG_ON(!of_match_node(compositor_of_match, np)->data); + + memcpy(&compo->data, of_match_node(compositor_of_match, np)->data, + sizeof(struct sti_compositor_data)); + + /* Get Memory ressources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + DRM_ERROR("Get memory resource failed\n"); + return -ENXIO; + } + compo->regs = devm_ioremap(dev, res->start, resource_size(res)); + if (compo->regs == NULL) { + DRM_ERROR("Register mapping failed\n"); + return -ENXIO; + } + + /* Get clock resources */ + compo->clk_compo_main = devm_clk_get(dev, "compo_main"); + if (IS_ERR(compo->clk_compo_main)) { + DRM_ERROR("Cannot get compo_main clock\n"); + return PTR_ERR(compo->clk_compo_main); + } + + compo->clk_compo_aux = devm_clk_get(dev, "compo_aux"); + if (IS_ERR(compo->clk_compo_aux)) { + DRM_ERROR("Cannot get compo_aux clock\n"); + return PTR_ERR(compo->clk_compo_aux); + } + + compo->clk_pix_main = devm_clk_get(dev, "pix_main"); + if (IS_ERR(compo->clk_pix_main)) { + DRM_ERROR("Cannot get pix_main clock\n"); + return PTR_ERR(compo->clk_pix_main); + } + + compo->clk_pix_aux = devm_clk_get(dev, "pix_aux"); + if (IS_ERR(compo->clk_pix_aux)) { + DRM_ERROR("Cannot get pix_aux clock\n"); + return PTR_ERR(compo->clk_pix_aux); + } + + /* Get reset resources */ + compo->rst_main = devm_reset_control_get(dev, "compo-main"); + /* Take compo main out of reset */ + if (!IS_ERR(compo->rst_main)) + reset_control_deassert(compo->rst_main); + + compo->rst_aux = devm_reset_control_get(dev, "compo-aux"); + /* Take compo aux out of reset */ + if (!IS_ERR(compo->rst_aux)) + reset_control_deassert(compo->rst_aux); + + vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0); + if (vtg_np) + compo->vtg_main = of_vtg_find(vtg_np); + + vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1); + if (vtg_np) + compo->vtg_aux = of_vtg_find(vtg_np); + + /* Initialize compositor subdevices */ + err = sti_compositor_init_subdev(compo, compo->data.subdev_desc, + compo->data.nb_subdev); + if (err) + return err; + + platform_set_drvdata(pdev, compo); + + return component_add(&pdev->dev, &sti_compositor_ops); +} + +static int sti_compositor_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &sti_compositor_ops); + return 0; +} + +static struct platform_driver sti_compositor_driver = { + .driver = { + .name = "sti-compositor", + .owner = THIS_MODULE, + .of_match_table = 
compositor_of_match, + }, + .probe = sti_compositor_probe, + .remove = sti_compositor_remove, +}; + +module_platform_driver(sti_compositor_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h new file mode 100644 index 000000000000..3ea19db72e0f --- /dev/null +++ b/drivers/gpu/drm/sti/sti_compositor.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_COMPOSITOR_H_ +#define _STI_COMPOSITOR_H_ + +#include <linux/clk.h> +#include <linux/kernel.h> + +#include "sti_layer.h" +#include "sti_mixer.h" + +#define WAIT_NEXT_VSYNC_MS 50 /*ms*/ + +#define STI_MAX_LAYER 8 +#define STI_MAX_MIXER 2 + +enum sti_compositor_subdev_type { + STI_MIXER_MAIN_SUBDEV, + STI_MIXER_AUX_SUBDEV, + STI_GPD_SUBDEV, + STI_VID_SUBDEV, + STI_CURSOR_SUBDEV, +}; + +struct sti_compositor_subdev_descriptor { + enum sti_compositor_subdev_type type; + int id; + unsigned int offset; +}; + +/** + * STI Compositor data structure + * + * @nb_subdev: number of subdevices supported by the compositor + * @subdev_desc: subdev list description + */ +#define MAX_SUBDEV 9 +struct sti_compositor_data { + unsigned int nb_subdev; + struct sti_compositor_subdev_descriptor subdev_desc[MAX_SUBDEV]; +}; + +/** + * STI Compositor structure + * + * @dev: driver device + * @regs: registers (main) + * @data: device data + * @clk_compo_main: clock for main compo + * @clk_compo_aux: clock for aux compo + * @clk_pix_main: pixel clock for main path + * @clk_pix_aux: pixel clock for aux path + * @rst_main: reset control of the main path + * @rst_aux: reset control of the aux path + * @mixer: array of mixers + * @vtg_main: vtg for main data path + * @vtg_aux: vtg for auxillary data path + * @layer: array of layers + * @nb_mixers: number of mixers for this compositor + * @nb_layers: number of layers (GDP,VID,...) for this compositor + * @enable: true if compositor is enable else false + * @vtg_vblank_nb: callback for VTG VSYNC notification + */ +struct sti_compositor { + struct device *dev; + void __iomem *regs; + struct sti_compositor_data data; + struct clk *clk_compo_main; + struct clk *clk_compo_aux; + struct clk *clk_pix_main; + struct clk *clk_pix_aux; + struct reset_control *rst_main; + struct reset_control *rst_aux; + struct sti_mixer *mixer[STI_MAX_MIXER]; + struct sti_vtg *vtg_main; + struct sti_vtg *vtg_aux; + struct sti_layer *layer[STI_MAX_LAYER]; + int nb_mixers; + int nb_layers; + bool enable; + struct notifier_block vtg_vblank_nb; +}; + +#endif diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c new file mode 100644 index 000000000000..d2ae0c0e13be --- /dev/null +++ b/drivers/gpu/drm/sti/sti_drm_crtc.c @@ -0,0 +1,421 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
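 * (WAIT_NEXT_VSYNC_MS in sti_compositor.h above is the 50 ms delay that
 *  the CRTC and plane disable paths below sleep for, so that a Vsync is
 *  seen by the hardware before a layer is actually switched off.)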
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "sti_compositor.h" +#include "sti_drm_drv.h" +#include "sti_drm_crtc.h" +#include "sti_vtg.h" + +static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + DRM_DEBUG_KMS("\n"); +} + +static void sti_drm_crtc_prepare(struct drm_crtc *crtc) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct device *dev = mixer->dev; + struct sti_compositor *compo = dev_get_drvdata(dev); + + compo->enable = true; + + /* Prepare and enable the compo IP clock */ + if (mixer->id == STI_MIXER_MAIN) { + if (clk_prepare_enable(compo->clk_compo_main)) + DRM_INFO("Failed to prepare/enable compo_main clk\n"); + } else { + if (clk_prepare_enable(compo->clk_compo_aux)) + DRM_INFO("Failed to prepare/enable compo_aux clk\n"); + } +} + +static void sti_drm_crtc_commit(struct drm_crtc *crtc) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct device *dev = mixer->dev; + struct sti_compositor *compo = dev_get_drvdata(dev); + struct sti_layer *layer; + + if ((!mixer || !compo)) { + DRM_ERROR("Can not find mixer or compositor)\n"); + return; + } + + /* get GDP which is reserved to the CRTC FB */ + layer = to_sti_layer(crtc->primary); + if (layer) + sti_layer_commit(layer); + else + DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n"); + + /* Enable layer on mixer */ + if (sti_mixer_set_layer_status(mixer, layer, true)) + DRM_ERROR("Can not enable layer at mixer\n"); +} + +static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* accept the provided drm_display_mode, do not fix it up */ + return true; +} + +static int +sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct device *dev = mixer->dev; + struct sti_compositor *compo = dev_get_drvdata(dev); + struct sti_layer *layer; + struct clk *clk; + int rate = mode->clock * 1000; + int res; + unsigned int w, h; + + DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n", + crtc->base.id, sti_mixer_to_str(mixer), + crtc->primary->fb->base.id, mode->base.id, mode->name); + + DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", + mode->vrefresh, mode->clock, + mode->hdisplay, + mode->hsync_start, mode->hsync_end, + mode->htotal, + mode->vdisplay, + mode->vsync_start, mode->vsync_end, + mode->vtotal, mode->type, mode->flags); + + /* Set rate and prepare/enable pixel clock */ + if (mixer->id == STI_MIXER_MAIN) + clk = compo->clk_pix_main; + else + clk = compo->clk_pix_aux; + + res = clk_set_rate(clk, rate); + if (res < 0) { + DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate); + return -EINVAL; + } + if (clk_prepare_enable(clk)) { + DRM_ERROR("Failed to prepare/enable pix clk\n"); + return -EINVAL; + } + + sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ? 
+ compo->vtg_main : compo->vtg_aux, &crtc->mode); + + /* a GDP is reserved to the CRTC FB */ + layer = to_sti_layer(crtc->primary); + if (!layer) { + DRM_ERROR("Can not find GDP0)\n"); + return -EINVAL; + } + + /* copy the mode data adjusted by mode_fixup() into crtc->mode + * so that hardware can be set to proper mode + */ + memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); + + res = sti_mixer_set_layer_depth(mixer, layer); + if (res) { + DRM_ERROR("Can not set layer depth\n"); + return -EINVAL; + } + res = sti_mixer_active_video_area(mixer, &crtc->mode); + if (res) { + DRM_ERROR("Can not set active video area\n"); + return -EINVAL; + } + + w = crtc->primary->fb->width - x; + h = crtc->primary->fb->height - y; + + return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode, + mixer->id, 0, 0, w, h, x, y, w, h); +} + +static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct sti_layer *layer; + unsigned int w, h; + int ret; + + DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n", + crtc->base.id, sti_mixer_to_str(mixer), + crtc->primary->fb->base.id, x, y); + + /* GDP is reserved to the CRTC FB */ + layer = to_sti_layer(crtc->primary); + if (!layer) { + DRM_ERROR("Can not find GDP0)\n"); + ret = -EINVAL; + goto out; + } + + w = crtc->primary->fb->width - crtc->x; + h = crtc->primary->fb->height - crtc->y; + + ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode, + mixer->id, 0, 0, w, h, + crtc->x, crtc->y, w, h); + if (ret) { + DRM_ERROR("Can not prepare layer\n"); + goto out; + } + + sti_drm_crtc_commit(crtc); +out: + return ret; +} + +static void sti_drm_crtc_load_lut(struct drm_crtc *crtc) +{ + /* do nothing */ +} + +static void sti_drm_crtc_disable(struct drm_crtc *crtc) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + struct device *dev = mixer->dev; + struct sti_compositor *compo = dev_get_drvdata(dev); + struct sti_layer *layer; + + if (!compo->enable) + return; + + DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); + + /* Disable Background */ + sti_mixer_set_background_status(mixer, false); + + /* Disable GDP */ + layer = to_sti_layer(crtc->primary); + if (!layer) { + DRM_ERROR("Cannot find GDP0\n"); + return; + } + + /* Disable layer at mixer level */ + if (sti_mixer_set_layer_status(mixer, layer, false)) + DRM_ERROR("Can not disable %s layer at mixer\n", + sti_layer_to_str(layer)); + + /* Wait a while to be sure that a Vsync event is received */ + msleep(WAIT_NEXT_VSYNC_MS); + + /* Then disable layer itself */ + sti_layer_disable(layer); + + drm_vblank_off(crtc->dev, mixer->id); + + /* Disable pixel clock and compo IP clocks */ + if (mixer->id == STI_MIXER_MAIN) { + clk_disable_unprepare(compo->clk_pix_main); + clk_disable_unprepare(compo->clk_compo_main); + } else { + clk_disable_unprepare(compo->clk_pix_aux); + clk_disable_unprepare(compo->clk_compo_aux); + } + + compo->enable = false; +} + +static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { + .dpms = sti_drm_crtc_dpms, + .prepare = sti_drm_crtc_prepare, + .commit = sti_drm_crtc_commit, + .mode_fixup = sti_drm_crtc_mode_fixup, + .mode_set = sti_drm_crtc_mode_set, + .mode_set_base = sti_drm_crtc_mode_set_base, + .load_lut = sti_drm_crtc_load_lut, + .disable = sti_drm_crtc_disable, +}; + +static int sti_drm_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) +{ + struct drm_device 
*drm_dev = crtc->dev; + struct drm_framebuffer *old_fb; + struct sti_mixer *mixer = to_sti_mixer(crtc); + unsigned long flags; + int ret; + + DRM_DEBUG_KMS("fb %d --> fb %d\n", + crtc->primary->fb->base.id, fb->base.id); + + mutex_lock(&drm_dev->struct_mutex); + + old_fb = crtc->primary->fb; + crtc->primary->fb = fb; + ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb); + if (ret) { + DRM_ERROR("failed\n"); + crtc->primary->fb = old_fb; + goto out; + } + + if (event) { + event->pipe = mixer->id; + + ret = drm_vblank_get(drm_dev, event->pipe); + if (ret) { + DRM_ERROR("Cannot get vblank\n"); + goto out; + } + + spin_lock_irqsave(&drm_dev->event_lock, flags); + if (mixer->pending_event) { + drm_vblank_put(drm_dev, event->pipe); + ret = -EBUSY; + } else { + mixer->pending_event = event; + } + spin_unlock_irqrestore(&drm_dev->event_lock, flags); + } +out: + mutex_unlock(&drm_dev->struct_mutex); + return ret; +} + +static void sti_drm_crtc_destroy(struct drm_crtc *crtc) +{ + DRM_DEBUG_KMS("\n"); + drm_crtc_cleanup(crtc); +} + +static int sti_drm_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, + uint64_t val) +{ + DRM_DEBUG_KMS("\n"); + return 0; +} + +int sti_drm_crtc_vblank_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct drm_device *drm_dev; + struct sti_compositor *compo = + container_of(nb, struct sti_compositor, vtg_vblank_nb); + int *crtc = data; + unsigned long flags; + struct sti_drm_private *priv; + + drm_dev = compo->mixer[*crtc]->drm_crtc.dev; + priv = drm_dev->dev_private; + + if ((event != VTG_TOP_FIELD_EVENT) && + (event != VTG_BOTTOM_FIELD_EVENT)) { + DRM_ERROR("unknown event: %lu\n", event); + return -EINVAL; + } + + drm_handle_vblank(drm_dev, *crtc); + + spin_lock_irqsave(&drm_dev->event_lock, flags); + if (compo->mixer[*crtc]->pending_event) { + drm_send_vblank_event(drm_dev, -1, + compo->mixer[*crtc]->pending_event); + drm_vblank_put(drm_dev, *crtc); + compo->mixer[*crtc]->pending_event = NULL; + } + spin_unlock_irqrestore(&drm_dev->event_lock, flags); + + return 0; +} + +int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) +{ + struct sti_drm_private *dev_priv = dev->dev_private; + struct sti_compositor *compo = dev_priv->compo; + struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; + + if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ? + compo->vtg_main : compo->vtg_aux, + vtg_vblank_nb, crtc)) { + DRM_ERROR("Cannot register VTG notifier\n"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(sti_drm_crtc_enable_vblank); + +void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) +{ + struct sti_drm_private *priv = dev->dev_private; + struct sti_compositor *compo = priv->compo; + struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; + unsigned long flags; + + DRM_DEBUG_DRIVER("\n"); + + if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ? 
+ compo->vtg_main : compo->vtg_aux, vtg_vblank_nb)) + DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); + + /* free the resources of the pending requests */ + spin_lock_irqsave(&dev->event_lock, flags); + if (compo->mixer[crtc]->pending_event) { + drm_vblank_put(dev, crtc); + compo->mixer[crtc]->pending_event = NULL; + } + spin_unlock_irqrestore(&dev->event_lock, flags); + +} +EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); + +static struct drm_crtc_funcs sti_crtc_funcs = { + .set_config = drm_crtc_helper_set_config, + .page_flip = sti_drm_crtc_page_flip, + .destroy = sti_drm_crtc_destroy, + .set_property = sti_drm_crtc_set_property, +}; + +bool sti_drm_crtc_is_main(struct drm_crtc *crtc) +{ + struct sti_mixer *mixer = to_sti_mixer(crtc); + + if (mixer->id == STI_MIXER_MAIN) + return true; + + return false; +} + +int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, + struct drm_plane *primary, struct drm_plane *cursor) +{ + struct drm_crtc *crtc = &mixer->drm_crtc; + int res; + + res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, + &sti_crtc_funcs); + if (res) { + DRM_ERROR("Can not initialze CRTC\n"); + return -EINVAL; + } + + drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs); + + DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n", + crtc->base.id, sti_mixer_to_str(mixer)); + + return 0; +} diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h new file mode 100644 index 000000000000..caca8b14f017 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_drm_crtc.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_DRM_CRTC_H_ +#define _STI_DRM_CRTC_H_ + +#include <drm/drmP.h> + +struct sti_mixer; + +int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, + struct drm_plane *primary, struct drm_plane *cursor); +int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); +void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); +int sti_drm_crtc_vblank_cb(struct notifier_block *nb, + unsigned long event, void *data); +bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc); + +#endif diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c new file mode 100644 index 000000000000..223d93c3a05d --- /dev/null +++ b/drivers/gpu/drm/sti/sti_drm_drv.c @@ -0,0 +1,241 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#include <drm/drmP.h> + +#include <linux/component.h> +#include <linux/debugfs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_fb_cma_helper.h> + +#include "sti_drm_drv.h" +#include "sti_drm_crtc.h" + +#define DRIVER_NAME "sti" +#define DRIVER_DESC "STMicroelectronics SoC DRM" +#define DRIVER_DATE "20140601" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 + +#define STI_MAX_FB_HEIGHT 4096 +#define STI_MAX_FB_WIDTH 4096 + +static struct drm_mode_config_funcs sti_drm_mode_config_funcs = { + .fb_create = drm_fb_cma_create, +}; + +static void sti_drm_mode_config_init(struct drm_device *dev) +{ + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + + /* + * set max width and height as default value. 
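 * (STI_MAX_FB_WIDTH and STI_MAX_FB_HEIGHT are both 4096, so the
 *  transposed use of the two macros in the assignments below does not
 *  change the effective limits, although width and height appear
 *  swapped.)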
+ * this value would be used to check framebuffer size limitation + * at drm_mode_addfb(). + */ + dev->mode_config.max_width = STI_MAX_FB_HEIGHT; + dev->mode_config.max_height = STI_MAX_FB_WIDTH; + + dev->mode_config.funcs = &sti_drm_mode_config_funcs; +} + +static int sti_drm_load(struct drm_device *dev, unsigned long flags) +{ + struct sti_drm_private *private; + int ret; + + private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL); + if (!private) { + DRM_ERROR("Failed to allocate private\n"); + return -ENOMEM; + } + dev->dev_private = (void *)private; + private->drm_dev = dev; + + drm_mode_config_init(dev); + drm_kms_helper_poll_init(dev); + + sti_drm_mode_config_init(dev); + + ret = component_bind_all(dev->dev, dev); + if (ret) + return ret; + + drm_helper_disable_unused_functions(dev); + +#ifdef CONFIG_DRM_STI_FBDEV + drm_fbdev_cma_init(dev, 32, + dev->mode_config.num_crtc, + dev->mode_config.num_connector); +#endif + return 0; +} + +static const struct file_operations sti_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .mmap = drm_gem_cma_mmap, + .poll = drm_poll, + .read = drm_read, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .release = drm_release, +}; + +static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags) +{ + /* we want to be able to write in mmapped buffer */ + flags |= O_RDWR; + return drm_gem_prime_export(dev, obj, flags); +} + +static struct drm_driver sti_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | + DRIVER_GEM | DRIVER_PRIME, + .load = sti_drm_load, + .gem_free_object = drm_gem_cma_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .dumb_create = drm_gem_cma_dumb_create, + .dumb_map_offset = drm_gem_cma_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + .fops = &sti_drm_driver_fops, + + .get_vblank_counter = drm_vblank_count, + .enable_vblank = sti_drm_crtc_enable_vblank, + .disable_vblank = sti_drm_crtc_disable_vblank, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = sti_drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, +}; + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int sti_drm_bind(struct device *dev) +{ + return drm_platform_init(&sti_drm_driver, to_platform_device(dev)); +} + +static void sti_drm_unbind(struct device *dev) +{ + drm_put_dev(dev_get_drvdata(dev)); +} + +static const struct component_master_ops sti_drm_ops = { + .bind = sti_drm_bind, + .unbind = sti_drm_unbind, +}; + +static int sti_drm_master_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->parent->of_node; + struct device_node *child_np; + struct component_match *match = NULL; + + dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + + child_np = of_get_next_available_child(node, NULL); + + while (child_np) { + component_match_add(dev, &match, compare_of, child_np); + of_node_put(child_np); + child_np = of_get_next_available_child(node, child_np); + 
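 /*
  * Every available child of the st,sti-display-subsystem node is added
  * to the component match list here; sti_drm_bind(), and with it
  * drm_platform_init(), only runs once all of the matched child devices
  * have bound to the component master.
  */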
} + + return component_master_add_with_match(dev, &sti_drm_ops, match); +} + +static int sti_drm_master_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &sti_drm_ops); + return 0; +} + +static struct platform_driver sti_drm_master_driver = { + .probe = sti_drm_master_probe, + .remove = sti_drm_master_remove, + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME "__master", + }, +}; + +static int sti_drm_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct platform_device *master; + + of_platform_populate(node, NULL, NULL, dev); + + platform_driver_register(&sti_drm_master_driver); + master = platform_device_register_resndata(dev, + DRIVER_NAME "__master", -1, + NULL, 0, NULL, 0); + if (IS_ERR(master)) + return PTR_ERR(master); + + platform_set_drvdata(pdev, master); + return 0; +} + +static int sti_drm_platform_remove(struct platform_device *pdev) +{ + struct platform_device *master = platform_get_drvdata(pdev); + + of_platform_depopulate(&pdev->dev); + platform_device_unregister(master); + platform_driver_unregister(&sti_drm_master_driver); + return 0; +} + +static const struct of_device_id sti_drm_dt_ids[] = { + { .compatible = "st,sti-display-subsystem", }, + { /* end node */ }, +}; +MODULE_DEVICE_TABLE(of, sti_drm_dt_ids); + +static struct platform_driver sti_drm_platform_driver = { + .probe = sti_drm_platform_probe, + .remove = sti_drm_platform_remove, + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + .of_match_table = sti_drm_dt_ids, + }, +}; + +module_platform_driver(sti_drm_platform_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h new file mode 100644 index 000000000000..ec5e2eb8dff9 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_drm_drv.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_DRM_DRV_H_ +#define _STI_DRM_DRV_H_ + +#include <drm/drmP.h> + +struct sti_compositor; +struct sti_tvout; + +/** + * STI drm private structure + * This structure is stored as private in the drm_device + * + * @compo: compositor + * @plane_zorder_property: z-order property for CRTC planes + * @drm_dev: drm device + */ +struct sti_drm_private { + struct sti_compositor *compo; + struct drm_property *plane_zorder_property; + struct drm_device *drm_dev; +}; + +#endif diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c new file mode 100644 index 000000000000..f4118d4cac22 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_drm_plane.c @@ -0,0 +1,195 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include "sti_compositor.h" +#include "sti_drm_drv.h" +#include "sti_drm_plane.h" +#include "sti_vtg.h" + +enum sti_layer_desc sti_layer_default_zorder[] = { + STI_GDP_0, + STI_VID_0, + STI_GDP_1, + STI_VID_1, + STI_GDP_2, + STI_GDP_3, +}; + +/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */ + +static int +sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct sti_layer *layer = to_sti_layer(plane); + struct sti_mixer *mixer = to_sti_mixer(crtc); + int res; + + DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n", + crtc->base.id, sti_mixer_to_str(mixer), + plane->base.id, sti_layer_to_str(layer), fb->base.id); + DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y); + + res = sti_mixer_set_layer_depth(mixer, layer); + if (res) { + DRM_ERROR("Can not set layer depth\n"); + return res; + } + + /* src_x are in 16.16 format. */ + res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x >> 16, src_y >> 16, + src_w >> 16, src_h >> 16); + if (res) { + DRM_ERROR("Layer prepare failed\n"); + return res; + } + + res = sti_layer_commit(layer); + if (res) { + DRM_ERROR("Layer commit failed\n"); + return res; + } + + res = sti_mixer_set_layer_status(mixer, layer, true); + if (res) { + DRM_ERROR("Can not enable layer at mixer\n"); + return res; + } + + return 0; +} + +static int sti_drm_disable_plane(struct drm_plane *plane) +{ + struct sti_layer *layer; + struct sti_mixer *mixer; + int lay_res, mix_res; + + if (!plane->crtc) { + DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id); + return 0; + } + layer = to_sti_layer(plane); + mixer = to_sti_mixer(plane->crtc); + + DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", + plane->crtc->base.id, sti_mixer_to_str(mixer), + plane->base.id, sti_layer_to_str(layer)); + + /* Disable layer at mixer level */ + mix_res = sti_mixer_set_layer_status(mixer, layer, false); + if (mix_res) + DRM_ERROR("Can not disable layer at mixer\n"); + + /* Wait a while to be sure that a Vsync event is received */ + msleep(WAIT_NEXT_VSYNC_MS); + + /* Then disable layer itself */ + lay_res = sti_layer_disable(layer); + if (lay_res) + DRM_ERROR("Layer disable failed\n"); + + if (lay_res || mix_res) + return -EINVAL; + + return 0; +} + +static void sti_drm_plane_destroy(struct drm_plane *plane) +{ + DRM_DEBUG_DRIVER("\n"); + + sti_drm_disable_plane(plane); + drm_plane_cleanup(plane); +} + +static int sti_drm_plane_set_property(struct drm_plane *plane, + struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = plane->dev; + struct sti_drm_private *private = dev->dev_private; + struct sti_layer *layer = to_sti_layer(plane); + + DRM_DEBUG_DRIVER("\n"); + + if (property == private->plane_zorder_property) { + layer->zorder = val; + return 0; + } + + return -EINVAL; +} + +static struct drm_plane_funcs sti_drm_plane_funcs = { + .update_plane = sti_drm_update_plane, + .disable_plane = sti_drm_disable_plane, + .destroy = sti_drm_plane_destroy, + .set_property = sti_drm_plane_set_property, +}; + +static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane, + uint64_t default_val) +{ + struct drm_device *dev = plane->dev; + struct sti_drm_private *private = dev->dev_private; + struct drm_property *prop; + 
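/*
 * The "zpos" range property created below spans
 * 0 .. GAM_MIXER_NB_DEPTH_LEVEL - 1 and is attached to overlay planes
 * only. A hedged userspace sketch, assuming libdrm's
 * drmModeObjectSetProperty() (not part of this patch):
 *
 *	drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
 *				 zpos_prop_id, 3);
 *
 * where zpos_prop_id is found by matching the property name "zpos" in
 * drmModeObjectGetProperties() for that plane. The value lands in
 * layer->zorder via sti_drm_plane_set_property() above.
 */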
struct sti_layer *layer = to_sti_layer(plane); + + prop = private->plane_zorder_property; + if (!prop) { + prop = drm_property_create_range(dev, 0, "zpos", 0, + GAM_MIXER_NB_DEPTH_LEVEL - 1); + if (!prop) + return; + + private->plane_zorder_property = prop; + } + + drm_object_attach_property(&plane->base, prop, default_val); + layer->zorder = default_val; +} + +struct drm_plane *sti_drm_plane_init(struct drm_device *dev, + struct sti_layer *layer, + unsigned int possible_crtcs, + enum drm_plane_type type) +{ + int err, i; + uint64_t default_zorder = 0; + + err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs, + &sti_drm_plane_funcs, + sti_layer_get_formats(layer), + sti_layer_get_nb_formats(layer), type); + if (err) { + DRM_ERROR("Failed to initialize plane\n"); + return NULL; + } + + for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++) + if (sti_layer_default_zorder[i] == layer->desc) + break; + + default_zorder = i; + + if (type == DRM_PLANE_TYPE_OVERLAY) + sti_drm_plane_attach_zorder_property(&layer->plane, + default_zorder); + + DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n", + layer->plane.base.id, + sti_layer_to_str(layer), default_zorder); + + return &layer->plane; +} diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h new file mode 100644 index 000000000000..4f191839f2a7 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_drm_plane.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_DRM_PLANE_H_ +#define _STI_DRM_PLANE_H_ + +#include <drm/drmP.h> + +struct sti_layer; + +struct drm_plane *sti_drm_plane_init(struct drm_device *dev, + struct sti_layer *layer, + unsigned int possible_crtcs, + enum drm_plane_type type); +#endif diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c new file mode 100644 index 000000000000..4e30b74559f5 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -0,0 +1,549 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> + +#include "sti_compositor.h" +#include "sti_gdp.h" +#include "sti_layer.h" +#include "sti_vtg.h" + +#define ENA_COLOR_FILL BIT(8) +#define WAIT_NEXT_VSYNC BIT(31) + +/* GDP color formats */ +#define GDP_RGB565 0x00 +#define GDP_RGB888 0x01 +#define GDP_RGB888_32 0x02 +#define GDP_ARGB8565 0x04 +#define GDP_ARGB8888 0x05 +#define GDP_ARGB1555 0x06 +#define GDP_ARGB4444 0x07 +#define GDP_CLUT8 0x0B +#define GDP_YCBR888 0x10 +#define GDP_YCBR422R 0x12 +#define GDP_AYCBR8888 0x15 + +#define GAM_GDP_CTL_OFFSET 0x00 +#define GAM_GDP_AGC_OFFSET 0x04 +#define GAM_GDP_VPO_OFFSET 0x0C +#define GAM_GDP_VPS_OFFSET 0x10 +#define GAM_GDP_PML_OFFSET 0x14 +#define GAM_GDP_PMP_OFFSET 0x18 +#define GAM_GDP_SIZE_OFFSET 0x1C +#define GAM_GDP_NVN_OFFSET 0x24 +#define GAM_GDP_KEY1_OFFSET 0x28 +#define GAM_GDP_KEY2_OFFSET 0x2C +#define GAM_GDP_PPT_OFFSET 0x34 +#define GAM_GDP_CML_OFFSET 0x3C +#define GAM_GDP_MST_OFFSET 0x68 + +#define GAM_GDP_ALPHARANGE_255 BIT(5) +#define GAM_GDP_AGC_FULL_RANGE 0x00808080 +#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) +#define GAM_GDP_SIZE_MAX 0x7FF + +#define GDP_NODE_NB_BANK 2 +#define GDP_NODE_PER_FIELD 2 + +struct sti_gdp_node { + u32 gam_gdp_ctl; + u32 gam_gdp_agc; + u32 reserved1; + u32 gam_gdp_vpo; + u32 gam_gdp_vps; + u32 gam_gdp_pml; + u32 gam_gdp_pmp; + u32 gam_gdp_size; + u32 reserved2; + u32 gam_gdp_nvn; + u32 gam_gdp_key1; + u32 gam_gdp_key2; + u32 reserved3; + u32 gam_gdp_ppt; + u32 reserved4; + u32 gam_gdp_cml; +}; + +struct sti_gdp_node_list { + struct sti_gdp_node *top_field; + struct sti_gdp_node *btm_field; +}; + +/** + * STI GDP structure + * + * @layer: layer structure + * @clk_pix: pixel clock for the current gdp + * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification + * @is_curr_top: true if the current node processed is the top field + * @node_list: array of node list + */ +struct sti_gdp { + struct sti_layer layer; + struct clk *clk_pix; + struct notifier_block vtg_field_nb; + bool is_curr_top; + struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; +}; + +#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer) + +static const uint32_t gdp_supported_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_AYUV, + DRM_FORMAT_YUV444, + DRM_FORMAT_VYUY, + DRM_FORMAT_C8, +}; + +static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer) +{ + return gdp_supported_formats; +} + +static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer) +{ + return ARRAY_SIZE(gdp_supported_formats); +} + +static int sti_gdp_fourcc2format(int fourcc) +{ + switch (fourcc) { + case DRM_FORMAT_XRGB8888: + return GDP_RGB888_32; + case DRM_FORMAT_ARGB8888: + return GDP_ARGB8888; + case DRM_FORMAT_ARGB4444: + return GDP_ARGB4444; + case DRM_FORMAT_ARGB1555: + return GDP_ARGB1555; + case DRM_FORMAT_RGB565: + return GDP_RGB565; + case DRM_FORMAT_RGB888: + return GDP_RGB888; + case DRM_FORMAT_AYUV: + return GDP_AYCBR8888; + case DRM_FORMAT_YUV444: + return GDP_YCBR888; + case DRM_FORMAT_VYUY: + return GDP_YCBR422R; + case DRM_FORMAT_C8: + return GDP_CLUT8; + } + return -1; +} + +static int sti_gdp_get_alpharange(int format) +{ + switch (format) { + case GDP_ARGB8565: + case GDP_ARGB8888: + case GDP_AYCBR8888: + return GAM_GDP_ALPHARANGE_255; + } + return 0; +} + +/** + * sti_gdp_get_free_nodes + * @layer: gdp layer + * + 
* Look for a GDP node list that is not currently read by the HW. + * + * RETURNS: + * Pointer to the free GDP node list + */ +static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) +{ + int hw_nvn; + void *virt_nvn; + struct sti_gdp *gdp = to_sti_gdp(layer); + unsigned int i; + + hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); + if (!hw_nvn) + goto end; + + virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); + + for (i = 0; i < GDP_NODE_NB_BANK; i++) + if ((virt_nvn != gdp->node_list[i].btm_field) && + (virt_nvn != gdp->node_list[i].top_field)) + return &gdp->node_list[i]; + + /* in hazardious cases restart with the first node */ + DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", + sti_layer_to_str(layer), hw_nvn); + +end: + return &gdp->node_list[0]; +} + +/** + * sti_gdp_get_current_nodes + * @layer: GDP layer + * + * Look for GDP nodes that are currently read by the HW. + * + * RETURNS: + * Pointer to the current GDP node list + */ +static +struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) +{ + int hw_nvn; + void *virt_nvn; + struct sti_gdp *gdp = to_sti_gdp(layer); + unsigned int i; + + hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); + if (!hw_nvn) + goto end; + + virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn); + + for (i = 0; i < GDP_NODE_NB_BANK; i++) + if ((virt_nvn == gdp->node_list[i].btm_field) || + (virt_nvn == gdp->node_list[i].top_field)) + return &gdp->node_list[i]; + +end: + DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", + hw_nvn, sti_layer_to_str(layer)); + + return NULL; +} + +/** + * sti_gdp_prepare_layer + * @lay: gdp layer + * @first_prepare: true if it is the first time this function is called + * + * Update the free GDP node list according to the layer properties. + * + * RETURNS: + * 0 on success. 
+ */ +static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare) +{ + struct sti_gdp_node_list *list; + struct sti_gdp_node *top_field, *btm_field; + struct drm_display_mode *mode = layer->mode; + struct device *dev = layer->dev; + struct sti_gdp *gdp = to_sti_gdp(layer); + struct sti_compositor *compo = dev_get_drvdata(dev); + int format; + unsigned int depth, bpp; + int rate = mode->clock * 1000; + int res; + u32 ydo, xdo, yds, xds; + + list = sti_gdp_get_free_nodes(layer); + top_field = list->top_field; + btm_field = list->btm_field; + + dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, + sti_layer_to_str(layer), top_field, btm_field); + + /* Build the top field from layer params */ + top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; + top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; + format = sti_gdp_fourcc2format(layer->format); + if (format == -1) { + DRM_ERROR("Format not supported by GDP %.4s\n", + (char *)&layer->format); + return 1; + } + top_field->gam_gdp_ctl |= format; + top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format); + top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; + + /* pixel memory location */ + drm_fb_get_bpp_depth(layer->format, &depth, &bpp); + top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0]; + top_field->gam_gdp_pml += layer->src_x * (bpp >> 3); + top_field->gam_gdp_pml += layer->src_y * layer->pitches[0]; + + /* input parameters */ + top_field->gam_gdp_pmp = layer->pitches[0]; + top_field->gam_gdp_size = + clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 | + clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX); + + /* output parameters */ + ydo = sti_vtg_get_line_number(*mode, layer->dst_y); + yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1); + xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x); + xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1); + top_field->gam_gdp_vpo = (ydo << 16) | xdo; + top_field->gam_gdp_vps = (yds << 16) | xds; + + /* Same content and chained together */ + memcpy(btm_field, top_field, sizeof(*btm_field)); + top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field); + btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field); + + /* Interlaced mode */ + if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) + btm_field->gam_gdp_pml = top_field->gam_gdp_pml + + layer->pitches[0]; + + if (first_prepare) { + /* Register gdp callback */ + if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ? + compo->vtg_main : compo->vtg_aux, + &gdp->vtg_field_nb, layer->mixer_id)) { + DRM_ERROR("Cannot register VTG notifier\n"); + return 1; + } + + /* Set and enable gdp clock */ + if (gdp->clk_pix) { + res = clk_set_rate(gdp->clk_pix, rate); + if (res < 0) { + DRM_ERROR("Cannot set rate (%dHz) for gdp\n", + rate); + return 1; + } + + if (clk_prepare_enable(gdp->clk_pix)) { + DRM_ERROR("Failed to prepare/enable gdp\n"); + return 1; + } + } + } + + return 0; +} + +/** + * sti_gdp_commit_layer + * @lay: gdp layer + * + * Update the NVN field of the 'right' field of the current GDP node (being + * used by the HW) with the address of the updated ('free') top field GDP node. + * - In interlaced mode the 'right' field is the bottom field as we update + * frames starting from their top field + * - In progressive mode, we update both bottom and top fields which are + * equal nodes. + * At the next VSYNC, the updated node list will be used by the HW. + * + * RETURNS: + * 0 on success. 
+ */ +static int sti_gdp_commit_layer(struct sti_layer *layer) +{ + struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer); + struct sti_gdp_node *updated_top_node = updated_list->top_field; + struct sti_gdp_node *updated_btm_node = updated_list->btm_field; + struct sti_gdp *gdp = to_sti_gdp(layer); + u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node); + u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node); + struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); + + dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, + sti_layer_to_str(layer), + updated_top_node, updated_btm_node); + dev_dbg(layer->dev, "Current NVN:0x%X\n", + readl(layer->regs + GAM_GDP_NVN_OFFSET)); + dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n", + (unsigned long)layer->paddr, + readl(layer->regs + GAM_GDP_PML_OFFSET)); + + if (curr_list == NULL) { + /* First update or invalid node should directly write in the + * hw register */ + DRM_DEBUG_DRIVER("%s first update (or invalid node)", + sti_layer_to_str(layer)); + + writel(gdp->is_curr_top == true ? + dma_updated_btm : dma_updated_top, + layer->regs + GAM_GDP_NVN_OFFSET); + return 0; + } + + if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) { + if (gdp->is_curr_top == true) { + /* Do not update in the middle of the frame, but + * postpone the update after the bottom field has + * been displayed */ + curr_list->btm_field->gam_gdp_nvn = dma_updated_top; + } else { + /* Direct update to avoid one frame delay */ + writel(dma_updated_top, + layer->regs + GAM_GDP_NVN_OFFSET); + } + } else { + /* Direct update for progressive to avoid one frame delay */ + writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); + } + + return 0; +} + +/** + * sti_gdp_disable_layer + * @lay: gdp layer + * + * Disable a GDP. + * + * RETURNS: + * 0 on success. + */ +static int sti_gdp_disable_layer(struct sti_layer *layer) +{ + unsigned int i; + struct sti_gdp *gdp = to_sti_gdp(layer); + struct sti_compositor *compo = dev_get_drvdata(layer->dev); + + DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); + + /* Set the nodes as 'to be ignored on mixer' */ + for (i = 0; i < GDP_NODE_NB_BANK; i++) { + gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; + gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; + } + + if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ? + compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) + DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); + + if (gdp->clk_pix) + clk_disable_unprepare(gdp->clk_pix); + + return 0; +} + +/** + * sti_gdp_field_cb + * @nb: notifier block + * @event: event message + * @data: private data + * + * Handle VTG top field and bottom field event. + * + * RETURNS: + * 0 on success. 
+ */ +int sti_gdp_field_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb); + + switch (event) { + case VTG_TOP_FIELD_EVENT: + gdp->is_curr_top = true; + break; + case VTG_BOTTOM_FIELD_EVENT: + gdp->is_curr_top = false; + break; + default: + DRM_ERROR("unsupported event: %lu\n", event); + break; + } + + return 0; +} + +static void sti_gdp_init(struct sti_layer *layer) +{ + struct sti_gdp *gdp = to_sti_gdp(layer); + struct device_node *np = layer->dev->of_node; + dma_addr_t dma; + void *base; + unsigned int i, size; + + /* Allocate all the nodes within a single memory page */ + size = sizeof(struct sti_gdp_node) * + GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; + + base = dma_alloc_writecombine(layer->dev, + size, &dma, GFP_KERNEL | GFP_DMA); + if (!base) { + DRM_ERROR("Failed to allocate memory for GDP node\n"); + return; + } + memset(base, 0, size); + + for (i = 0; i < GDP_NODE_NB_BANK; i++) { + if (virt_to_dma(layer->dev, base) & 0xF) { + DRM_ERROR("Mem alignment failed\n"); + return; + } + gdp->node_list[i].top_field = base; + DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base); + base += sizeof(struct sti_gdp_node); + + if (virt_to_dma(layer->dev, base) & 0xF) { + DRM_ERROR("Mem alignment failed\n"); + return; + } + gdp->node_list[i].btm_field = base; + DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base); + base += sizeof(struct sti_gdp_node); + } + + if (of_device_is_compatible(np, "st,stih407-compositor")) { + /* GDP of STiH407 chip have its own pixel clock */ + char *clk_name; + + switch (layer->desc) { + case STI_GDP_0: + clk_name = "pix_gdp1"; + break; + case STI_GDP_1: + clk_name = "pix_gdp2"; + break; + case STI_GDP_2: + clk_name = "pix_gdp3"; + break; + case STI_GDP_3: + clk_name = "pix_gdp4"; + break; + default: + DRM_ERROR("GDP id not recognized\n"); + return; + } + + gdp->clk_pix = devm_clk_get(layer->dev, clk_name); + if (IS_ERR(gdp->clk_pix)) + DRM_ERROR("Cannot get %s clock\n", clk_name); + } +} + +static const struct sti_layer_funcs gdp_ops = { + .get_formats = sti_gdp_get_formats, + .get_nb_formats = sti_gdp_get_nb_formats, + .init = sti_gdp_init, + .prepare = sti_gdp_prepare_layer, + .commit = sti_gdp_commit_layer, + .disable = sti_gdp_disable_layer, +}; + +struct sti_layer *sti_gdp_create(struct device *dev, int id) +{ + struct sti_gdp *gdp; + + gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL); + if (!gdp) { + DRM_ERROR("Failed to allocate memory for GDP\n"); + return NULL; + } + + gdp->layer.ops = &gdp_ops; + gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; + + return (struct sti_layer *)gdp; +} diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h new file mode 100644 index 000000000000..1dab68274ad3 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_gdp.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_GDP_H_ +#define _STI_GDP_H_ + +#include <linux/types.h> + +struct sti_layer *sti_gdp_create(struct device *dev, int id); + +#endif diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c new file mode 100644 index 000000000000..2ae9a9b73666 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -0,0 +1,794 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +/* HDformatter registers */ +#define HDA_ANA_CFG 0x0000 +#define HDA_ANA_SCALE_CTRL_Y 0x0004 +#define HDA_ANA_SCALE_CTRL_CB 0x0008 +#define HDA_ANA_SCALE_CTRL_CR 0x000C +#define HDA_ANA_ANC_CTRL 0x0010 +#define HDA_ANA_SRC_Y_CFG 0x0014 +#define HDA_COEFF_Y_PH1_TAP123 0x0018 +#define HDA_COEFF_Y_PH1_TAP456 0x001C +#define HDA_COEFF_Y_PH2_TAP123 0x0020 +#define HDA_COEFF_Y_PH2_TAP456 0x0024 +#define HDA_COEFF_Y_PH3_TAP123 0x0028 +#define HDA_COEFF_Y_PH3_TAP456 0x002C +#define HDA_COEFF_Y_PH4_TAP123 0x0030 +#define HDA_COEFF_Y_PH4_TAP456 0x0034 +#define HDA_ANA_SRC_C_CFG 0x0040 +#define HDA_COEFF_C_PH1_TAP123 0x0044 +#define HDA_COEFF_C_PH1_TAP456 0x0048 +#define HDA_COEFF_C_PH2_TAP123 0x004C +#define HDA_COEFF_C_PH2_TAP456 0x0050 +#define HDA_COEFF_C_PH3_TAP123 0x0054 +#define HDA_COEFF_C_PH3_TAP456 0x0058 +#define HDA_COEFF_C_PH4_TAP123 0x005C +#define HDA_COEFF_C_PH4_TAP456 0x0060 +#define HDA_SYNC_AWGI 0x0300 + +/* HDA_ANA_CFG */ +#define CFG_AWG_ASYNC_EN BIT(0) +#define CFG_AWG_ASYNC_HSYNC_MTD BIT(1) +#define CFG_AWG_ASYNC_VSYNC_MTD BIT(2) +#define CFG_AWG_SYNC_DEL BIT(3) +#define CFG_AWG_FLTR_MODE_SHIFT 4 +#define CFG_AWG_FLTR_MODE_MASK (0xF << CFG_AWG_FLTR_MODE_SHIFT) +#define CFG_AWG_FLTR_MODE_SD (0 << CFG_AWG_FLTR_MODE_SHIFT) +#define CFG_AWG_FLTR_MODE_ED (1 << CFG_AWG_FLTR_MODE_SHIFT) +#define CFG_AWG_FLTR_MODE_HD (2 << CFG_AWG_FLTR_MODE_SHIFT) +#define CFG_SYNC_ON_PBPR_MASK BIT(8) +#define CFG_PREFILTER_EN_MASK BIT(9) +#define CFG_PBPR_SYNC_OFF_SHIFT 16 +#define CFG_PBPR_SYNC_OFF_MASK (0x7FF << CFG_PBPR_SYNC_OFF_SHIFT) +#define CFG_PBPR_SYNC_OFF_VAL 0x117 /* Voltage dependent. 
stiH416 */ + +/* Default scaling values */ +#define SCALE_CTRL_Y_DFLT 0x00C50256 +#define SCALE_CTRL_CB_DFLT 0x00DB0249 +#define SCALE_CTRL_CR_DFLT 0x00DB0249 + +/* Video DACs control */ +#define VIDEO_DACS_CONTROL_MASK 0x0FFF +#define VIDEO_DACS_CONTROL_SYSCFG2535 0x085C /* for stih416 */ +#define DAC_CFG_HD_OFF_SHIFT 5 +#define DAC_CFG_HD_OFF_MASK (0x7 << DAC_CFG_HD_OFF_SHIFT) +#define VIDEO_DACS_CONTROL_SYSCFG5072 0x0120 /* for stih407 */ +#define DAC_CFG_HD_HZUVW_OFF_MASK BIT(1) + + +/* Upsampler values for the alternative 2X Filter */ +#define SAMPLER_COEF_NB 8 +#define HDA_ANA_SRC_Y_CFG_ALT_2X 0x01130000 +static u32 coef_y_alt_2x[] = { + 0x00FE83FB, 0x1F900401, 0x00000000, 0x00000000, + 0x00F408F9, 0x055F7C25, 0x00000000, 0x00000000 +}; + +#define HDA_ANA_SRC_C_CFG_ALT_2X 0x01750004 +static u32 coef_c_alt_2x[] = { + 0x001305F7, 0x05274BD0, 0x00000000, 0x00000000, + 0x0004907C, 0x09C80B9D, 0x00000000, 0x00000000 +}; + +/* Upsampler values for the 4X Filter */ +#define HDA_ANA_SRC_Y_CFG_4X 0x01ED0005 +#define HDA_ANA_SRC_C_CFG_4X 0x01ED0004 +static u32 coef_yc_4x[] = { + 0x00FC827F, 0x008FE20B, 0x00F684FC, 0x050F7C24, + 0x00F4857C, 0x0A1F402E, 0x00FA027F, 0x0E076E1D +}; + +/* AWG instructions for some video modes */ +#define AWG_MAX_INST 64 + +/* 720p@50 */ +static u32 AWGi_720p_50[] = { + 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA, + 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B, + 0x00000D8E, 0x00000104, 0x00001804, 0x00000971, + 0x00000C26, 0x0000003B, 0x00000FB4, 0x00000FB5, + 0x00000104, 0x00001AE8 +}; + +#define NN_720p_50 ARRAY_SIZE(AWGi_720p_50) + +/* 720p@60 */ +static u32 AWGi_720p_60[] = { + 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA, + 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B, + 0x00000C44, 0x00000104, 0x00001804, 0x00000971, + 0x00000C26, 0x0000003B, 0x00000F0F, 0x00000F10, + 0x00000104, 0x00001AE8 +}; + +#define NN_720p_60 ARRAY_SIZE(AWGi_720p_60) + +/* 1080p@30 */ +static u32 AWGi_1080p_30[] = { + 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56, + 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B, + 0x00000C2A, 0x00000104, 0x00001804, 0x00000971, + 0x00000C2A, 0x0000003B, 0x00000EBE, 0x00000EBF, + 0x00000EBF, 0x00000104, 0x00001A2F, 0x00001C4B, + 0x00001C52 +}; + +#define NN_1080p_30 ARRAY_SIZE(AWGi_1080p_30) + +/* 1080p@25 */ +static u32 AWGi_1080p_25[] = { + 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56, + 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B, + 0x00000DE2, 0x00000104, 0x00001804, 0x00000971, + 0x00000C2A, 0x0000003B, 0x00000F51, 0x00000F51, + 0x00000F52, 0x00000104, 0x00001A2F, 0x00001C4B, + 0x00001C52 +}; + +#define NN_1080p_25 ARRAY_SIZE(AWGi_1080p_25) + +/* 1080p@24 */ +static u32 AWGi_1080p_24[] = { + 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56, + 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B, + 0x00000E50, 0x00000104, 0x00001804, 0x00000971, + 0x00000C2A, 0x0000003B, 0x00000F76, 0x00000F76, + 0x00000F76, 0x00000104, 0x00001A2F, 0x00001C4B, + 0x00001C52 +}; + +#define NN_1080p_24 ARRAY_SIZE(AWGi_1080p_24) + +/* 720x480p@60 */ +static u32 AWGi_720x480p_60[] = { + 0x00000904, 0x00000F18, 0x0000013B, 0x00001805, + 0x00000904, 0x00000C3D, 0x0000003B, 0x00001A06 +}; + +#define NN_720x480p_60 ARRAY_SIZE(AWGi_720x480p_60) + +/* Video mode category */ +enum sti_hda_vid_cat { + VID_SD, + VID_ED, + VID_HD_74M, + VID_HD_148M +}; + +struct sti_hda_video_config { + struct drm_display_mode mode; + u32 *awg_instr; + int nb_instr; + enum sti_hda_vid_cat vid_cat; +}; + +/* HD analog supported modes + * Interlaced modes may be added when supported by the 
whole display chain + */ +static const struct sti_hda_video_config hda_supported_modes[] = { + /* 1080p30 74.250Mhz */ + {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_1080p_30, NN_1080p_30, VID_HD_74M}, + /* 1080p30 74.176Mhz */ + {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_1080p_30, NN_1080p_30, VID_HD_74M}, + /* 1080p24 74.250Mhz */ + {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_1080p_24, NN_1080p_24, VID_HD_74M}, + /* 1080p24 74.176Mhz */ + {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_1080p_24, NN_1080p_24, VID_HD_74M}, + /* 1080p25 74.250Mhz */ + {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_1080p_25, NN_1080p_25, VID_HD_74M}, + /* 720p60 74.250Mhz */ + {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_720p_60, NN_720p_60, VID_HD_74M}, + /* 720p60 74.176Mhz */ + {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74176, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_720p_60, NN_720p_60, VID_HD_74M}, + /* 720p50 74.250Mhz */ + {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)}, + AWGi_720p_50, NN_720p_50, VID_HD_74M}, + /* 720x480p60 27.027Mhz */ + {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27027, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)}, + AWGi_720x480p_60, NN_720x480p_60, VID_ED}, + /* 720x480p60 27.000Mhz */ + {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, + 798, 858, 0, 480, 489, 495, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)}, + AWGi_720x480p_60, NN_720x480p_60, VID_ED} +}; + +/** + * STI hd analog structure + * + * @dev: driver device + * @drm_dev: pointer to drm device + * @mode: current display mode selected + * @regs: HD analog register + * @video_dacs_ctrl: video DACS control register + * @enabled: true if HD analog is enabled else false + */ +struct sti_hda { + struct device dev; + struct drm_device *drm_dev; + struct drm_display_mode mode; + void __iomem *regs; + void __iomem *video_dacs_ctrl; + struct clk *clk_pix; + struct clk *clk_hddac; + bool enabled; +}; + +struct sti_hda_connector { + struct drm_connector drm_connector; + struct drm_encoder *encoder; + struct sti_hda *hda; +}; + +#define to_sti_hda_connector(x) \ + container_of(x, struct sti_hda_connector, drm_connector) + +static u32 hda_read(struct sti_hda *hda, int offset) +{ + return readl(hda->regs + offset); +} + +static void hda_write(struct sti_hda *hda, u32 val, int offset) +{ + writel(val, hda->regs + offset); +} + +/** + * Search for a video mode in the supported modes table + * + * @mode: mode being searched + * @idx: index of the found mode + * + * Return true if mode is found + */ +static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx) +{ + unsigned int i; + + 
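/*
 * Worked example of this lookup: a 1280x720@50 mode with a 74.250 MHz
 * pixel clock matches a VID_HD_74M entry of hda_supported_modes[], so
 * the callers pick the AWGi_720p_50 instruction set, the alternate 2x
 * upsampling filter and an HD DAC rate of 74.250 MHz * 2 = 148.5 MHz
 * (see sti_hda_pre_enable() and sti_hda_set_mode() below).
 */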
for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++) + if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) { + *idx = i; + return true; + } + return false; +} + +/** + * Enable the HD DACS + * + * @hda: pointer to HD analog structure + * @enable: true if HD DACS need to be enabled, else false + */ +static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable) +{ + u32 mask; + + if (hda->video_dacs_ctrl) { + u32 val; + + switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) { + case VIDEO_DACS_CONTROL_SYSCFG2535: + mask = DAC_CFG_HD_OFF_MASK; + break; + case VIDEO_DACS_CONTROL_SYSCFG5072: + mask = DAC_CFG_HD_HZUVW_OFF_MASK; + break; + default: + DRM_INFO("Video DACS control register not supported!"); + return; + } + + val = readl(hda->video_dacs_ctrl); + if (enable) + val &= ~mask; + else + val |= mask; + + writel(val, hda->video_dacs_ctrl); + } +} + +/** + * Configure AWG, writing instructions + * + * @hda: pointer to HD analog structure + * @awg_instr: pointer to AWG instructions table + * @nb: nb of AWG instructions + */ +static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb) +{ + unsigned int i; + + DRM_DEBUG_DRIVER("\n"); + + for (i = 0; i < nb; i++) + hda_write(hda, awg_instr[i], HDA_SYNC_AWGI + i * 4); + for (i = nb; i < AWG_MAX_INST; i++) + hda_write(hda, 0, HDA_SYNC_AWGI + i * 4); +} + +static void sti_hda_disable(struct drm_bridge *bridge) +{ + struct sti_hda *hda = bridge->driver_private; + u32 val; + + if (!hda->enabled) + return; + + DRM_DEBUG_DRIVER("\n"); + + /* Disable HD DAC and AWG */ + val = hda_read(hda, HDA_ANA_CFG); + val &= ~CFG_AWG_ASYNC_EN; + hda_write(hda, val, HDA_ANA_CFG); + hda_write(hda, 0, HDA_ANA_ANC_CTRL); + + hda_enable_hd_dacs(hda, false); + + /* Disable/unprepare hda clock */ + clk_disable_unprepare(hda->clk_hddac); + clk_disable_unprepare(hda->clk_pix); + + hda->enabled = false; +} + +static void sti_hda_pre_enable(struct drm_bridge *bridge) +{ + struct sti_hda *hda = bridge->driver_private; + u32 val, i, mode_idx; + u32 src_filter_y, src_filter_c; + u32 *coef_y, *coef_c; + u32 filter_mode; + + DRM_DEBUG_DRIVER("\n"); + + if (hda->enabled) + return; + + /* Prepare/enable clocks */ + if (clk_prepare_enable(hda->clk_pix)) + DRM_ERROR("Failed to prepare/enable hda_pix clk\n"); + if (clk_prepare_enable(hda->clk_hddac)) + DRM_ERROR("Failed to prepare/enable hda_hddac clk\n"); + + if (!hda_get_mode_idx(hda->mode, &mode_idx)) { + DRM_ERROR("Undefined mode\n"); + return; + } + + switch (hda_supported_modes[mode_idx].vid_cat) { + case VID_HD_148M: + DRM_ERROR("Beyond HD analog capabilities\n"); + return; + case VID_HD_74M: + /* HD use alternate 2x filter */ + filter_mode = CFG_AWG_FLTR_MODE_HD; + src_filter_y = HDA_ANA_SRC_Y_CFG_ALT_2X; + src_filter_c = HDA_ANA_SRC_C_CFG_ALT_2X; + coef_y = coef_y_alt_2x; + coef_c = coef_c_alt_2x; + break; + case VID_ED: + /* ED uses 4x filter */ + filter_mode = CFG_AWG_FLTR_MODE_ED; + src_filter_y = HDA_ANA_SRC_Y_CFG_4X; + src_filter_c = HDA_ANA_SRC_C_CFG_4X; + coef_y = coef_yc_4x; + coef_c = coef_yc_4x; + break; + case VID_SD: + DRM_ERROR("Not supported\n"); + return; + default: + DRM_ERROR("Undefined resolution\n"); + return; + } + DRM_DEBUG_DRIVER("Using HDA mode #%d\n", mode_idx); + + /* Enable HD Video DACs */ + hda_enable_hd_dacs(hda, true); + + /* Configure scaler */ + hda_write(hda, SCALE_CTRL_Y_DFLT, HDA_ANA_SCALE_CTRL_Y); + hda_write(hda, SCALE_CTRL_CB_DFLT, HDA_ANA_SCALE_CTRL_CB); + hda_write(hda, SCALE_CTRL_CR_DFLT, HDA_ANA_SCALE_CTRL_CR); + + /* Configure sampler */ + 
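/*
 * The two hda_write() calls below select the Y/C source filter
 * configuration chosen above, and the loop that follows programs the
 * SAMPLER_COEF_NB (8) luma and chroma coefficient words, stepping
 * 4 bytes per phase/tap register from HDA_COEFF_{Y,C}_PH1_TAP123 up to
 * HDA_COEFF_{Y,C}_PH4_TAP456.
 */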
hda_write(hda , src_filter_y, HDA_ANA_SRC_Y_CFG); + hda_write(hda, src_filter_c, HDA_ANA_SRC_C_CFG); + for (i = 0; i < SAMPLER_COEF_NB; i++) { + hda_write(hda, coef_y[i], HDA_COEFF_Y_PH1_TAP123 + i * 4); + hda_write(hda, coef_c[i], HDA_COEFF_C_PH1_TAP123 + i * 4); + } + + /* Configure main HDFormatter */ + val = 0; + val |= (hda->mode.flags & DRM_MODE_FLAG_INTERLACE) ? + 0 : CFG_AWG_ASYNC_VSYNC_MTD; + val |= (CFG_PBPR_SYNC_OFF_VAL << CFG_PBPR_SYNC_OFF_SHIFT); + val |= filter_mode; + hda_write(hda, val, HDA_ANA_CFG); + + /* Configure AWG */ + sti_hda_configure_awg(hda, hda_supported_modes[mode_idx].awg_instr, + hda_supported_modes[mode_idx].nb_instr); + + /* Enable AWG */ + val = hda_read(hda, HDA_ANA_CFG); + val |= CFG_AWG_ASYNC_EN; + hda_write(hda, val, HDA_ANA_CFG); + + hda->enabled = true; +} + +static void sti_hda_set_mode(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct sti_hda *hda = bridge->driver_private; + u32 mode_idx; + int hddac_rate; + int ret; + + DRM_DEBUG_DRIVER("\n"); + + memcpy(&hda->mode, mode, sizeof(struct drm_display_mode)); + + if (!hda_get_mode_idx(hda->mode, &mode_idx)) { + DRM_ERROR("Undefined mode\n"); + return; + } + + switch (hda_supported_modes[mode_idx].vid_cat) { + case VID_HD_74M: + /* HD use alternate 2x filter */ + hddac_rate = mode->clock * 1000 * 2; + break; + case VID_ED: + /* ED uses 4x filter */ + hddac_rate = mode->clock * 1000 * 4; + break; + default: + DRM_ERROR("Undefined mode\n"); + return; + } + + /* HD DAC = 148.5Mhz or 108 Mhz */ + ret = clk_set_rate(hda->clk_hddac, hddac_rate); + if (ret < 0) + DRM_ERROR("Cannot set rate (%dHz) for hda_hddac clk\n", + hddac_rate); + + /* HDformatter clock = compositor clock */ + ret = clk_set_rate(hda->clk_pix, mode->clock * 1000); + if (ret < 0) + DRM_ERROR("Cannot set rate (%dHz) for hda_pix clk\n", + mode->clock * 1000); +} + +static void sti_hda_bridge_nope(struct drm_bridge *bridge) +{ + /* do nothing */ +} + +static void sti_hda_brigde_destroy(struct drm_bridge *bridge) +{ + drm_bridge_cleanup(bridge); + kfree(bridge); +} + +static const struct drm_bridge_funcs sti_hda_bridge_funcs = { + .pre_enable = sti_hda_pre_enable, + .enable = sti_hda_bridge_nope, + .disable = sti_hda_disable, + .post_disable = sti_hda_bridge_nope, + .mode_set = sti_hda_set_mode, + .destroy = sti_hda_brigde_destroy, +}; + +static int sti_hda_connector_get_modes(struct drm_connector *connector) +{ + unsigned int i; + int count = 0; + struct sti_hda_connector *hda_connector + = to_sti_hda_connector(connector); + struct sti_hda *hda = hda_connector->hda; + + DRM_DEBUG_DRIVER("\n"); + + for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++) { + struct drm_display_mode *mode = + drm_mode_duplicate(hda->drm_dev, + &hda_supported_modes[i].mode); + if (!mode) + continue; + mode->vrefresh = drm_mode_vrefresh(mode); + + /* the first mode is the preferred mode */ + if (i == 0) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + count++; + } + + drm_mode_sort(&connector->modes); + + return count; +} + +#define CLK_TOLERANCE_HZ 50 + +static int sti_hda_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + int target = mode->clock * 1000; + int target_min = target - CLK_TOLERANCE_HZ; + int target_max = target + CLK_TOLERANCE_HZ; + int result; + int idx; + struct sti_hda_connector *hda_connector + = to_sti_hda_connector(connector); + struct sti_hda *hda = hda_connector->hda; + + if (!hda_get_mode_idx(*mode, 
&idx)) { + return MODE_BAD; + } else { + result = clk_round_rate(hda->clk_pix, target); + + DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n", + target, result); + + if ((result < target_min) || (result > target_max)) { + DRM_DEBUG_DRIVER("hda pixclk=%d not supported\n", + target); + return MODE_BAD; + } + } + + return MODE_OK; +} + +struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector) +{ + struct sti_hda_connector *hda_connector + = to_sti_hda_connector(connector); + + /* Best encoder is the one associated during connector creation */ + return hda_connector->encoder; +} + +static struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = { + .get_modes = sti_hda_connector_get_modes, + .mode_valid = sti_hda_connector_mode_valid, + .best_encoder = sti_hda_best_encoder, +}; + +static enum drm_connector_status +sti_hda_connector_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +static void sti_hda_connector_destroy(struct drm_connector *connector) +{ + struct sti_hda_connector *hda_connector + = to_sti_hda_connector(connector); + + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(hda_connector); +} + +static struct drm_connector_funcs sti_hda_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = sti_hda_connector_detect, + .destroy = sti_hda_connector_destroy, +}; + +static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->encoder_type == DRM_MODE_ENCODER_DAC) + return encoder; + } + + return NULL; +} + +static int sti_hda_bind(struct device *dev, struct device *master, void *data) +{ + struct sti_hda *hda = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct drm_encoder *encoder; + struct sti_hda_connector *connector; + struct drm_connector *drm_connector; + struct drm_bridge *bridge; + int err; + + /* Set the drm device handle */ + hda->drm_dev = drm_dev; + + encoder = sti_hda_find_encoder(drm_dev); + if (!encoder) + return -ENOMEM; + + connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); + if (!connector) + return -ENOMEM; + + connector->hda = hda; + + bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return -ENOMEM; + + bridge->driver_private = hda; + drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs); + + encoder->bridge = bridge; + connector->encoder = encoder; + + drm_connector = (struct drm_connector *)connector; + + drm_connector->polled = DRM_CONNECTOR_POLL_HPD; + + drm_connector_init(drm_dev, drm_connector, + &sti_hda_connector_funcs, DRM_MODE_CONNECTOR_Component); + drm_connector_helper_add(drm_connector, + &sti_hda_connector_helper_funcs); + + err = drm_connector_register(drm_connector); + if (err) + goto err_connector; + + err = drm_mode_connector_attach_encoder(drm_connector, encoder); + if (err) { + DRM_ERROR("Failed to attach a connector to a encoder\n"); + goto err_sysfs; + } + + return 0; + +err_sysfs: + drm_connector_unregister(drm_connector); +err_connector: + drm_bridge_cleanup(bridge); + drm_connector_cleanup(drm_connector); + return -EINVAL; +} + +static void sti_hda_unbind(struct device *dev, + struct device *master, void *data) +{ + /* do nothing */ +} + +static const struct component_ops sti_hda_ops = { + .bind = sti_hda_bind, + .unbind = sti_hda_unbind, +}; + +static int 
sti_hda_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sti_hda *hda; + struct resource *res; + + DRM_INFO("%s\n", __func__); + + hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL); + if (!hda) + return -ENOMEM; + + hda->dev = pdev->dev; + + /* Get resources */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg"); + if (!res) { + DRM_ERROR("Invalid hda resource\n"); + return -ENOMEM; + } + hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!hda->regs) + return -ENOMEM; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "video-dacs-ctrl"); + if (res) { + hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start, + resource_size(res)); + if (!hda->video_dacs_ctrl) + return -ENOMEM; + } else { + /* If no existing video-dacs-ctrl resource continue the probe */ + DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n"); + hda->video_dacs_ctrl = NULL; + } + + /* Get clock resources */ + hda->clk_pix = devm_clk_get(dev, "pix"); + if (IS_ERR(hda->clk_pix)) { + DRM_ERROR("Cannot get hda_pix clock\n"); + return PTR_ERR(hda->clk_pix); + } + + hda->clk_hddac = devm_clk_get(dev, "hddac"); + if (IS_ERR(hda->clk_hddac)) { + DRM_ERROR("Cannot get hda_hddac clock\n"); + return PTR_ERR(hda->clk_hddac); + } + + platform_set_drvdata(pdev, hda); + + return component_add(&pdev->dev, &sti_hda_ops); +} + +static int sti_hda_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &sti_hda_ops); + return 0; +} + +static const struct of_device_id hda_of_match[] = { + { .compatible = "st,stih416-hda", }, + { .compatible = "st,stih407-hda", }, + { /* end node */ } +}; +MODULE_DEVICE_TABLE(of, hda_of_match); + +struct platform_driver sti_hda_driver = { + .driver = { + .name = "sti-hda", + .owner = THIS_MODULE, + .of_match_table = hda_of_match, + }, + .probe = sti_hda_probe, + .remove = sti_hda_remove, +}; + +module_platform_driver(sti_hda_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c new file mode 100644 index 000000000000..b22968c08d1f --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -0,0 +1,809 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/hdmi.h> +#include <linux/module.h> +#include <linux/of_gpio.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_edid.h> + +#include "sti_hdmi.h" +#include "sti_hdmi_tx3g4c28phy.h" +#include "sti_hdmi_tx3g0c55phy.h" +#include "sti_vtg.h" + +#define HDMI_CFG 0x0000 +#define HDMI_INT_EN 0x0004 +#define HDMI_INT_STA 0x0008 +#define HDMI_INT_CLR 0x000C +#define HDMI_STA 0x0010 +#define HDMI_ACTIVE_VID_XMIN 0x0100 +#define HDMI_ACTIVE_VID_XMAX 0x0104 +#define HDMI_ACTIVE_VID_YMIN 0x0108 +#define HDMI_ACTIVE_VID_YMAX 0x010C +#define HDMI_DFLT_CHL0_DAT 0x0110 +#define HDMI_DFLT_CHL1_DAT 0x0114 +#define HDMI_DFLT_CHL2_DAT 0x0118 +#define HDMI_SW_DI_1_HEAD_WORD 0x0210 +#define HDMI_SW_DI_1_PKT_WORD0 0x0214 +#define HDMI_SW_DI_1_PKT_WORD1 0x0218 +#define HDMI_SW_DI_1_PKT_WORD2 0x021C +#define HDMI_SW_DI_1_PKT_WORD3 0x0220 +#define HDMI_SW_DI_1_PKT_WORD4 0x0224 +#define HDMI_SW_DI_1_PKT_WORD5 0x0228 +#define HDMI_SW_DI_1_PKT_WORD6 0x022C +#define HDMI_SW_DI_CFG 0x0230 + +#define HDMI_IFRAME_SLOT_AVI 1 + +#define XCAT(prefix, x, suffix) prefix ## x ## suffix +#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD) +#define HDMI_SW_DI_N_PKT_WORD0(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD0) +#define HDMI_SW_DI_N_PKT_WORD1(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD1) +#define HDMI_SW_DI_N_PKT_WORD2(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD2) +#define HDMI_SW_DI_N_PKT_WORD3(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD3) +#define HDMI_SW_DI_N_PKT_WORD4(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD4) +#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5) +#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6) + +#define HDMI_IFRAME_DISABLED 0x0 +#define HDMI_IFRAME_SINGLE_SHOT 0x1 +#define HDMI_IFRAME_FIELD 0x2 +#define HDMI_IFRAME_FRAME 0x3 +#define HDMI_IFRAME_MASK 0x3 +#define HDMI_IFRAME_CFG_DI_N(x, n) ((x) << ((n-1)*4)) /* n from 1 to 6 */ + +#define HDMI_CFG_DEVICE_EN BIT(0) +#define HDMI_CFG_HDMI_NOT_DVI BIT(1) +#define HDMI_CFG_HDCP_EN BIT(2) +#define HDMI_CFG_ESS_NOT_OESS BIT(3) +#define HDMI_CFG_H_SYNC_POL_NEG BIT(4) +#define HDMI_CFG_SINK_TERM_DET_EN BIT(5) +#define HDMI_CFG_V_SYNC_POL_NEG BIT(6) +#define HDMI_CFG_422_EN BIT(8) +#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12) +#define HDMI_CFG_FIFO_UNDERRUN_CLR BIT(13) +#define HDMI_CFG_SW_RST_EN BIT(31) + +#define HDMI_INT_GLOBAL BIT(0) +#define HDMI_INT_SW_RST BIT(1) +#define HDMI_INT_PIX_CAP BIT(3) +#define HDMI_INT_HOT_PLUG BIT(4) +#define HDMI_INT_DLL_LCK BIT(5) +#define HDMI_INT_NEW_FRAME BIT(6) +#define HDMI_INT_GENCTRL_PKT BIT(7) +#define HDMI_INT_SINK_TERM_PRESENT BIT(11) + +#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \ + | HDMI_INT_DLL_LCK \ + | HDMI_INT_HOT_PLUG \ + | HDMI_INT_GLOBAL) + +#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \ + | HDMI_INT_GENCTRL_PKT \ + | HDMI_INT_NEW_FRAME \ + | HDMI_INT_DLL_LCK \ + | HDMI_INT_HOT_PLUG \ + | HDMI_INT_PIX_CAP \ + | HDMI_INT_SW_RST \ + | HDMI_INT_GLOBAL) + +#define HDMI_STA_SW_RST BIT(1) + +struct sti_hdmi_connector { + struct drm_connector drm_connector; + struct drm_encoder *encoder; + struct sti_hdmi *hdmi; +}; + +#define to_sti_hdmi_connector(x) \ + container_of(x, struct sti_hdmi_connector, drm_connector) + +u32 hdmi_read(struct sti_hdmi *hdmi, int offset) +{ + return readl(hdmi->regs + offset); +} + +void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset) +{ 
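/*
 * Simple MMIO accessor over the "hdmi-reg" region ioremapped in
 * sti_hdmi_probe(); paired with hdmi_read() above.
 *
 * Worked example of the slot macros defined earlier: for the AVI slot
 * (HDMI_IFRAME_SLOT_AVI == 1), HDMI_SW_DI_N_HEAD_WORD(1) expands
 * through XCAT to HDMI_SW_DI_1_HEAD_WORD (0x0210), and
 * HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, 1) evaluates to 0x2 << 0,
 * i.e. "send this infoframe every field".
 */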
+ writel(val, hdmi->regs + offset); +} + +/** + * HDMI interrupt handler threaded + * + * @irq: irq number + * @arg: connector structure + */ +static irqreturn_t hdmi_irq_thread(int irq, void *arg) +{ + struct sti_hdmi *hdmi = arg; + + /* Hot plug/unplug IRQ */ + if (hdmi->irq_status & HDMI_INT_HOT_PLUG) { + /* read gpio to get the status */ + hdmi->hpd = gpio_get_value(hdmi->hpd_gpio); + if (hdmi->drm_dev) + drm_helper_hpd_irq_event(hdmi->drm_dev); + } + + /* Sw reset and PLL lock are exclusive so we can use the same + * event to signal them + */ + if (hdmi->irq_status & (HDMI_INT_SW_RST | HDMI_INT_DLL_LCK)) { + hdmi->event_received = true; + wake_up_interruptible(&hdmi->wait_event); + } + + return IRQ_HANDLED; +} + +/** + * HDMI interrupt handler + * + * @irq: irq number + * @arg: connector structure + */ +static irqreturn_t hdmi_irq(int irq, void *arg) +{ + struct sti_hdmi *hdmi = arg; + + /* read interrupt status */ + hdmi->irq_status = hdmi_read(hdmi, HDMI_INT_STA); + + /* clear interrupt status */ + hdmi_write(hdmi, hdmi->irq_status, HDMI_INT_CLR); + + /* force sync bus write */ + hdmi_read(hdmi, HDMI_INT_STA); + + return IRQ_WAKE_THREAD; +} + +/** + * Set hdmi active area depending on the drm display mode selected + * + * @hdmi: pointer on the hdmi internal structure + */ +static void hdmi_active_area(struct sti_hdmi *hdmi) +{ + u32 xmin, xmax; + u32 ymin, ymax; + + xmin = sti_vtg_get_pixel_number(hdmi->mode, 0); + xmax = sti_vtg_get_pixel_number(hdmi->mode, hdmi->mode.hdisplay - 1); + ymin = sti_vtg_get_line_number(hdmi->mode, 0); + ymax = sti_vtg_get_line_number(hdmi->mode, hdmi->mode.vdisplay - 1); + + hdmi_write(hdmi, xmin, HDMI_ACTIVE_VID_XMIN); + hdmi_write(hdmi, xmax, HDMI_ACTIVE_VID_XMAX); + hdmi_write(hdmi, ymin, HDMI_ACTIVE_VID_YMIN); + hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX); +} + +/** + * Overall hdmi configuration + * + * @hdmi: pointer on the hdmi internal structure + */ +static void hdmi_config(struct sti_hdmi *hdmi) +{ + u32 conf; + + DRM_DEBUG_DRIVER("\n"); + + /* Clear overrun and underrun fifo */ + conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR; + + /* Enable HDMI mode not DVI */ + conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS; + + /* Enable sink term detection */ + conf |= HDMI_CFG_SINK_TERM_DET_EN; + + /* Set Hsync polarity */ + if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) { + DRM_DEBUG_DRIVER("H Sync Negative\n"); + conf |= HDMI_CFG_H_SYNC_POL_NEG; + } + + /* Set Vsync polarity */ + if (hdmi->mode.flags & DRM_MODE_FLAG_NVSYNC) { + DRM_DEBUG_DRIVER("V Sync Negative\n"); + conf |= HDMI_CFG_V_SYNC_POL_NEG; + } + + /* Enable HDMI */ + conf |= HDMI_CFG_DEVICE_EN; + + hdmi_write(hdmi, conf, HDMI_CFG); +} + +/** + * Prepare and configure the AVI infoframe + * + * AVI infoframe are transmitted at least once per two video field and + * contains information about HDMI transmission mode such as color space, + * colorimetry, ... 
+ * + * @hdmi: pointer on the hdmi internal structure + * + * Return negative value if error occurs + */ +static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) +{ + struct drm_display_mode *mode = &hdmi->mode; + struct hdmi_avi_infoframe infoframe; + u8 buffer[HDMI_INFOFRAME_SIZE(AVI)]; + u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE; + u32 val; + int ret; + + DRM_DEBUG_DRIVER("\n"); + + ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode); + if (ret < 0) { + DRM_ERROR("failed to setup AVI infoframe: %d\n", ret); + return ret; + } + + /* fixed infoframe configuration not linked to the mode */ + infoframe.colorspace = HDMI_COLORSPACE_RGB; + infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + infoframe.colorimetry = HDMI_COLORIMETRY_NONE; + + ret = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer)); + if (ret < 0) { + DRM_ERROR("failed to pack AVI infoframe: %d\n", ret); + return ret; + } + + /* Disable transmission slot for AVI infoframe */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + + /* Infoframe header */ + val = buffer[0x0]; + val |= buffer[0x1] << 8; + val |= buffer[0x2] << 16; + hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI)); + + /* Infoframe packet bytes */ + val = frame[0x0]; + val |= frame[0x1] << 8; + val |= frame[0x2] << 16; + val |= frame[0x3] << 24; + hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI)); + + val = frame[0x4]; + val |= frame[0x5] << 8; + val |= frame[0x6] << 16; + val |= frame[0x7] << 24; + hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI)); + + val = frame[0x8]; + val |= frame[0x9] << 8; + val |= frame[0xA] << 16; + val |= frame[0xB] << 24; + hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI)); + + val = frame[0xC]; + hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI)); + + /* Enable transmission slot for AVI infoframe + * According to the hdmi specification, AVI infoframe should be + * transmitted at least once per two video fields + */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + + return 0; +} + +/** + * Software reset of the hdmi subsystem + * + * @hdmi: pointer on the hdmi internal structure + * + */ +#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */ +static void hdmi_swreset(struct sti_hdmi *hdmi) +{ + u32 val; + + DRM_DEBUG_DRIVER("\n"); + + /* Enable hdmi_audio clock only during hdmi reset */ + if (clk_prepare_enable(hdmi->clk_audio)) + DRM_INFO("Failed to prepare/enable hdmi_audio clk\n"); + + /* Sw reset */ + hdmi->event_received = false; + + val = hdmi_read(hdmi, HDMI_CFG); + val |= HDMI_CFG_SW_RST_EN; + hdmi_write(hdmi, val, HDMI_CFG); + + /* Wait reset completed */ + wait_event_interruptible_timeout(hdmi->wait_event, + hdmi->event_received == true, + msecs_to_jiffies + (HDMI_TIMEOUT_SWRESET)); + + /* + * HDMI_STA_SW_RST bit is set to '1' when SW_RST bit in HDMI_CFG is + * set to '1' and clk_audio is running. + */ + if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_SW_RST) == 0) + DRM_DEBUG_DRIVER("Warning: HDMI sw reset timeout occurs\n"); + + val = hdmi_read(hdmi, HDMI_CFG); + val &= ~HDMI_CFG_SW_RST_EN; + hdmi_write(hdmi, val, HDMI_CFG); + + /* Disable hdmi_audio clock. 
Not used anymore for drm purpose */ + clk_disable_unprepare(hdmi->clk_audio); +} + +static void sti_hdmi_disable(struct drm_bridge *bridge) +{ + struct sti_hdmi *hdmi = bridge->driver_private; + + u32 val = hdmi_read(hdmi, HDMI_CFG); + + if (!hdmi->enabled) + return; + + DRM_DEBUG_DRIVER("\n"); + + /* Disable HDMI */ + val &= ~HDMI_CFG_DEVICE_EN; + hdmi_write(hdmi, val, HDMI_CFG); + + hdmi_write(hdmi, 0xffffffff, HDMI_INT_CLR); + + /* Stop the phy */ + hdmi->phy_ops->stop(hdmi); + + /* Set the default channel data to be a dark red */ + hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT); + hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT); + hdmi_write(hdmi, 0x0060, HDMI_DFLT_CHL2_DAT); + + /* Disable/unprepare hdmi clock */ + clk_disable_unprepare(hdmi->clk_phy); + clk_disable_unprepare(hdmi->clk_tmds); + clk_disable_unprepare(hdmi->clk_pix); + + hdmi->enabled = false; +} + +static void sti_hdmi_pre_enable(struct drm_bridge *bridge) +{ + struct sti_hdmi *hdmi = bridge->driver_private; + + DRM_DEBUG_DRIVER("\n"); + + if (hdmi->enabled) + return; + + /* Prepare/enable clocks */ + if (clk_prepare_enable(hdmi->clk_pix)) + DRM_ERROR("Failed to prepare/enable hdmi_pix clk\n"); + if (clk_prepare_enable(hdmi->clk_tmds)) + DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n"); + if (clk_prepare_enable(hdmi->clk_phy)) + DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n"); + + hdmi->enabled = true; + + /* Program hdmi serializer and start phy */ + if (!hdmi->phy_ops->start(hdmi)) { + DRM_ERROR("Unable to start hdmi phy\n"); + return; + } + + /* Program hdmi active area */ + hdmi_active_area(hdmi); + + /* Enable working interrupts */ + hdmi_write(hdmi, HDMI_WORKING_INT, HDMI_INT_EN); + + /* Program hdmi config */ + hdmi_config(hdmi); + + /* Program AVI infoframe */ + if (hdmi_avi_infoframe_config(hdmi)) + DRM_ERROR("Unable to configure AVI infoframe\n"); + + /* Sw reset */ + hdmi_swreset(hdmi); +} + +static void sti_hdmi_set_mode(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct sti_hdmi *hdmi = bridge->driver_private; + int ret; + + DRM_DEBUG_DRIVER("\n"); + + /* Copy the drm display mode in the connector local structure */ + memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode)); + + /* Update clock framerate according to the selected mode */ + ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000); + if (ret < 0) { + DRM_ERROR("Cannot set rate (%dHz) for hdmi_pix clk\n", + mode->clock * 1000); + return; + } + ret = clk_set_rate(hdmi->clk_phy, mode->clock * 1000); + if (ret < 0) { + DRM_ERROR("Cannot set rate (%dHz) for hdmi_rejection_pll clk\n", + mode->clock * 1000); + return; + } +} + +static void sti_hdmi_bridge_nope(struct drm_bridge *bridge) +{ + /* do nothing */ +} + +static void sti_hdmi_brigde_destroy(struct drm_bridge *bridge) +{ + drm_bridge_cleanup(bridge); + kfree(bridge); +} + +static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = { + .pre_enable = sti_hdmi_pre_enable, + .enable = sti_hdmi_bridge_nope, + .disable = sti_hdmi_disable, + .post_disable = sti_hdmi_bridge_nope, + .mode_set = sti_hdmi_set_mode, + .destroy = sti_hdmi_brigde_destroy, +}; + +static int sti_hdmi_connector_get_modes(struct drm_connector *connector) +{ + struct i2c_adapter *i2c_adap; + struct edid *edid; + int count; + + DRM_DEBUG_DRIVER("\n"); + + i2c_adap = i2c_get_adapter(1); + if (!i2c_adap) + goto fail; + + edid = drm_get_edid(connector, i2c_adap); + if (!edid) + goto fail; + + count = drm_add_edid_modes(connector, edid); + 
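/*
 * drm_add_edid_modes() has filled the connector's probed mode list from
 * the EDID read over i2c bus 1 (hard-coded above); the raw EDID blob is
 * also exported to userspace through the standard "EDID" connector
 * property by the call below.
 */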
drm_mode_connector_update_edid_property(connector, edid); + + kfree(edid); + return count; + +fail: + DRM_ERROR("Can not read HDMI EDID\n"); + return 0; +} + +#define CLK_TOLERANCE_HZ 50 + +static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + int target = mode->clock * 1000; + int target_min = target - CLK_TOLERANCE_HZ; + int target_max = target + CLK_TOLERANCE_HZ; + int result; + struct sti_hdmi_connector *hdmi_connector + = to_sti_hdmi_connector(connector); + struct sti_hdmi *hdmi = hdmi_connector->hdmi; + + + result = clk_round_rate(hdmi->clk_pix, target); + + DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n", + target, result); + + if ((result < target_min) || (result > target_max)) { + DRM_DEBUG_DRIVER("hdmi pixclk=%d not supported\n", target); + return MODE_BAD; + } + + return MODE_OK; +} + +struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector) +{ + struct sti_hdmi_connector *hdmi_connector + = to_sti_hdmi_connector(connector); + + /* Best encoder is the one associated during connector creation */ + return hdmi_connector->encoder; +} + +static struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = { + .get_modes = sti_hdmi_connector_get_modes, + .mode_valid = sti_hdmi_connector_mode_valid, + .best_encoder = sti_hdmi_best_encoder, +}; + +/* get detection status of display device */ +static enum drm_connector_status +sti_hdmi_connector_detect(struct drm_connector *connector, bool force) +{ + struct sti_hdmi_connector *hdmi_connector + = to_sti_hdmi_connector(connector); + struct sti_hdmi *hdmi = hdmi_connector->hdmi; + + DRM_DEBUG_DRIVER("\n"); + + if (hdmi->hpd) { + DRM_DEBUG_DRIVER("hdmi cable connected\n"); + return connector_status_connected; + } + + DRM_DEBUG_DRIVER("hdmi cable disconnected\n"); + return connector_status_disconnected; +} + +static void sti_hdmi_connector_destroy(struct drm_connector *connector) +{ + struct sti_hdmi_connector *hdmi_connector + = to_sti_hdmi_connector(connector); + + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(hdmi_connector); +} + +static struct drm_connector_funcs sti_hdmi_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = sti_hdmi_connector_detect, + .destroy = sti_hdmi_connector_destroy, +}; + +static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) + return encoder; + } + + return NULL; +} + +static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct drm_encoder *encoder; + struct sti_hdmi_connector *connector; + struct drm_connector *drm_connector; + struct drm_bridge *bridge; + struct i2c_adapter *i2c_adap; + int err; + + i2c_adap = i2c_get_adapter(1); + if (!i2c_adap) + return -EPROBE_DEFER; + + /* Set the drm device handle */ + hdmi->drm_dev = drm_dev; + + encoder = sti_hdmi_find_encoder(drm_dev); + if (!encoder) + return -ENOMEM; + + connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); + if (!connector) + return -ENOMEM; + + connector->hdmi = hdmi; + + bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); + if (!bridge) + return -ENOMEM; + + bridge->driver_private = hdmi; + drm_bridge_init(drm_dev, bridge, 
&sti_hdmi_bridge_funcs); + + encoder->bridge = bridge; + connector->encoder = encoder; + + drm_connector = (struct drm_connector *)connector; + + drm_connector->polled = DRM_CONNECTOR_POLL_HPD; + + drm_connector_init(drm_dev, drm_connector, + &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(drm_connector, + &sti_hdmi_connector_helper_funcs); + + err = drm_connector_register(drm_connector); + if (err) + goto err_connector; + + err = drm_mode_connector_attach_encoder(drm_connector, encoder); + if (err) { + DRM_ERROR("Failed to attach a connector to a encoder\n"); + goto err_sysfs; + } + + /* Enable default interrupts */ + hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN); + + return 0; + +err_sysfs: + drm_connector_unregister(drm_connector); +err_connector: + drm_bridge_cleanup(bridge); + drm_connector_cleanup(drm_connector); + return -EINVAL; +} + +static void sti_hdmi_unbind(struct device *dev, + struct device *master, void *data) +{ + /* do nothing */ +} + +static const struct component_ops sti_hdmi_ops = { + .bind = sti_hdmi_bind, + .unbind = sti_hdmi_unbind, +}; + +static const struct of_device_id hdmi_of_match[] = { + { + .compatible = "st,stih416-hdmi", + .data = &tx3g0c55phy_ops, + }, { + .compatible = "st,stih407-hdmi", + .data = &tx3g4c28phy_ops, + }, { + /* end node */ + } +}; +MODULE_DEVICE_TABLE(of, hdmi_of_match); + +static int sti_hdmi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sti_hdmi *hdmi; + struct device_node *np = dev->of_node; + struct resource *res; + int ret; + + DRM_INFO("%s\n", __func__); + + hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) + return -ENOMEM; + + hdmi->dev = pdev->dev; + + /* Get resources */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg"); + if (!res) { + DRM_ERROR("Invalid hdmi resource\n"); + return -ENOMEM; + } + hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!hdmi->regs) + return -ENOMEM; + + if (of_device_is_compatible(np, "st,stih416-hdmi")) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "syscfg"); + if (!res) { + DRM_ERROR("Invalid syscfg resource\n"); + return -ENOMEM; + } + hdmi->syscfg = devm_ioremap_nocache(dev, res->start, + resource_size(res)); + if (!hdmi->syscfg) + return -ENOMEM; + + } + + hdmi->phy_ops = (struct hdmi_phy_ops *) + of_match_node(hdmi_of_match, np)->data; + + /* Get clock resources */ + hdmi->clk_pix = devm_clk_get(dev, "pix"); + if (IS_ERR(hdmi->clk_pix)) { + DRM_ERROR("Cannot get hdmi_pix clock\n"); + return PTR_ERR(hdmi->clk_pix); + } + + hdmi->clk_tmds = devm_clk_get(dev, "tmds"); + if (IS_ERR(hdmi->clk_tmds)) { + DRM_ERROR("Cannot get hdmi_tmds clock\n"); + return PTR_ERR(hdmi->clk_tmds); + } + + hdmi->clk_phy = devm_clk_get(dev, "phy"); + if (IS_ERR(hdmi->clk_phy)) { + DRM_ERROR("Cannot get hdmi_phy clock\n"); + return PTR_ERR(hdmi->clk_phy); + } + + hdmi->clk_audio = devm_clk_get(dev, "audio"); + if (IS_ERR(hdmi->clk_audio)) { + DRM_ERROR("Cannot get hdmi_audio clock\n"); + return PTR_ERR(hdmi->clk_audio); + } + + hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0); + if (hdmi->hpd_gpio < 0) { + DRM_ERROR("Failed to get hdmi hpd-gpio\n"); + return -EIO; + } + + hdmi->hpd = gpio_get_value(hdmi->hpd_gpio); + + init_waitqueue_head(&hdmi->wait_event); + + hdmi->irq = platform_get_irq_byname(pdev, "irq"); + + ret = devm_request_threaded_irq(dev, hdmi->irq, hdmi_irq, + hdmi_irq_thread, IRQF_ONESHOT, dev_name(dev), hdmi); + if (ret) { + DRM_ERROR("Failed to 
register HDMI interrupt\n"); + return ret; + } + + hdmi->reset = devm_reset_control_get(dev, "hdmi"); + /* Take hdmi out of reset */ + if (!IS_ERR(hdmi->reset)) + reset_control_deassert(hdmi->reset); + + platform_set_drvdata(pdev, hdmi); + + return component_add(&pdev->dev, &sti_hdmi_ops); +} + +static int sti_hdmi_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &sti_hdmi_ops); + return 0; +} + +struct platform_driver sti_hdmi_driver = { + .driver = { + .name = "sti-hdmi", + .owner = THIS_MODULE, + .of_match_table = hdmi_of_match, + }, + .probe = sti_hdmi_probe, + .remove = sti_hdmi_remove, +}; + +module_platform_driver(sti_hdmi_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h new file mode 100644 index 000000000000..61bec6557ceb --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hdmi.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_HDMI_H_ +#define _STI_HDMI_H_ + +#include <linux/platform_device.h> + +#include <drm/drmP.h> + +#define HDMI_STA 0x0010 +#define HDMI_STA_DLL_LCK BIT(5) + +struct sti_hdmi; + +struct hdmi_phy_ops { + bool (*start)(struct sti_hdmi *hdmi); + void (*stop)(struct sti_hdmi *hdmi); +}; + +/** + * STI hdmi structure + * + * @dev: driver device + * @drm_dev: pointer to drm device + * @mode: current display mode selected + * @regs: hdmi register + * @syscfg: syscfg register for pll rejection configuration + * @clk_pix: hdmi pixel clock + * @clk_tmds: hdmi tmds clock + * @clk_phy: hdmi phy clock + * @clk_audio: hdmi audio clock + * @irq: hdmi interrupt number + * @irq_status: interrupt status register + * @phy_ops: phy start/stop operations + * @enabled: true if hdmi is enabled else false + * @hpd_gpio: hdmi hot plug detect gpio number + * @hpd: hot plug detect status + * @wait_event: wait event + * @event_received: wait event status + * @reset: reset control of the hdmi phy + */ +struct sti_hdmi { + struct device dev; + struct drm_device *drm_dev; + struct drm_display_mode mode; + void __iomem *regs; + void __iomem *syscfg; + struct clk *clk_pix; + struct clk *clk_tmds; + struct clk *clk_phy; + struct clk *clk_audio; + int irq; + u32 irq_status; + struct hdmi_phy_ops *phy_ops; + bool enabled; + int hpd_gpio; + bool hpd; + wait_queue_head_t wait_event; + bool event_received; + struct reset_control *reset; +}; + +u32 hdmi_read(struct sti_hdmi *hdmi, int offset); +void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset); + +/** + * hdmi phy config structure + * + * A pointer to an array of these structures is passed to a TMDS (HDMI) output + * via the control interface to provide board and SoC specific + * configurations of the HDMI PHY. Each entry in the array specifies a hardware + * specific configuration for a given TMDS clock frequency range. 
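+ * Frequencies are expressed in Hz. The PHY start functions scan the array in
+ * order and program the first entry whose [min_tmds_freq, max_tmds_freq]
+ * range contains the TMDS clock (both bounds inclusive).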
+ * + * @min_tmds_freq: Lower bound of TMDS clock frequency this entry applies to + * @max_tmds_freq: Upper bound of TMDS clock frequency this entry applies to + * @config: SoC specific register configuration + */ +struct hdmi_phy_config { + u32 min_tmds_freq; + u32 max_tmds_freq; + u32 config[4]; +}; + +#endif diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c new file mode 100644 index 000000000000..49ae8e44b285 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c @@ -0,0 +1,336 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#include "sti_hdmi_tx3g0c55phy.h" + +#define HDMI_SRZ_PLL_CFG 0x0504 +#define HDMI_SRZ_TAP_1 0x0508 +#define HDMI_SRZ_TAP_2 0x050C +#define HDMI_SRZ_TAP_3 0x0510 +#define HDMI_SRZ_CTRL 0x0514 + +#define HDMI_SRZ_PLL_CFG_POWER_DOWN BIT(0) +#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT 1 +#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ 0 +#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ 1 +#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ 2 +#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ 3 +#define HDMI_SRZ_PLL_CFG_VCOR_MASK 3 +#define HDMI_SRZ_PLL_CFG_VCOR(x) (x << HDMI_SRZ_PLL_CFG_VCOR_SHIFT) +#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT 8 +#define HDMI_SRZ_PLL_CFG_NDIV_MASK (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT) +#define HDMI_SRZ_PLL_CFG_MODE_SHIFT 16 +#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ 0x1 +#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ 0x4 +#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ 0x5 +#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6 +#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ 0x7 +#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ 0x8 +#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ 0x9 +#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA +#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ 0xB +#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ 0xC +#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ 0xD +#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE +#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ 0xF +#define HDMI_SRZ_PLL_CFG_MODE_MASK 0xF +#define HDMI_SRZ_PLL_CFG_MODE(x) (x << HDMI_SRZ_PLL_CFG_MODE_SHIFT) + +#define HDMI_SRZ_CTRL_POWER_DOWN (1 << 0) +#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN (1 << 1) + +/* sysconf registers */ +#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858 /* SYSTEM_CONFIG2534 */ +#define HDMI_REJECTION_PLL_STATUS 0x0948 /* SYSTEM_CONFIG2594 */ + +#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0 +#define REJECTION_PLL_HDMI_ENABLE_MASK (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT) +#define REJECTION_PLL_HDMI_PDIV_SHIFT 24 +#define REJECTION_PLL_HDMI_PDIV_MASK (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT) +#define REJECTION_PLL_HDMI_NDIV_SHIFT 16 +#define REJECTION_PLL_HDMI_NDIV_MASK (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT) +#define REJECTION_PLL_HDMI_MDIV_SHIFT 8 +#define REJECTION_PLL_HDMI_MDIV_MASK (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT) + +#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0) + +#define HDMI_TIMEOUT_PLL_LOCK 50 /*milliseconds */ + +/** + * pll mode structure + * + * A pointer to an array of these structures is passed to a TMDS (HDMI) output + * via the control interface to provide board and SoC specific + * configurations of the HDMI PHY. Each entry in the array specifies a hardware + * specific configuration for a given TMDS clock frequency range. The array + * should be terminated with an entry that has all fields set to zero. 
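+ * In this driver, however, the table is a fixed-size array of NB_PLL_MODE
+ * entries and sti_hdmi_tx3g0c55phy_start() scans all of them looking for a
+ * range that contains the pixel clock.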
+ * + * @min: Lower bound of TMDS clock frequency this entry applies to + * @max: Upper bound of TMDS clock frequency this entry applies to + * @mode: SoC specific register configuration + */ +struct pllmode { + u32 min; + u32 max; + u32 mode; +}; + +#define NB_PLL_MODE 7 +static struct pllmode pllmodes[NB_PLL_MODE] = { + {13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ}, + {25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ}, + {27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ}, + {54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ}, + {72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ}, + {108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ}, + {148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ} +}; + +#define NB_HDMI_PHY_CONFIG 5 +static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = { + {0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} }, + {40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} }, + {140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} }, + {160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} }, + {250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} }, +}; + +#define PLL_CHANGE_DELAY 1 /* ms */ + +/** + * Disable the pll rejection + * + * @hdmi: pointer on the hdmi internal structure + * + * return true if the pll has been disabled + */ +static bool disable_pll_rejection(struct sti_hdmi *hdmi) +{ + u32 val; + + DRM_DEBUG_DRIVER("\n"); + + val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION); + val &= ~REJECTION_PLL_HDMI_ENABLE_MASK; + writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION); + + msleep(PLL_CHANGE_DELAY); + val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS); + + return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK); +} + +/** + * Enable the old BCH/rejection PLL is now reused to provide the CLKPXPLL + * clock input to the new PHY PLL that generates the serializer clock + * (TMDS*10) and the TMDS clock which is now fed back into the HDMI + * formatter instead of the TMDS clock line from ClockGenB. + * + * @hdmi: pointer on the hdmi internal structure + * + * return true if pll has been correctly set + */ +static bool enable_pll_rejection(struct sti_hdmi *hdmi) +{ + unsigned int inputclock; + u32 mdiv, ndiv, pdiv, val; + + DRM_DEBUG_DRIVER("\n"); + + if (!disable_pll_rejection(hdmi)) + return false; + + inputclock = hdmi->mode.clock * 1000; + + DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock); + + + /* Power up the HDMI rejection PLL + * Note: On this SoC (stiH416) we are forced to have the input clock + * be equal to the HDMI pixel clock. + * + * The values here have been suggested by validation however they are + * still provisional and subject to change. + * + * PLLout = (Fin*Mdiv) / ((2 * Ndiv) / 2^Pdiv) + */ + if (inputclock < 50000000) { + /* + * For slower clocks we need to multiply more to keep the + * internal VCO frequency within the physical specification + * of the PLL. 
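+ * With these values PLLout = (Fin * 30) / ((2 * 240) / 2^4) = Fin, and the
+ * faster-clock settings below give (Fin * 30) / ((2 * 60) / 2^2) = Fin as
+ * well, so the rejection PLL output always tracks the input pixel clock.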
+ */ + pdiv = 4; + ndiv = 240; + mdiv = 30; + } else { + pdiv = 2; + ndiv = 60; + mdiv = 30; + } + + val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION); + + val &= ~(REJECTION_PLL_HDMI_PDIV_MASK | + REJECTION_PLL_HDMI_NDIV_MASK | + REJECTION_PLL_HDMI_MDIV_MASK | + REJECTION_PLL_HDMI_ENABLE_MASK); + + val |= (pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) | + (ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) | + (mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) | + (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT); + + writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION); + + msleep(PLL_CHANGE_DELAY); + val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS); + + return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK); +} + +/** + * Start hdmi phy macro cell tx3g0c55 + * + * @hdmi: pointer on the hdmi internal structure + * + * Return false if an error occur + */ +static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi) +{ + u32 ckpxpll = hdmi->mode.clock * 1000; + u32 val, tmdsck, freqvco, pllctrl = 0; + unsigned int i; + + if (!enable_pll_rejection(hdmi)) + return false; + + DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll); + + /* Assuming no pixel repetition and 24bits color */ + tmdsck = ckpxpll; + pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT; + + /* + * Setup the PLL mode parameter based on the ckpxpll. If we haven't got + * a clock frequency supported by one of the specific PLL modes then we + * will end up using the generic mode (0) which only supports a 10x + * multiplier, hence only 24bit color. + */ + for (i = 0; i < NB_PLL_MODE; i++) { + if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max) + pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode); + } + + freqvco = tmdsck * 10; + if (freqvco <= 425000000UL) + pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ); + else if (freqvco <= 850000000UL) + pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ); + else if (freqvco <= 1700000000UL) + pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ); + else if (freqvco <= 2970000000UL) + pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ); + else { + DRM_ERROR("PHY serializer clock out of range\n"); + goto err; + } + + /* + * Configure and power up the PHY PLL + */ + hdmi->event_received = false; + DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl); + hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG); + + /* wait PLL interrupt */ + wait_event_interruptible_timeout(hdmi->wait_event, + hdmi->event_received == true, + msecs_to_jiffies + (HDMI_TIMEOUT_PLL_LOCK)); + + if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) { + DRM_ERROR("hdmi phy pll not locked\n"); + goto err; + } + + DRM_DEBUG_DRIVER("got PHY PLL Lock\n"); + + /* + * To configure the source termination and pre-emphasis appropriately + * for different high speed TMDS clock frequencies a phy configuration + * table must be provided, tailored to the SoC and board combination. 
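+ * The first entry whose [min_tmds_freq, max_tmds_freq] range contains the
+ * TMDS clock is written to the TAP/CTRL registers; if no entry matches, the
+ * serializer is powered up below with no pre-emphasis or source termination.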
+ */ + for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) { + if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) && + (hdmiphy_config[i].max_tmds_freq >= tmdsck)) { + val = hdmiphy_config[i].config[0]; + hdmi_write(hdmi, val, HDMI_SRZ_TAP_1); + val = hdmiphy_config[i].config[1]; + hdmi_write(hdmi, val, HDMI_SRZ_TAP_2); + val = hdmiphy_config[i].config[2]; + hdmi_write(hdmi, val, HDMI_SRZ_TAP_3); + val = hdmiphy_config[i].config[3]; + val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN; + val &= ~HDMI_SRZ_CTRL_POWER_DOWN; + hdmi_write(hdmi, val, HDMI_SRZ_CTRL); + + DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n", + hdmiphy_config[i].config[0], + hdmiphy_config[i].config[1], + hdmiphy_config[i].config[2], + hdmiphy_config[i].config[3]); + return true; + } + } + + /* + * Default, power up the serializer with no pre-emphasis or source + * termination. + */ + hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1); + hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2); + hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3); + hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL); + + return true; + +err: + disable_pll_rejection(hdmi); + + return false; +} + +/** + * Stop hdmi phy macro cell tx3g0c55 + * + * @hdmi: pointer on the hdmi internal structure + */ +static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi) +{ + DRM_DEBUG_DRIVER("\n"); + + hdmi->event_received = false; + + hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL); + hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG); + + /* wait PLL interrupt */ + wait_event_interruptible_timeout(hdmi->wait_event, + hdmi->event_received == true, + msecs_to_jiffies + (HDMI_TIMEOUT_PLL_LOCK)); + + if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) + DRM_ERROR("hdmi phy pll not well disabled\n"); + + disable_pll_rejection(hdmi); +} + +struct hdmi_phy_ops tx3g0c55phy_ops = { + .start = sti_hdmi_tx3g0c55phy_start, + .stop = sti_hdmi_tx3g0c55phy_stop, +}; diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h new file mode 100644 index 000000000000..068237b3a303 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_HDMI_TX3G0C55PHY_H_ +#define _STI_HDMI_TX3G0C55PHY_H_ + +#include "sti_hdmi.h" + +extern struct hdmi_phy_ops tx3g0c55phy_ops; + +#endif diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c new file mode 100644 index 000000000000..8e0ceb0ced33 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c @@ -0,0 +1,211 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include "sti_hdmi_tx3g4c28phy.h" + +#define HDMI_SRZ_CFG 0x504 +#define HDMI_SRZ_PLL_CFG 0x510 +#define HDMI_SRZ_ICNTL 0x518 +#define HDMI_SRZ_CALCODE_EXT 0x520 + +#define HDMI_SRZ_CFG_EN BIT(0) +#define HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT BIT(1) +#define HDMI_SRZ_CFG_EXTERNAL_DATA BIT(16) +#define HDMI_SRZ_CFG_RBIAS_EXT BIT(17) +#define HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION BIT(18) +#define HDMI_SRZ_CFG_EN_BIASRES_DETECTION BIT(19) +#define HDMI_SRZ_CFG_EN_SRC_TERMINATION BIT(24) + +#define HDMI_SRZ_CFG_INTERNAL_MASK (HDMI_SRZ_CFG_EN | \ + HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT | \ + HDMI_SRZ_CFG_EXTERNAL_DATA | \ + HDMI_SRZ_CFG_RBIAS_EXT | \ + HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION | \ + HDMI_SRZ_CFG_EN_BIASRES_DETECTION | \ + HDMI_SRZ_CFG_EN_SRC_TERMINATION) + +#define PLL_CFG_EN BIT(0) +#define PLL_CFG_NDIV_SHIFT (8) +#define PLL_CFG_IDF_SHIFT (16) +#define PLL_CFG_ODF_SHIFT (24) + +#define ODF_DIV_1 (0) +#define ODF_DIV_2 (1) +#define ODF_DIV_4 (2) +#define ODF_DIV_8 (3) + +#define HDMI_TIMEOUT_PLL_LOCK 50 /*milliseconds */ + +struct plldividers_s { + uint32_t min; + uint32_t max; + uint32_t idf; + uint32_t odf; +}; + +/* + * Functional specification recommended values + */ +#define NB_PLL_MODE 5 +static struct plldividers_s plldividers[NB_PLL_MODE] = { + {0, 20000000, 1, ODF_DIV_8}, + {20000000, 42500000, 2, ODF_DIV_8}, + {42500000, 85000000, 4, ODF_DIV_4}, + {85000000, 170000000, 8, ODF_DIV_2}, + {170000000, 340000000, 16, ODF_DIV_1} +}; + +#define NB_HDMI_PHY_CONFIG 2 +static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = { + {0, 250000000, {0x0, 0x0, 0x0, 0x0} }, + {250000000, 300000000, {0x1110, 0x0, 0x0, 0x0} }, +}; + +/** + * Start hdmi phy macro cell tx3g4c28 + * + * @hdmi: pointer on the hdmi internal structure + * + * Return false if an error occur + */ +static bool sti_hdmi_tx3g4c28phy_start(struct sti_hdmi *hdmi) +{ + u32 ckpxpll = hdmi->mode.clock * 1000; + u32 val, tmdsck, idf, odf, pllctrl = 0; + bool foundplldivides = false; + int i; + + DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll); + + for (i = 0; i < NB_PLL_MODE; i++) { + if (ckpxpll >= plldividers[i].min && + ckpxpll < plldividers[i].max) { + idf = plldividers[i].idf; + odf = plldividers[i].odf; + foundplldivides = true; + break; + } + } + + if (!foundplldivides) { + DRM_ERROR("input TMDS clock speed (%d) not supported\n", + ckpxpll); + goto err; + } + + /* Assuming no pixel repetition and 24bits color */ + tmdsck = ckpxpll; + pllctrl |= 40 << PLL_CFG_NDIV_SHIFT; + + if (tmdsck > 340000000) { + DRM_ERROR("output TMDS clock (%d) out of range\n", tmdsck); + goto err; + } + + pllctrl |= idf << PLL_CFG_IDF_SHIFT; + pllctrl |= odf << PLL_CFG_ODF_SHIFT; + + /* + * Configure and power up the PHY PLL + */ + hdmi->event_received = false; + DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl); + hdmi_write(hdmi, (pllctrl | PLL_CFG_EN), HDMI_SRZ_PLL_CFG); + + /* wait PLL interrupt */ + wait_event_interruptible_timeout(hdmi->wait_event, + hdmi->event_received == true, + msecs_to_jiffies + (HDMI_TIMEOUT_PLL_LOCK)); + + if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) { + DRM_ERROR("hdmi phy pll not locked\n"); + goto err; + } + + DRM_DEBUG_DRIVER("got PHY PLL Lock\n"); + + val = (HDMI_SRZ_CFG_EN | + HDMI_SRZ_CFG_EXTERNAL_DATA | + HDMI_SRZ_CFG_EN_BIASRES_DETECTION | + HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION); + + if (tmdsck > 165000000) + val |= HDMI_SRZ_CFG_EN_SRC_TERMINATION; + + /* + * To configure the source termination and pre-emphasis 
appropriately + * for different high speed TMDS clock frequencies a phy configuration + * table must be provided, tailored to the SoC and board combination. + */ + for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) { + if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) && + (hdmiphy_config[i].max_tmds_freq >= tmdsck)) { + val |= (hdmiphy_config[i].config[0] + & ~HDMI_SRZ_CFG_INTERNAL_MASK); + hdmi_write(hdmi, val, HDMI_SRZ_CFG); + + val = hdmiphy_config[i].config[1]; + hdmi_write(hdmi, val, HDMI_SRZ_ICNTL); + + val = hdmiphy_config[i].config[2]; + hdmi_write(hdmi, val, HDMI_SRZ_CALCODE_EXT); + + DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x\n", + hdmiphy_config[i].config[0], + hdmiphy_config[i].config[1], + hdmiphy_config[i].config[2]); + return true; + } + } + + /* + * Default, power up the serializer with no pre-emphasis or + * output swing correction + */ + hdmi_write(hdmi, val, HDMI_SRZ_CFG); + hdmi_write(hdmi, 0x0, HDMI_SRZ_ICNTL); + hdmi_write(hdmi, 0x0, HDMI_SRZ_CALCODE_EXT); + + return true; + +err: + return false; +} + +/** + * Stop hdmi phy macro cell tx3g4c28 + * + * @hdmi: pointer on the hdmi internal structure + */ +static void sti_hdmi_tx3g4c28phy_stop(struct sti_hdmi *hdmi) +{ + int val = 0; + + DRM_DEBUG_DRIVER("\n"); + + hdmi->event_received = false; + + val = HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION; + val |= HDMI_SRZ_CFG_EN_BIASRES_DETECTION; + + hdmi_write(hdmi, val, HDMI_SRZ_CFG); + hdmi_write(hdmi, 0, HDMI_SRZ_PLL_CFG); + + /* wait PLL interrupt */ + wait_event_interruptible_timeout(hdmi->wait_event, + hdmi->event_received == true, + msecs_to_jiffies + (HDMI_TIMEOUT_PLL_LOCK)); + + if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) + DRM_ERROR("hdmi phy pll not well disabled\n"); +} + +struct hdmi_phy_ops tx3g4c28phy_ops = { + .start = sti_hdmi_tx3g4c28phy_start, + .stop = sti_hdmi_tx3g4c28phy_stop, +}; diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h new file mode 100644 index 000000000000..f99a7ff281ef --- /dev/null +++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_HDMI_TX3G4C28PHY_H_ +#define _STI_HDMI_TX3G4C28PHY_H_ + +#include "sti_hdmi.h" + +extern struct hdmi_phy_ops tx3g4c28phy_ops; + +#endif diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c new file mode 100644 index 000000000000..06a587c4f1bb --- /dev/null +++ b/drivers/gpu/drm/sti/sti_layer.c @@ -0,0 +1,197 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <drm/drmP.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_fb_cma_helper.h> + +#include "sti_compositor.h" +#include "sti_gdp.h" +#include "sti_layer.h" +#include "sti_vid.h" + +const char *sti_layer_to_str(struct sti_layer *layer) +{ + switch (layer->desc) { + case STI_GDP_0: + return "GDP0"; + case STI_GDP_1: + return "GDP1"; + case STI_GDP_2: + return "GDP2"; + case STI_GDP_3: + return "GDP3"; + case STI_VID_0: + return "VID0"; + case STI_VID_1: + return "VID1"; + case STI_CURSOR: + return "CURSOR"; + default: + return "<UNKNOWN LAYER>"; + } +} + +struct sti_layer *sti_layer_create(struct device *dev, int desc, + void __iomem *baseaddr) +{ + + struct sti_layer *layer = NULL; + + switch (desc & STI_LAYER_TYPE_MASK) { + case STI_GDP: + layer = sti_gdp_create(dev, desc); + break; + case STI_VID: + layer = sti_vid_create(dev); + break; + } + + if (!layer) { + DRM_ERROR("Failed to create layer\n"); + return NULL; + } + + layer->desc = desc; + layer->dev = dev; + layer->regs = baseaddr; + + layer->ops->init(layer); + + DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer)); + + return layer; +} + +int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, + struct drm_display_mode *mode, int mixer_id, + int dest_x, int dest_y, int dest_w, int dest_h, + int src_x, int src_y, int src_w, int src_h) +{ + int ret; + unsigned int i; + struct drm_gem_cma_object *cma_obj; + + if (!layer || !fb || !mode) { + DRM_ERROR("Null fb, layer or mode\n"); + return 1; + } + + cma_obj = drm_fb_cma_get_gem_obj(fb, 0); + if (!cma_obj) { + DRM_ERROR("Can't get CMA GEM object for fb\n"); + return 1; + } + + layer->fb = fb; + layer->mode = mode; + layer->mixer_id = mixer_id; + layer->dst_x = dest_x; + layer->dst_y = dest_y; + layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x); + layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y); + layer->src_x = src_x; + layer->src_y = src_y; + layer->src_w = src_w; + layer->src_h = src_h; + layer->format = fb->pixel_format; + layer->paddr = cma_obj->paddr; + for (i = 0; i < 4; i++) { + layer->pitches[i] = fb->pitches[i]; + layer->offsets[i] = fb->offsets[i]; + } + + DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n", + sti_layer_to_str(layer), + layer->mixer_id); + DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n", + sti_layer_to_str(layer), + layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y, + layer->src_w, layer->src_h, layer->src_x, + layer->src_y); + + DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, + (char *)&layer->format, (unsigned long)layer->paddr); + + if (!layer->ops->prepare) + goto err_no_prepare; + + ret = layer->ops->prepare(layer, !layer->enabled); + if (!ret) + layer->enabled = true; + + return ret; + +err_no_prepare: + DRM_ERROR("Cannot prepare\n"); + return 1; +} + +int sti_layer_commit(struct sti_layer *layer) +{ + if (!layer) + return 1; + + if (!layer->ops->commit) + goto err_no_commit; + + return layer->ops->commit(layer); + +err_no_commit: + DRM_ERROR("Cannot commit\n"); + return 1; +} + +int sti_layer_disable(struct sti_layer *layer) +{ + int ret; + + DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); + if (!layer) + return 1; + + if (!layer->enabled) + return 0; + + if (!layer->ops->disable) + goto err_no_disable; + + ret = layer->ops->disable(layer); + if (!ret) + layer->enabled = false; + else + DRM_ERROR("Disable failed\n"); + + return ret; + +err_no_disable: + 
DRM_ERROR("Cannot disable\n"); + return 1; +} + +const uint32_t *sti_layer_get_formats(struct sti_layer *layer) +{ + if (!layer) + return NULL; + + if (!layer->ops->get_formats) + return NULL; + + return layer->ops->get_formats(layer); +} + +unsigned int sti_layer_get_nb_formats(struct sti_layer *layer) +{ + if (!layer) + return 0; + + if (!layer->ops->get_nb_formats) + return 0; + + return layer->ops->get_nb_formats(layer); +} diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h new file mode 100644 index 000000000000..198c3774cc12 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_layer.h @@ -0,0 +1,123 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_LAYER_H_ +#define _STI_LAYER_H_ + +#include <drm/drmP.h> + +#define to_sti_layer(x) container_of(x, struct sti_layer, plane) + +#define STI_LAYER_TYPE_SHIFT 8 +#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1)) + +struct sti_layer; + +enum sti_layer_type { + STI_GDP = 1 << STI_LAYER_TYPE_SHIFT, + STI_VID = 2 << STI_LAYER_TYPE_SHIFT, + STI_CUR = 3 << STI_LAYER_TYPE_SHIFT, + STI_BCK = 4 << STI_LAYER_TYPE_SHIFT +}; + +enum sti_layer_id_of_type { + STI_ID_0 = 0, + STI_ID_1 = 1, + STI_ID_2 = 2, + STI_ID_3 = 3 +}; + +enum sti_layer_desc { + STI_GDP_0 = STI_GDP | STI_ID_0, + STI_GDP_1 = STI_GDP | STI_ID_1, + STI_GDP_2 = STI_GDP | STI_ID_2, + STI_GDP_3 = STI_GDP | STI_ID_3, + STI_VID_0 = STI_VID | STI_ID_0, + STI_VID_1 = STI_VID | STI_ID_1, + STI_CURSOR = STI_CUR, + STI_BACK = STI_BCK +}; + +/** + * STI layer functions structure + * + * @get_formats: get layer supported formats + * @get_nb_formats: get number of format supported + * @init: initialize the layer + * @prepare: prepare layer before rendering + * @commit: set layer for rendering + * @disable: disable layer + */ +struct sti_layer_funcs { + const uint32_t* (*get_formats)(struct sti_layer *layer); + unsigned int (*get_nb_formats)(struct sti_layer *layer); + void (*init)(struct sti_layer *layer); + int (*prepare)(struct sti_layer *layer, bool first_prepare); + int (*commit)(struct sti_layer *layer); + int (*disable)(struct sti_layer *layer); +}; + +/** + * STI layer structure + * + * @plane: drm plane it is bound to (if any) + * @fb: drm fb it is bound to + * @mode: display mode + * @desc: layer type & id + * @device: driver device + * @regs: layer registers + * @ops: layer functions + * @zorder: layer z-order + * @mixer_id: id of the mixer used to display the layer + * @enabled: to know if the layer is active or not + * @src_x src_y: coordinates of the input (fb) area + * @src_w src_h: size of the input (fb) area + * @dst_x dst_y: coordinates of the output (crtc) area + * @dst_w dst_h: size of the output (crtc) area + * @format: format + * @pitches: pitch of 'planes' (eg: Y, U, V) + * @offsets: offset of 'planes' + * @paddr: physical address of the input buffer + */ +struct sti_layer { + struct drm_plane plane; + struct drm_framebuffer *fb; + struct drm_display_mode *mode; + enum sti_layer_desc desc; + struct device *dev; + void __iomem *regs; + const struct sti_layer_funcs *ops; + int zorder; + int mixer_id; + bool enabled; + int src_x, src_y; + int src_w, src_h; + int dst_x, dst_y; + int dst_w, dst_h; + uint32_t format; + unsigned int pitches[4]; + unsigned int offsets[4]; + dma_addr_t paddr; +}; + +struct sti_layer *sti_layer_create(struct 
device *dev, int desc, + void __iomem *baseaddr); +int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, + struct drm_display_mode *mode, + int mixer_id, + int dest_x, int dest_y, + int dest_w, int dest_h, + int src_x, int src_y, + int src_w, int src_h); +int sti_layer_commit(struct sti_layer *layer); +int sti_layer_disable(struct sti_layer *layer); +const uint32_t *sti_layer_get_formats(struct sti_layer *layer); +unsigned int sti_layer_get_nb_formats(struct sti_layer *layer); +const char *sti_layer_to_str(struct sti_layer *layer); + +#endif diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c new file mode 100644 index 000000000000..79f369db9fb6 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -0,0 +1,249 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#include "sti_compositor.h" +#include "sti_mixer.h" +#include "sti_vtg.h" + +/* Identity: G=Y , B=Cb , R=Cr */ +static const u32 mixerColorSpaceMatIdentity[] = { + 0x10000000, 0x00000000, 0x10000000, 0x00001000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000 +}; + +/* regs offset */ +#define GAM_MIXER_CTL 0x00 +#define GAM_MIXER_BKC 0x04 +#define GAM_MIXER_BCO 0x0C +#define GAM_MIXER_BCS 0x10 +#define GAM_MIXER_AVO 0x28 +#define GAM_MIXER_AVS 0x2C +#define GAM_MIXER_CRB 0x34 +#define GAM_MIXER_ACT 0x38 +#define GAM_MIXER_MBP 0x3C +#define GAM_MIXER_MX0 0x80 + +/* id for depth of CRB reg */ +#define GAM_DEPTH_VID0_ID 1 +#define GAM_DEPTH_VID1_ID 2 +#define GAM_DEPTH_GDP0_ID 3 +#define GAM_DEPTH_GDP1_ID 4 +#define GAM_DEPTH_GDP2_ID 5 +#define GAM_DEPTH_GDP3_ID 6 +#define GAM_DEPTH_MASK_ID 7 + +/* mask in CTL reg */ +#define GAM_CTL_BACK_MASK BIT(0) +#define GAM_CTL_VID0_MASK BIT(1) +#define GAM_CTL_VID1_MASK BIT(2) +#define GAM_CTL_GDP0_MASK BIT(3) +#define GAM_CTL_GDP1_MASK BIT(4) +#define GAM_CTL_GDP2_MASK BIT(5) +#define GAM_CTL_GDP3_MASK BIT(6) + +const char *sti_mixer_to_str(struct sti_mixer *mixer) +{ + switch (mixer->id) { + case STI_MIXER_MAIN: + return "MAIN_MIXER"; + case STI_MIXER_AUX: + return "AUX_MIXER"; + default: + return "<UNKNOWN MIXER>"; + } +} + +static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id) +{ + return readl(mixer->regs + reg_id); +} + +static inline void sti_mixer_reg_write(struct sti_mixer *mixer, + u32 reg_id, u32 val) +{ + writel(val, mixer->regs + reg_id); +} + +void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable) +{ + u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL); + + val &= ~GAM_CTL_BACK_MASK; + val |= enable; + sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); +} + +static void sti_mixer_set_background_color(struct sti_mixer *mixer, + u8 red, u8 green, u8 blue) +{ + u32 val = (red << 16) | (green << 8) | blue; + + sti_mixer_reg_write(mixer, GAM_MIXER_BKC, val); +} + +static void sti_mixer_set_background_area(struct sti_mixer *mixer, + struct drm_display_mode *mode) +{ + u32 ydo, xdo, yds, xds; + + ydo = sti_vtg_get_line_number(*mode, 0); + yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); + xdo = sti_vtg_get_pixel_number(*mode, 0); + xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); + + sti_mixer_reg_write(mixer, GAM_MIXER_BCO, ydo << 16 | xdo); + sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds); +} + +int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer 
*layer) +{ + int layer_id = 0, depth = layer->zorder; + u32 mask, val; + + if (depth >= GAM_MIXER_NB_DEPTH_LEVEL) + return 1; + + switch (layer->desc) { + case STI_GDP_0: + layer_id = GAM_DEPTH_GDP0_ID; + break; + case STI_GDP_1: + layer_id = GAM_DEPTH_GDP1_ID; + break; + case STI_GDP_2: + layer_id = GAM_DEPTH_GDP2_ID; + break; + case STI_GDP_3: + layer_id = GAM_DEPTH_GDP3_ID; + break; + case STI_VID_0: + layer_id = GAM_DEPTH_VID0_ID; + break; + case STI_VID_1: + layer_id = GAM_DEPTH_VID1_ID; + break; + default: + DRM_ERROR("Unknown layer %d\n", layer->desc); + return 1; + } + mask = GAM_DEPTH_MASK_ID << (3 * depth); + layer_id = layer_id << (3 * depth); + + DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer), + sti_layer_to_str(layer), depth); + dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n", + layer_id, mask); + + val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB); + val &= ~mask; + val |= layer_id; + sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val); + + dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n", + sti_mixer_reg_read(mixer, GAM_MIXER_CRB)); + return 0; +} + +int sti_mixer_active_video_area(struct sti_mixer *mixer, + struct drm_display_mode *mode) +{ + u32 ydo, xdo, yds, xds; + + ydo = sti_vtg_get_line_number(*mode, 0); + yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); + xdo = sti_vtg_get_pixel_number(*mode, 0); + xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); + + DRM_DEBUG_DRIVER("%s active video area xdo:%d ydo:%d xds:%d yds:%d\n", + sti_mixer_to_str(mixer), xdo, ydo, xds, yds); + sti_mixer_reg_write(mixer, GAM_MIXER_AVO, ydo << 16 | xdo); + sti_mixer_reg_write(mixer, GAM_MIXER_AVS, yds << 16 | xds); + + sti_mixer_set_background_color(mixer, 0xFF, 0, 0); + + sti_mixer_set_background_area(mixer, mode); + sti_mixer_set_background_status(mixer, true); + return 0; +} + +static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) +{ + switch (layer->desc) { + case STI_BACK: + return GAM_CTL_BACK_MASK; + case STI_GDP_0: + return GAM_CTL_GDP0_MASK; + case STI_GDP_1: + return GAM_CTL_GDP1_MASK; + case STI_GDP_2: + return GAM_CTL_GDP2_MASK; + case STI_GDP_3: + return GAM_CTL_GDP3_MASK; + case STI_VID_0: + return GAM_CTL_VID0_MASK; + case STI_VID_1: + return GAM_CTL_VID1_MASK; + default: + return 0; + } +} + +int sti_mixer_set_layer_status(struct sti_mixer *mixer, + struct sti_layer *layer, bool status) +{ + u32 mask, val; + + DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable", + sti_mixer_to_str(mixer), sti_layer_to_str(layer)); + + mask = sti_mixer_get_layer_mask(layer); + if (!mask) { + DRM_ERROR("Can not find layer mask\n"); + return -EINVAL; + } + + val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL); + val &= ~mask; + val |= status ? 
mask : 0; + sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); + + return 0; +} + +void sti_mixer_set_matrix(struct sti_mixer *mixer) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++) + sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4), + mixerColorSpaceMatIdentity[i]); +} + +struct sti_mixer *sti_mixer_create(struct device *dev, int id, + void __iomem *baseaddr) +{ + struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL); + struct device_node *np = dev->of_node; + + dev_dbg(dev, "%s\n", __func__); + if (!mixer) { + DRM_ERROR("Failed to allocated memory for mixer\n"); + return NULL; + } + mixer->regs = baseaddr; + mixer->dev = dev; + mixer->id = id; + + if (of_device_is_compatible(np, "st,stih416-compositor")) + sti_mixer_set_matrix(mixer); + + DRM_DEBUG_DRIVER("%s created. Regs=%p\n", + sti_mixer_to_str(mixer), mixer->regs); + + return mixer; +} diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h new file mode 100644 index 000000000000..874372102e52 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_mixer.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_MIXER_H_ +#define _STI_MIXER_H_ + +#include <drm/drmP.h> + +#include "sti_layer.h" + +#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc) + +/** + * STI Mixer subdevice structure + * + * @dev: driver device + * @regs: mixer registers + * @id: id of the mixer + * @drm_crtc: crtc object link to the mixer + * @pending_event: set if a flip event is pending on crtc + */ +struct sti_mixer { + struct device *dev; + void __iomem *regs; + int id; + struct drm_crtc drm_crtc; + struct drm_pending_vblank_event *pending_event; +}; + +const char *sti_mixer_to_str(struct sti_mixer *mixer); + +struct sti_mixer *sti_mixer_create(struct device *dev, int id, + void __iomem *baseaddr); + +int sti_mixer_set_layer_status(struct sti_mixer *mixer, + struct sti_layer *layer, bool status); +int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer); +int sti_mixer_active_video_area(struct sti_mixer *mixer, + struct drm_display_mode *mode); + +void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); + +/* depth in Cross-bar control = z order */ +#define GAM_MIXER_NB_DEPTH_LEVEL 7 + +#define STI_MIXER_MAIN 0 +#define STI_MIXER_AUX 1 + +#endif diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c new file mode 100644 index 000000000000..b8afe490356a --- /dev/null +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -0,0 +1,648 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Vincent Abriou <vincent.abriou@st.com> + * for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +/* glue registers */ +#define TVO_CSC_MAIN_M0 0x000 +#define TVO_CSC_MAIN_M1 0x004 +#define TVO_CSC_MAIN_M2 0x008 +#define TVO_CSC_MAIN_M3 0x00c +#define TVO_CSC_MAIN_M4 0x010 +#define TVO_CSC_MAIN_M5 0x014 +#define TVO_CSC_MAIN_M6 0x018 +#define TVO_CSC_MAIN_M7 0x01c +#define TVO_MAIN_IN_VID_FORMAT 0x030 +#define TVO_CSC_AUX_M0 0x100 +#define TVO_CSC_AUX_M1 0x104 +#define TVO_CSC_AUX_M2 0x108 +#define TVO_CSC_AUX_M3 0x10c +#define TVO_CSC_AUX_M4 0x110 +#define TVO_CSC_AUX_M5 0x114 +#define TVO_CSC_AUX_M6 0x118 +#define TVO_CSC_AUX_M7 0x11c +#define TVO_AUX_IN_VID_FORMAT 0x130 +#define TVO_VIP_HDF 0x400 +#define TVO_HD_SYNC_SEL 0x418 +#define TVO_HD_DAC_CFG_OFF 0x420 +#define TVO_VIP_HDMI 0x500 +#define TVO_HDMI_FORCE_COLOR_0 0x504 +#define TVO_HDMI_FORCE_COLOR_1 0x508 +#define TVO_HDMI_CLIP_VALUE_B_CB 0x50c +#define TVO_HDMI_CLIP_VALUE_Y_G 0x510 +#define TVO_HDMI_CLIP_VALUE_R_CR 0x514 +#define TVO_HDMI_SYNC_SEL 0x518 +#define TVO_HDMI_DFV_OBS 0x540 + +#define TVO_IN_FMT_SIGNED BIT(0) +#define TVO_SYNC_EXT BIT(4) + +#define TVO_VIP_REORDER_R_SHIFT 24 +#define TVO_VIP_REORDER_G_SHIFT 20 +#define TVO_VIP_REORDER_B_SHIFT 16 +#define TVO_VIP_REORDER_MASK 0x3 +#define TVO_VIP_REORDER_Y_G_SEL 0 +#define TVO_VIP_REORDER_CB_B_SEL 1 +#define TVO_VIP_REORDER_CR_R_SEL 2 + +#define TVO_VIP_CLIP_SHIFT 8 +#define TVO_VIP_CLIP_MASK 0x7 +#define TVO_VIP_CLIP_DISABLED 0 +#define TVO_VIP_CLIP_EAV_SAV 1 +#define TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y 2 +#define TVO_VIP_CLIP_LIMITED_RANGE_CB_CR 3 +#define TVO_VIP_CLIP_PROG_RANGE 4 + +#define TVO_VIP_RND_SHIFT 4 +#define TVO_VIP_RND_MASK 0x3 +#define TVO_VIP_RND_8BIT_ROUNDED 0 +#define TVO_VIP_RND_10BIT_ROUNDED 1 +#define TVO_VIP_RND_12BIT_ROUNDED 2 + +#define TVO_VIP_SEL_INPUT_MASK 0xf +#define TVO_VIP_SEL_INPUT_MAIN 0x0 +#define TVO_VIP_SEL_INPUT_AUX 0x8 +#define TVO_VIP_SEL_INPUT_FORCE_COLOR 0xf +#define TVO_VIP_SEL_INPUT_BYPASS_MASK 0x1 +#define TVO_VIP_SEL_INPUT_BYPASSED 1 + +#define TVO_SYNC_MAIN_VTG_SET_REF 0x00 +#define TVO_SYNC_MAIN_VTG_SET_1 0x01 +#define TVO_SYNC_MAIN_VTG_SET_2 0x02 +#define TVO_SYNC_MAIN_VTG_SET_3 0x03 +#define TVO_SYNC_MAIN_VTG_SET_4 0x04 +#define TVO_SYNC_MAIN_VTG_SET_5 0x05 +#define TVO_SYNC_MAIN_VTG_SET_6 0x06 +#define TVO_SYNC_AUX_VTG_SET_REF 0x10 +#define TVO_SYNC_AUX_VTG_SET_1 0x11 +#define TVO_SYNC_AUX_VTG_SET_2 0x12 +#define TVO_SYNC_AUX_VTG_SET_3 0x13 +#define TVO_SYNC_AUX_VTG_SET_4 0x14 +#define TVO_SYNC_AUX_VTG_SET_5 0x15 +#define TVO_SYNC_AUX_VTG_SET_6 0x16 + +#define TVO_SYNC_HD_DCS_SHIFT 8 + +#define ENCODER_MAIN_CRTC_MASK BIT(0) + +/* enum listing the supported output data format */ +enum sti_tvout_video_out_type { + STI_TVOUT_VIDEO_OUT_RGB, + STI_TVOUT_VIDEO_OUT_YUV, +}; + +struct sti_tvout { + struct device *dev; + struct drm_device *drm_dev; + void __iomem *regs; + struct reset_control *reset; + struct drm_encoder *hdmi; + struct drm_encoder *hda; +}; + +struct sti_tvout_encoder { + struct drm_encoder encoder; + struct sti_tvout *tvout; +}; + +#define to_sti_tvout_encoder(x) \ + container_of(x, struct sti_tvout_encoder, encoder) + +#define to_sti_tvout(x) to_sti_tvout_encoder(x)->tvout + +/* preformatter conversion matrix */ +static const u32 rgb_to_ycbcr_601[8] = { + 0xF927082E, 0x04C9FEAB, 0x01D30964, 0xFA95FD3D, + 
0x0000082E, 0x00002000, 0x00002000, 0x00000000 +}; + +/* 709 RGB to YCbCr */ +static const u32 rgb_to_ycbcr_709[8] = { + 0xF891082F, 0x0367FF40, 0x01280B71, 0xF9B1FE20, + 0x0000082F, 0x00002000, 0x00002000, 0x00000000 +}; + +static u32 tvout_read(struct sti_tvout *tvout, int offset) +{ + return readl(tvout->regs + offset); +} + +static void tvout_write(struct sti_tvout *tvout, u32 val, int offset) +{ + writel(val, tvout->regs + offset); +} + +/** + * Set the clipping mode of a VIP + * + * @tvout: tvout structure + * @cr_r: + * @y_g: + * @cb_b: + */ +static void tvout_vip_set_color_order(struct sti_tvout *tvout, + u32 cr_r, u32 y_g, u32 cb_b) +{ + u32 val = tvout_read(tvout, TVO_VIP_HDMI); + + val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT); + val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT); + val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT); + val |= cr_r << TVO_VIP_REORDER_R_SHIFT; + val |= y_g << TVO_VIP_REORDER_G_SHIFT; + val |= cb_b << TVO_VIP_REORDER_B_SHIFT; + + tvout_write(tvout, val, TVO_VIP_HDMI); +} + +/** + * Set the clipping mode of a VIP + * + * @tvout: tvout structure + * @range: clipping range + */ +static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, u32 range) +{ + u32 val = tvout_read(tvout, TVO_VIP_HDMI); + + val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT); + val |= range << TVO_VIP_CLIP_SHIFT; + tvout_write(tvout, val, TVO_VIP_HDMI); +} + +/** + * Set the rounded value of a VIP + * + * @tvout: tvout structure + * @rnd: rounded val per component + */ +static void tvout_vip_set_rnd(struct sti_tvout *tvout, u32 rnd) +{ + u32 val = tvout_read(tvout, TVO_VIP_HDMI); + + val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT); + val |= rnd << TVO_VIP_RND_SHIFT; + tvout_write(tvout, val, TVO_VIP_HDMI); +} + +/** + * Select the VIP input + * + * @tvout: tvout structure + * @sel_input: selected_input (main/aux + conv) + */ +static void tvout_vip_set_sel_input(struct sti_tvout *tvout, + bool main_path, + bool sel_input_logic_inverted, + enum sti_tvout_video_out_type video_out) +{ + u32 sel_input; + u32 val = tvout_read(tvout, TVO_VIP_HDMI); + + if (main_path) + sel_input = TVO_VIP_SEL_INPUT_MAIN; + else + sel_input = TVO_VIP_SEL_INPUT_AUX; + + switch (video_out) { + case STI_TVOUT_VIDEO_OUT_RGB: + sel_input |= TVO_VIP_SEL_INPUT_BYPASSED; + break; + case STI_TVOUT_VIDEO_OUT_YUV: + sel_input &= ~TVO_VIP_SEL_INPUT_BYPASSED; + break; + } + + /* on stih407 chip the sel_input bypass mode logic is inverted */ + if (sel_input_logic_inverted) + sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK; + + val &= ~TVO_VIP_SEL_INPUT_MASK; + val |= sel_input; + tvout_write(tvout, val, TVO_VIP_HDMI); +} + +/** + * Select the input video signed or unsigned + * + * @tvout: tvout structure + * @in_vid_signed: used video input format + */ +static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, u32 in_vid_fmt) +{ + u32 val = tvout_read(tvout, TVO_VIP_HDMI); + + val &= ~TVO_IN_FMT_SIGNED; + val |= in_vid_fmt; + tvout_write(tvout, val, TVO_MAIN_IN_VID_FORMAT); +} + +/** + * Start VIP block for HDMI output + * + * @tvout: pointer on tvout structure + * @main_path: true if main path has to be used in the vip configuration + * else aux path is used. 
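+ * Note: sti_hdmi_encoder_commit() in this file always calls this function
+ * with main_path set to true.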
+ */ +static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path) +{ + struct device_node *node = tvout->dev->of_node; + bool sel_input_logic_inverted = false; + + dev_dbg(tvout->dev, "%s\n", __func__); + + if (main_path) { + DRM_DEBUG_DRIVER("main vip for hdmi\n"); + /* select the input sync for hdmi = VTG set 1 */ + tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL); + } else { + DRM_DEBUG_DRIVER("aux vip for hdmi\n"); + /* select the input sync for hdmi = VTG set 1 */ + tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL); + } + + /* set color channel order */ + tvout_vip_set_color_order(tvout, + TVO_VIP_REORDER_CR_R_SEL, + TVO_VIP_REORDER_Y_G_SEL, + TVO_VIP_REORDER_CB_B_SEL); + + /* set clipping mode (Limited range RGB/Y) */ + tvout_vip_set_clip_mode(tvout, TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y); + + /* set round mode (rounded to 8-bit per component) */ + tvout_vip_set_rnd(tvout, TVO_VIP_RND_8BIT_ROUNDED); + + if (of_device_is_compatible(node, "st,stih407-tvout")) { + /* set input video format */ + tvout_vip_set_in_vid_fmt(tvout->regs + TVO_MAIN_IN_VID_FORMAT, + TVO_IN_FMT_SIGNED); + sel_input_logic_inverted = true; + } + + /* input selection */ + tvout_vip_set_sel_input(tvout, main_path, + sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB); +} + +/** + * Start HDF VIP and HD DAC + * + * @tvout: pointer on tvout structure + * @main_path: true if main path has to be used in the vip configuration + * else aux path is used. + */ +static void tvout_hda_start(struct sti_tvout *tvout, bool main_path) +{ + struct device_node *node = tvout->dev->of_node; + bool sel_input_logic_inverted = false; + + dev_dbg(tvout->dev, "%s\n", __func__); + + if (!main_path) { + DRM_ERROR("HD Analog on aux not implemented\n"); + return; + } + + DRM_DEBUG_DRIVER("main vip for HDF\n"); + + /* set color channel order */ + tvout_vip_set_color_order(tvout->regs + TVO_VIP_HDF, + TVO_VIP_REORDER_CR_R_SEL, + TVO_VIP_REORDER_Y_G_SEL, + TVO_VIP_REORDER_CB_B_SEL); + + /* set clipping mode (Limited range RGB/Y) */ + tvout_vip_set_clip_mode(tvout->regs + TVO_VIP_HDF, + TVO_VIP_CLIP_LIMITED_RANGE_CB_CR); + + /* set round mode (rounded to 10-bit per component) */ + tvout_vip_set_rnd(tvout->regs + TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED); + + if (of_device_is_compatible(node, "st,stih407-tvout")) { + /* set input video format */ + tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED); + sel_input_logic_inverted = true; + } + + /* Input selection */ + tvout_vip_set_sel_input(tvout->regs + TVO_VIP_HDF, + main_path, + sel_input_logic_inverted, + STI_TVOUT_VIDEO_OUT_YUV); + + /* select the input sync for HD analog = VTG set 3 + * and HD DCS = VTG set 2 */ + tvout_write(tvout, + (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT) + | TVO_SYNC_MAIN_VTG_SET_3, + TVO_HD_SYNC_SEL); + + /* power up HD DAC */ + tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF); +} + +static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void sti_tvout_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void sti_tvout_encoder_destroy(struct drm_encoder *encoder) +{ + struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder); + + drm_encoder_cleanup(encoder); + 
kfree(sti_encoder); +} + +static const struct drm_encoder_funcs sti_tvout_encoder_funcs = { + .destroy = sti_tvout_encoder_destroy, +}; + +static void sti_hda_encoder_commit(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + + tvout_hda_start(tvout, true); +} + +static void sti_hda_encoder_disable(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + + /* reset VIP register */ + tvout_write(tvout, 0x0, TVO_VIP_HDF); + + /* power down HD DAC */ + tvout_write(tvout, 1, TVO_HD_DAC_CFG_OFF); +} + +static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = { + .dpms = sti_tvout_encoder_dpms, + .mode_fixup = sti_tvout_encoder_mode_fixup, + .mode_set = sti_tvout_encoder_mode_set, + .prepare = sti_tvout_encoder_prepare, + .commit = sti_hda_encoder_commit, + .disable = sti_hda_encoder_disable, +}; + +static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev, + struct sti_tvout *tvout) +{ + struct sti_tvout_encoder *encoder; + struct drm_encoder *drm_encoder; + + encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL); + if (!encoder) + return NULL; + + encoder->tvout = tvout; + + drm_encoder = (struct drm_encoder *) encoder; + + drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK; + drm_encoder->possible_clones = 1 << 0; + + drm_encoder_init(dev, drm_encoder, + &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC); + + drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs); + + return drm_encoder; +} + +static void sti_hdmi_encoder_commit(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + + tvout_hdmi_start(tvout, true); +} + +static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) +{ + struct sti_tvout *tvout = to_sti_tvout(encoder); + + /* reset VIP register */ + tvout_write(tvout, 0x0, TVO_VIP_HDMI); +} + +static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = { + .dpms = sti_tvout_encoder_dpms, + .mode_fixup = sti_tvout_encoder_mode_fixup, + .mode_set = sti_tvout_encoder_mode_set, + .prepare = sti_tvout_encoder_prepare, + .commit = sti_hdmi_encoder_commit, + .disable = sti_hdmi_encoder_disable, +}; + +static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev, + struct sti_tvout *tvout) +{ + struct sti_tvout_encoder *encoder; + struct drm_encoder *drm_encoder; + + encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL); + if (!encoder) + return NULL; + + encoder->tvout = tvout; + + drm_encoder = (struct drm_encoder *) encoder; + + drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK; + drm_encoder->possible_clones = 1 << 1; + + drm_encoder_init(dev, drm_encoder, + &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS); + + drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs); + + return drm_encoder; +} + +static void sti_tvout_create_encoders(struct drm_device *dev, + struct sti_tvout *tvout) +{ + tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout); + tvout->hda = sti_tvout_create_hda_encoder(dev, tvout); +} + +static void sti_tvout_destroy_encoders(struct sti_tvout *tvout) +{ + if (tvout->hdmi) + drm_encoder_cleanup(tvout->hdmi); + tvout->hdmi = NULL; + + if (tvout->hda) + drm_encoder_cleanup(tvout->hda); + tvout->hda = NULL; +} + +static int sti_tvout_bind(struct device *dev, struct device *master, void *data) +{ + struct sti_tvout *tvout = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + unsigned int i; + int ret; + + tvout->drm_dev = drm_dev; + + /* set 
preformatter matrix */ + for (i = 0; i < 8; i++) { + tvout_write(tvout, rgb_to_ycbcr_601[i], + TVO_CSC_MAIN_M0 + (i * 4)); + tvout_write(tvout, rgb_to_ycbcr_601[i], + TVO_CSC_AUX_M0 + (i * 4)); + } + + sti_tvout_create_encoders(drm_dev, tvout); + + ret = component_bind_all(dev, drm_dev); + if (ret) + sti_tvout_destroy_encoders(tvout); + + return ret; +} + +static void sti_tvout_unbind(struct device *dev, struct device *master, + void *data) +{ + /* do nothing */ +} + +static const struct component_ops sti_tvout_ops = { + .bind = sti_tvout_bind, + .unbind = sti_tvout_unbind, +}; + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static int sti_tvout_master_bind(struct device *dev) +{ + return 0; +} + +static void sti_tvout_master_unbind(struct device *dev) +{ + /* do nothing */ +} + +static const struct component_master_ops sti_tvout_master_ops = { + .bind = sti_tvout_master_bind, + .unbind = sti_tvout_master_unbind, +}; + +static int sti_tvout_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct sti_tvout *tvout; + struct resource *res; + struct device_node *child_np; + struct component_match *match = NULL; + + DRM_INFO("%s\n", __func__); + + if (!node) + return -ENODEV; + + tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL); + if (!tvout) + return -ENOMEM; + + tvout->dev = dev; + + /* get Memory ressources */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg"); + if (!res) { + DRM_ERROR("Invalid glue resource\n"); + return -ENOMEM; + } + tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!tvout->regs) + return -ENOMEM; + + /* get reset resources */ + tvout->reset = devm_reset_control_get(dev, "tvout"); + /* take tvout out of reset */ + if (!IS_ERR(tvout->reset)) + reset_control_deassert(tvout->reset); + + platform_set_drvdata(pdev, tvout); + + of_platform_populate(node, NULL, NULL, dev); + + child_np = of_get_next_available_child(node, NULL); + + while (child_np) { + component_match_add(dev, &match, compare_of, child_np); + of_node_put(child_np); + child_np = of_get_next_available_child(node, child_np); + } + + component_master_add_with_match(dev, &sti_tvout_master_ops, match); + + return component_add(dev, &sti_tvout_ops); +} + +static int sti_tvout_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &sti_tvout_master_ops); + component_del(&pdev->dev, &sti_tvout_ops); + return 0; +} + +static const struct of_device_id tvout_of_match[] = { + { .compatible = "st,stih416-tvout", }, + { .compatible = "st,stih407-tvout", }, + { /* end node */ } +}; +MODULE_DEVICE_TABLE(of, tvout_of_match); + +struct platform_driver sti_tvout_driver = { + .driver = { + .name = "sti-tvout", + .owner = THIS_MODULE, + .of_match_table = tvout_of_match, + }, + .probe = sti_tvout_probe, + .remove = sti_tvout_remove, +}; + +module_platform_driver(sti_tvout_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c new file mode 100644 index 000000000000..10ced6a479f4 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_vid.c @@ -0,0 +1,138 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. 
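Editorial aside, not part of the patch: the sti_tvout probe shown above aggregates its sub-devices through the kernel component framework — each available child node is fed to component_match_add() and the tvout registers itself as the aggregate master, whose bind eventually calls component_bind_all(). As a hedged illustration of the other half of that contract, a minimal, hypothetical sub-device driver (names such as example_probe are illustrative only) would look roughly like this:

/* Illustrative sketch only; requires <linux/component.h> and
 * <linux/platform_device.h>. */
static int example_bind(struct device *dev, struct device *master, void *data)
{
	/* 'data' is the drm_device that the master's bind passed to
	 * component_bind_all(); create encoders/connectors against it here */
	return 0;
}

static void example_unbind(struct device *dev, struct device *master,
			   void *data)
{
	/* undo whatever example_bind() created */
}

static const struct component_ops example_ops = {
	.bind = example_bind,
	.unbind = example_unbind,
};

static int example_probe(struct platform_device *pdev)
{
	/* register this device as a component; example_bind() runs once the
	 * aggregate master has all of its components */
	return component_add(&pdev->dev, &example_ops);
}

This mirrors the sti_tvout_ops/bind pattern above from the sub-device side; the real HDMI and HD-analog drivers are not shown in this hunk.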
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <drm/drmP.h> + +#include "sti_layer.h" +#include "sti_vid.h" +#include "sti_vtg.h" + +/* Registers */ +#define VID_CTL 0x00 +#define VID_ALP 0x04 +#define VID_CLF 0x08 +#define VID_VPO 0x0C +#define VID_VPS 0x10 +#define VID_KEY1 0x28 +#define VID_KEY2 0x2C +#define VID_MPR0 0x30 +#define VID_MPR1 0x34 +#define VID_MPR2 0x38 +#define VID_MPR3 0x3C +#define VID_MST 0x68 +#define VID_BC 0x70 +#define VID_TINT 0x74 +#define VID_CSAT 0x78 + +/* Registers values */ +#define VID_CTL_IGNORE (BIT(31) | BIT(30)) +#define VID_CTL_PSI_ENABLE (BIT(2) | BIT(1) | BIT(0)) +#define VID_ALP_OPAQUE 0x00000080 +#define VID_BC_DFLT 0x00008000 +#define VID_TINT_DFLT 0x00000000 +#define VID_CSAT_DFLT 0x00000080 +/* YCbCr to RGB BT709: + * R = Y+1.5391Cr + * G = Y-0.4590Cr-0.1826Cb + * B = Y+1.8125Cb */ +#define VID_MPR0_BT709 0x0A800000 +#define VID_MPR1_BT709 0x0AC50000 +#define VID_MPR2_BT709 0x07150545 +#define VID_MPR3_BT709 0x00000AE8 + +static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare) +{ + u32 val; + + /* Unmask */ + val = readl(vid->regs + VID_CTL); + val &= ~VID_CTL_IGNORE; + writel(val, vid->regs + VID_CTL); + + return 0; +} + +static int sti_vid_commit_layer(struct sti_layer *vid) +{ + struct drm_display_mode *mode = vid->mode; + u32 ydo, xdo, yds, xds; + + ydo = sti_vtg_get_line_number(*mode, vid->dst_y); + yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1); + xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x); + xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1); + + writel((ydo << 16) | xdo, vid->regs + VID_VPO); + writel((yds << 16) | xds, vid->regs + VID_VPS); + + return 0; +} + +static int sti_vid_disable_layer(struct sti_layer *vid) +{ + u32 val; + + /* Mask */ + val = readl(vid->regs + VID_CTL); + val |= VID_CTL_IGNORE; + writel(val, vid->regs + VID_CTL); + + return 0; +} + +static const uint32_t *sti_vid_get_formats(struct sti_layer *layer) +{ + return NULL; +} + +static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer) +{ + return 0; +} + +static void sti_vid_init(struct sti_layer *vid) +{ + /* Enable PSI, Mask layer */ + writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL); + + /* Opaque */ + writel(VID_ALP_OPAQUE, vid->regs + VID_ALP); + + /* Color conversion parameters */ + writel(VID_MPR0_BT709, vid->regs + VID_MPR0); + writel(VID_MPR1_BT709, vid->regs + VID_MPR1); + writel(VID_MPR2_BT709, vid->regs + VID_MPR2); + writel(VID_MPR3_BT709, vid->regs + VID_MPR3); + + /* Brightness, contrast, tint, saturation */ + writel(VID_BC_DFLT, vid->regs + VID_BC); + writel(VID_TINT_DFLT, vid->regs + VID_TINT); + writel(VID_CSAT_DFLT, vid->regs + VID_CSAT); +} + +static const struct sti_layer_funcs vid_ops = { + .get_formats = sti_vid_get_formats, + .get_nb_formats = sti_vid_get_nb_formats, + .init = sti_vid_init, + .prepare = sti_vid_prepare_layer, + .commit = sti_vid_commit_layer, + .disable = sti_vid_disable_layer, +}; + +struct sti_layer *sti_vid_create(struct device *dev) +{ + struct sti_layer *vid; + + vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL); + if (!vid) { + DRM_ERROR("Failed to allocate memory for VID\n"); + return NULL; + } + + vid->ops = &vid_ops; + + return vid; +} diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h new file mode 100644 index 000000000000..2c0aecd63294 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_vid.h @@ -0,0 +1,12 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Fabien 
Dessenne <fabien.dessenne@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_VID_H_ +#define _STI_VID_H_ + +struct sti_layer *sti_vid_create(struct device *dev); + +#endif diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c new file mode 100644 index 000000000000..82a51d488434 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_vtac.c @@ -0,0 +1,215 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <drm/drmP.h> + +/* registers offset */ +#define VTAC_CONFIG 0x00 +#define VTAC_RX_FIFO_CONFIG 0x04 +#define VTAC_FIFO_CONFIG_VAL 0x04 + +#define VTAC_SYS_CFG8521 0x824 +#define VTAC_SYS_CFG8522 0x828 + +/* Number of phyts per pixel */ +#define VTAC_2_5_PPP 0x0005 +#define VTAC_3_PPP 0x0006 +#define VTAC_4_PPP 0x0008 +#define VTAC_5_PPP 0x000A +#define VTAC_6_PPP 0x000C +#define VTAC_13_PPP 0x001A +#define VTAC_14_PPP 0x001C +#define VTAC_15_PPP 0x001E +#define VTAC_16_PPP 0x0020 +#define VTAC_17_PPP 0x0022 +#define VTAC_18_PPP 0x0024 + +/* enable bits */ +#define VTAC_ENABLE 0x3003 + +#define VTAC_TX_PHY_ENABLE_CLK_PHY BIT(0) +#define VTAC_TX_PHY_ENABLE_CLK_DLL BIT(1) +#define VTAC_TX_PHY_PLL_NOT_OSC_MODE BIT(3) +#define VTAC_TX_PHY_RST_N_DLL_SWITCH BIT(4) +#define VTAC_TX_PHY_PROG_N3 BIT(9) + + +/** + * VTAC mode structure + * + * @vid_in_width: Video Data Resolution + * @phyts_width: Width of phyt buses (phyt low and phyt high). + * @phyts_per_pixel: Number of phyts sent per pixel + */ +struct sti_vtac_mode { + u32 vid_in_width; + u32 phyts_width; + u32 phyts_per_pixel; +}; + +static const struct sti_vtac_mode vtac_mode_main = {0x2, 0x2, VTAC_5_PPP}; +static const struct sti_vtac_mode vtac_mode_aux = {0x1, 0x0, VTAC_17_PPP}; + +/** + * VTAC structure + * + * @dev: pointer to device structure + * @regs: ioremapped registers for RX and TX devices + * @phy_regs: phy registers for TX device + * @clk: clock + * @mode: main or auxiliary configuration mode + */ +struct sti_vtac { + struct device *dev; + void __iomem *regs; + void __iomem *phy_regs; + struct clk *clk; + const struct sti_vtac_mode *mode; +}; + +static void sti_vtac_rx_set_config(struct sti_vtac *vtac) +{ + u32 config; + + /* Enable VTAC clock */ + if (clk_prepare_enable(vtac->clk)) + DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n"); + + writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG); + + config = VTAC_ENABLE; + config |= vtac->mode->vid_in_width << 4; + config |= vtac->mode->phyts_width << 16; + config |= vtac->mode->phyts_per_pixel << 23; + writel(config, vtac->regs + VTAC_CONFIG); +} + +static void sti_vtac_tx_set_config(struct sti_vtac *vtac) +{ + u32 phy_config; + u32 config; + + /* Enable VTAC clock */ + if (clk_prepare_enable(vtac->clk)) + DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n"); + + /* Configure vtac phy */ + phy_config = 0x00000000; + writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522); + phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY; + writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config |= VTAC_TX_PHY_PROG_N3; + writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config |=
VTAC_TX_PHY_ENABLE_CLK_DLL; + writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH; + writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); + phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE; + writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); + + /* Configure vtac tx */ + config = VTAC_ENABLE; + config |= vtac->mode->vid_in_width << 4; + config |= vtac->mode->phyts_width << 16; + config |= vtac->mode->phyts_per_pixel << 23; + writel(config, vtac->regs + VTAC_CONFIG); +} + +static const struct of_device_id vtac_of_match[] = { + { + .compatible = "st,vtac-main", + .data = &vtac_mode_main, + }, { + .compatible = "st,vtac-aux", + .data = &vtac_mode_aux, + }, { + /* end node */ + } +}; +MODULE_DEVICE_TABLE(of, vtac_of_match); + +static int sti_vtac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct of_device_id *id; + struct sti_vtac *vtac; + struct resource *res; + + vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL); + if (!vtac) + return -ENOMEM; + + vtac->dev = dev; + + id = of_match_node(vtac_of_match, np); + if (!id) + return -ENOMEM; + + vtac->mode = id->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + DRM_ERROR("Invalid resource\n"); + return -ENOMEM; + } + vtac->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(vtac->regs)) + return PTR_ERR(vtac->regs); + + + vtac->clk = devm_clk_get(dev, "vtac"); + if (IS_ERR(vtac->clk)) { + DRM_ERROR("Cannot get vtac clock\n"); + return PTR_ERR(vtac->clk); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + vtac->phy_regs = devm_ioremap_nocache(dev, res->start, + resource_size(res)); + sti_vtac_tx_set_config(vtac); + } else { + + sti_vtac_rx_set_config(vtac); + } + + platform_set_drvdata(pdev, vtac); + DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev)); + + return 0; +} + +static int sti_vtac_remove(struct platform_device *pdev) +{ + return 0; +} + +struct platform_driver sti_vtac_driver = { + .driver = { + .name = "sti-vtac", + .owner = THIS_MODULE, + .of_match_table = vtac_of_match, + }, + .probe = sti_vtac_probe, + .remove = sti_vtac_remove, +}; + +module_platform_driver(sti_vtac_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c new file mode 100644 index 000000000000..740d6e347a62 --- /dev/null +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -0,0 +1,366 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> + * Fabien Dessenne <fabien.dessenne@st.com> + * Vincent Abriou <vincent.abriou@st.com> + * for STMicroelectronics. 
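Editorial aside, not part of the patch: the TX PHY bring-up just above repeats the same read-modify-write of VTAC_SYS_CFG8521 once per bit. Purely as an illustration of that pattern, a hypothetical helper (the name does not exist in the driver) would capture each step:

/* Illustrative sketch only: set 'bits' in VTAC_SYS_CFG8521. */
static void vtac_tx_phy_set_bits(struct sti_vtac *vtac, u32 bits)
{
	u32 val = readl(vtac->phy_regs + VTAC_SYS_CFG8521);

	writel(val | bits, vtac->phy_regs + VTAC_SYS_CFG8521);
}

/*
 * After the initial writes of 0 to VTAC_SYS_CFG8522 and of
 * VTAC_TX_PHY_ENABLE_CLK_PHY to VTAC_SYS_CFG8521, the sequence in
 * sti_vtac_tx_set_config() is then equivalent to:
 *
 *	vtac_tx_phy_set_bits(vtac, VTAC_TX_PHY_PROG_N3);
 *	vtac_tx_phy_set_bits(vtac, VTAC_TX_PHY_ENABLE_CLK_DLL);
 *	vtac_tx_phy_set_bits(vtac, VTAC_TX_PHY_RST_N_DLL_SWITCH);
 *	vtac_tx_phy_set_bits(vtac, VTAC_TX_PHY_PLL_NOT_OSC_MODE);
 */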
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> + +#include <drm/drmP.h> + +#include "sti_vtg.h" + +#define VTG_TYPE_MASTER 0 +#define VTG_TYPE_SLAVE_BY_EXT0 1 + +/* registers offset */ +#define VTG_MODE 0x0000 +#define VTG_CLKLN 0x0008 +#define VTG_HLFLN 0x000C +#define VTG_DRST_AUTOC 0x0010 +#define VTG_VID_TFO 0x0040 +#define VTG_VID_TFS 0x0044 +#define VTG_VID_BFO 0x0048 +#define VTG_VID_BFS 0x004C + +#define VTG_HOST_ITS 0x0078 +#define VTG_HOST_ITS_BCLR 0x007C +#define VTG_HOST_ITM_BCLR 0x0088 +#define VTG_HOST_ITM_BSET 0x008C + +#define VTG_H_HD_1 0x00C0 +#define VTG_TOP_V_VD_1 0x00C4 +#define VTG_BOT_V_VD_1 0x00C8 +#define VTG_TOP_V_HD_1 0x00CC +#define VTG_BOT_V_HD_1 0x00D0 + +#define VTG_H_HD_2 0x00E0 +#define VTG_TOP_V_VD_2 0x00E4 +#define VTG_BOT_V_VD_2 0x00E8 +#define VTG_TOP_V_HD_2 0x00EC +#define VTG_BOT_V_HD_2 0x00F0 + +#define VTG_H_HD_3 0x0100 +#define VTG_TOP_V_VD_3 0x0104 +#define VTG_BOT_V_VD_3 0x0108 +#define VTG_TOP_V_HD_3 0x010C +#define VTG_BOT_V_HD_3 0x0110 + +#define VTG_IRQ_BOTTOM BIT(0) +#define VTG_IRQ_TOP BIT(1) +#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM) + +/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */ +#define AWG_DELAY_HD (-9) +#define AWG_DELAY_ED (-8) +#define AWG_DELAY_SD (-7) + +LIST_HEAD(vtg_lookup); + +/** + * STI VTG structure + * + * @dev: pointer to device driver + * @data: data associated to the device + * @irq: VTG irq + * @type: VTG type (main or aux) + * @notifier_list: notifier callback + * @crtc_id: the crtc id for vblank event + * @slave: slave vtg + * @link: List node to link the structure in lookup list + */ +struct sti_vtg { + struct device *dev; + struct device_node *np; + void __iomem *regs; + int irq; + u32 irq_status; + struct raw_notifier_head notifier_list; + int crtc_id; + struct sti_vtg *slave; + struct list_head link; +}; + +static void vtg_register(struct sti_vtg *vtg) +{ + list_add_tail(&vtg->link, &vtg_lookup); +} + +struct sti_vtg *of_vtg_find(struct device_node *np) +{ + struct sti_vtg *vtg; + + list_for_each_entry(vtg, &vtg_lookup, link) { + if (vtg->np == np) + return vtg; + } + return NULL; +} +EXPORT_SYMBOL(of_vtg_find); + +static void vtg_reset(struct sti_vtg *vtg) +{ + /* reset slave and then master */ + if (vtg->slave) + vtg_reset(vtg->slave); + + writel(1, vtg->regs + VTG_DRST_AUTOC); +} + +static void vtg_set_mode(struct sti_vtg *vtg, + int type, const struct drm_display_mode *mode) +{ + u32 tmp; + + if (vtg->slave) + vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode); + + writel(mode->htotal, vtg->regs + VTG_CLKLN); + writel(mode->vtotal * 2, vtg->regs + VTG_HLFLN); + + tmp = (mode->vtotal - mode->vsync_start + 1) << 16; + tmp |= mode->htotal - mode->hsync_start; + writel(tmp, vtg->regs + VTG_VID_TFO); + writel(tmp, vtg->regs + VTG_VID_BFO); + + tmp = (mode->vdisplay + mode->vtotal - mode->vsync_start + 1) << 16; + tmp |= mode->hdisplay + mode->htotal - mode->hsync_start; + writel(tmp, vtg->regs + VTG_VID_TFS); + writel(tmp, vtg->regs + VTG_VID_BFS); + + /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */ + tmp = (mode->hsync_end - mode->hsync_start) << 16; + writel(tmp, vtg->regs + VTG_H_HD_1); + writel(tmp, vtg->regs + VTG_H_HD_2); + + tmp = (mode->vsync_end - mode->vsync_start + 1) << 16; + tmp |= 1; + writel(tmp, vtg->regs + VTG_TOP_V_VD_1); + writel(tmp, vtg->regs + VTG_BOT_V_VD_1); + writel(0, vtg->regs + VTG_TOP_V_HD_1); + writel(0, vtg->regs 
+ VTG_BOT_V_HD_1); + + /* prepare VTG set 2 for HD DCS */ + writel(tmp, vtg->regs + VTG_TOP_V_VD_2); + writel(tmp, vtg->regs + VTG_BOT_V_VD_2); + writel(0, vtg->regs + VTG_TOP_V_HD_2); + writel(0, vtg->regs + VTG_BOT_V_HD_2); + + /* prepare VTG set 3 for HD Analog in HD mode */ + tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16; + tmp |= mode->htotal + AWG_DELAY_HD; + writel(tmp, vtg->regs + VTG_H_HD_3); + + tmp = (mode->vsync_end - mode->vsync_start) << 16; + tmp |= mode->vtotal; + writel(tmp, vtg->regs + VTG_TOP_V_VD_3); + writel(tmp, vtg->regs + VTG_BOT_V_VD_3); + + tmp = (mode->htotal + AWG_DELAY_HD) << 16; + tmp |= mode->htotal + AWG_DELAY_HD; + writel(tmp, vtg->regs + VTG_TOP_V_HD_3); + writel(tmp, vtg->regs + VTG_BOT_V_HD_3); + + /* mode */ + writel(type, vtg->regs + VTG_MODE); +} + +static void vtg_enable_irq(struct sti_vtg *vtg) +{ + /* clear interrupt status and mask */ + writel(0xFFFF, vtg->regs + VTG_HOST_ITS_BCLR); + writel(0xFFFF, vtg->regs + VTG_HOST_ITM_BCLR); + writel(VTG_IRQ_MASK, vtg->regs + VTG_HOST_ITM_BSET); +} + +void sti_vtg_set_config(struct sti_vtg *vtg, + const struct drm_display_mode *mode) +{ + /* write configuration */ + vtg_set_mode(vtg, VTG_TYPE_MASTER, mode); + + vtg_reset(vtg); + + /* enable irq for the vtg vblank synchro */ + if (vtg->slave) + vtg_enable_irq(vtg->slave); + else + vtg_enable_irq(vtg); +} +EXPORT_SYMBOL(sti_vtg_set_config); + +/** + * sti_vtg_get_line_number + * + * @mode: display mode to be used + * @y: line + * + * Return the line number according to the display mode taking + * into account the Sync and Back Porch information. + * Video frame line numbers start at 1, y starts at 0. + * In interlaced modes the start line is the field line number of the odd + * field, but y is still defined as a progressive frame. + */ +u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y) +{ + u32 start_line = mode.vtotal - mode.vsync_start + 1; + + if (mode.flags & DRM_MODE_FLAG_INTERLACE) + start_line *= 2; + + return start_line + y; +} +EXPORT_SYMBOL(sti_vtg_get_line_number); + +/** + * sti_vtg_get_pixel_number + * + * @mode: display mode to be used + * @x: horizontal pixel position + * + * Return the pixel number according to the display mode taking + * into account the Sync and Back Porch information. + * Pixels are counted from 0. + */ +u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x) +{ + return mode.htotal - mode.hsync_start + x; +} +EXPORT_SYMBOL(sti_vtg_get_pixel_number); + +int sti_vtg_register_client(struct sti_vtg *vtg, + struct notifier_block *nb, int crtc_id) +{ + if (vtg->slave) + return sti_vtg_register_client(vtg->slave, nb, crtc_id); + + vtg->crtc_id = crtc_id; + return raw_notifier_chain_register(&vtg->notifier_list, nb); +} +EXPORT_SYMBOL(sti_vtg_register_client); + +int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb) +{ + if (vtg->slave) + return sti_vtg_unregister_client(vtg->slave, nb); + + return raw_notifier_chain_unregister(&vtg->notifier_list, nb); +} +EXPORT_SYMBOL(sti_vtg_unregister_client); + +static irqreturn_t vtg_irq_thread(int irq, void *arg) +{ + struct sti_vtg *vtg = arg; + u32 event; + + event = (vtg->irq_status & VTG_IRQ_TOP) ?
+ VTG_TOP_FIELD_EVENT : VTG_BOTTOM_FIELD_EVENT; + + raw_notifier_call_chain(&vtg->notifier_list, event, &vtg->crtc_id); + + return IRQ_HANDLED; +} + +static irqreturn_t vtg_irq(int irq, void *arg) +{ + struct sti_vtg *vtg = arg; + + vtg->irq_status = readl(vtg->regs + VTG_HOST_ITS); + + writel(vtg->irq_status, vtg->regs + VTG_HOST_ITS_BCLR); + + /* force sync bus write */ + readl(vtg->regs + VTG_HOST_ITS); + + return IRQ_WAKE_THREAD; +} + +static int vtg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np; + struct sti_vtg *vtg; + struct resource *res; + char irq_name[32]; + int ret; + + vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL); + if (!vtg) + return -ENOMEM; + + vtg->dev = dev; + vtg->np = pdev->dev.of_node; + + /* Get Memory ressources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + DRM_ERROR("Get memory resource failed\n"); + return -ENOMEM; + } + vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + + np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); + if (np) { + vtg->slave = of_vtg_find(np); + + if (!vtg->slave) + return -EPROBE_DEFER; + } else { + vtg->irq = platform_get_irq(pdev, 0); + if (IS_ERR_VALUE(vtg->irq)) { + DRM_ERROR("Failed to get VTG interrupt\n"); + return vtg->irq; + } + + snprintf(irq_name, sizeof(irq_name), "vsync-%s", + dev_name(vtg->dev)); + + RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list); + + ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq, + vtg_irq_thread, IRQF_ONESHOT, irq_name, vtg); + if (IS_ERR_VALUE(ret)) { + DRM_ERROR("Failed to register VTG interrupt\n"); + return ret; + } + } + + vtg_register(vtg); + platform_set_drvdata(pdev, vtg); + + DRM_INFO("%s %s\n", __func__, dev_name(vtg->dev)); + + return 0; +} + +static int vtg_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id vtg_of_match[] = { + { .compatible = "st,vtg", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, vtg_of_match); + +struct platform_driver sti_vtg_driver = { + .driver = { + .name = "sti-vtg", + .owner = THIS_MODULE, + .of_match_table = vtg_of_match, + }, + .probe = vtg_probe, + .remove = vtg_remove, +}; + +module_platform_driver(sti_vtg_driver); + +MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h new file mode 100644 index 000000000000..e84d23f1f57f --- /dev/null +++ b/drivers/gpu/drm/sti/sti_vtg.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) STMicroelectronics SA 2014 + * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. 
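Editorial aside, not part of the patch: sti_vtg_register_client() above hooks a raw notifier chain that vtg_irq_thread() fires with VTG_TOP_FIELD_EVENT or VTG_BOTTOM_FIELD_EVENT, passing a pointer to the crtc id. A minimal, hypothetical consumer (illustrative names, assuming <linux/notifier.h> and "sti_vtg.h" are available) might look like:

/* Illustrative sketch of a vblank consumer of the VTG notifier API. */
static int example_vtg_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	int crtc_id = *(int *)data;	/* &vtg->crtc_id from the chain */

	if (event == VTG_TOP_FIELD_EVENT || event == VTG_BOTTOM_FIELD_EVENT)
		pr_debug("field event on crtc %d\n", crtc_id);

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_vtg_event,
};

static int example_enable_vblank(struct device_node *vtg_np, int crtc_id)
{
	struct sti_vtg *vtg = of_vtg_find(vtg_np);

	if (!vtg)
		return -EPROBE_DEFER;

	return sti_vtg_register_client(vtg, &example_nb, crtc_id);
}

In the driver itself this role is played by the CRTC code (not in this hunk), which typically forwards the event to the DRM vblank machinery.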
+ * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _STI_VTG_H_ +#define _STI_VTG_H_ + +#define VTG_TOP_FIELD_EVENT 1 +#define VTG_BOTTOM_FIELD_EVENT 2 + +struct sti_vtg; +struct drm_display_mode; +struct notifier_block; + +struct sti_vtg *of_vtg_find(struct device_node *np); +void sti_vtg_set_config(struct sti_vtg *vtg, + const struct drm_display_mode *mode); +int sti_vtg_register_client(struct sti_vtg *vtg, + struct notifier_block *nb, int crtc_id); +int sti_vtg_unregister_client(struct sti_vtg *vtg, + struct notifier_block *nb); + +u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y); +u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x); + +#endif diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index ef40381f3909..6553fd238685 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -18,6 +18,8 @@ struct tegra_dc_soc_info { bool supports_interlacing; bool supports_cursor; + bool supports_block_linear; + unsigned int pitch_align; }; struct tegra_plane { @@ -212,15 +214,44 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET); tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET); - if (window->tiled) { - value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV | - DC_WIN_BUFFER_ADDR_MODE_TILE; + if (dc->soc->supports_block_linear) { + unsigned long height = window->tiling.value; + + switch (window->tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + value = DC_WINBUF_SURFACE_KIND_PITCH; + break; + + case TEGRA_BO_TILING_MODE_TILED: + value = DC_WINBUF_SURFACE_KIND_TILED; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) | + DC_WINBUF_SURFACE_KIND_BLOCK; + break; + } + + tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND); } else { - value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV | - DC_WIN_BUFFER_ADDR_MODE_LINEAR; - } + switch (window->tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV | + DC_WIN_BUFFER_ADDR_MODE_LINEAR; + break; - tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE); + case TEGRA_BO_TILING_MODE_TILED: + value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV | + DC_WIN_BUFFER_ADDR_MODE_TILE; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + DRM_ERROR("hardware doesn't support block linear mode\n"); + return -EINVAL; + } + + tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE); + } value = WIN_ENABLE; @@ -288,6 +319,7 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, struct tegra_dc *dc = to_tegra_dc(crtc); struct tegra_dc_window window; unsigned int i; + int err; memset(&window, 0, sizeof(window)); window.src.x = src_x >> 16; @@ -301,7 +333,10 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, window.format = tegra_dc_format(fb->pixel_format, &window.swap); window.bits_per_pixel = fb->bits_per_pixel; window.bottom_up = tegra_fb_is_bottom_up(fb); - window.tiled = tegra_fb_is_tiled(fb); + + err = tegra_fb_get_tiling(fb, &window.tiling); + if (err < 0) + return err; for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { struct tegra_bo *bo = tegra_fb_get_plane(fb, i); @@ -402,8 +437,14 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, { struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); unsigned int h_offset = 0, v_offset = 0; + struct tegra_bo_tiling tiling; unsigned int format, swap; unsigned long value; + int err; + + err = tegra_fb_get_tiling(fb, &tiling); + if (err < 
0) + return err; tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); @@ -417,15 +458,44 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH); tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP); - if (tegra_fb_is_tiled(fb)) { - value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV | - DC_WIN_BUFFER_ADDR_MODE_TILE; + if (dc->soc->supports_block_linear) { + unsigned long height = tiling.value; + + switch (tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + value = DC_WINBUF_SURFACE_KIND_PITCH; + break; + + case TEGRA_BO_TILING_MODE_TILED: + value = DC_WINBUF_SURFACE_KIND_TILED; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) | + DC_WINBUF_SURFACE_KIND_BLOCK; + break; + } + + tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND); } else { - value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV | - DC_WIN_BUFFER_ADDR_MODE_LINEAR; - } + switch (tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV | + DC_WIN_BUFFER_ADDR_MODE_LINEAR; + break; + + case TEGRA_BO_TILING_MODE_TILED: + value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV | + DC_WIN_BUFFER_ADDR_MODE_TILE; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + DRM_ERROR("hardware doesn't support block linear mode\n"); + return -EINVAL; + } - tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE); + tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE); + } /* make sure bottom-up buffers are properly displayed */ if (tegra_fb_is_bottom_up(fb)) { @@ -1214,12 +1284,20 @@ static int tegra_dc_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_dc *dc = host1x_client_to_dc(client); + struct tegra_drm *tegra = drm->dev_private; int err; drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs); drm_mode_crtc_set_gamma_size(&dc->base, 256); drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs); + /* + * Keep track of the minimum pitch alignment across all display + * controllers. 
+ */ + if (dc->soc->pitch_align > tegra->pitch_align) + tegra->pitch_align = dc->soc->pitch_align; + err = tegra_dc_rgb_init(drm, dc); if (err < 0 && err != -ENODEV) { dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); @@ -1277,16 +1355,29 @@ static const struct host1x_client_ops dc_client_ops = { static const struct tegra_dc_soc_info tegra20_dc_soc_info = { .supports_interlacing = false, .supports_cursor = false, + .supports_block_linear = false, + .pitch_align = 8, }; static const struct tegra_dc_soc_info tegra30_dc_soc_info = { .supports_interlacing = false, .supports_cursor = false, + .supports_block_linear = false, + .pitch_align = 8, +}; + +static const struct tegra_dc_soc_info tegra114_dc_soc_info = { + .supports_interlacing = false, + .supports_cursor = false, + .supports_block_linear = false, + .pitch_align = 64, }; static const struct tegra_dc_soc_info tegra124_dc_soc_info = { .supports_interlacing = true, .supports_cursor = true, + .supports_block_linear = true, + .pitch_align = 64, }; static const struct of_device_id tegra_dc_of_match[] = { @@ -1303,6 +1394,7 @@ static const struct of_device_id tegra_dc_of_match[] = { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, tegra_dc_of_match); static int tegra_dc_parse_dt(struct tegra_dc *dc) { @@ -1430,6 +1522,7 @@ static int tegra_dc_remove(struct platform_device *pdev) return err; } + reset_control_assert(dc->rst); clk_disable_unprepare(dc->clk); return 0; diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 78c5feff95d2..705c93b00794 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -428,6 +428,11 @@ #define DC_WINBUF_ADDR_V_OFFSET_NS 0x809 #define DC_WINBUF_UFLOW_STATUS 0x80a +#define DC_WINBUF_SURFACE_KIND 0x80b +#define DC_WINBUF_SURFACE_KIND_PITCH (0 << 0) +#define DC_WINBUF_SURFACE_KIND_TILED (1 << 0) +#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0) +#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4) #define DC_WINBUF_AD_UFLOW_STATUS 0xbca #define DC_WINBUF_BD_UFLOW_STATUS 0xdca diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index 3f132e356e9c..708f783ead47 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -382,6 +382,7 @@ static const struct of_device_id tegra_dpaux_of_match[] = { { .compatible = "nvidia,tegra124-dpaux", }, { }, }; +MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match); struct platform_driver tegra_dpaux_driver = { .driver = { diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 3396f9f6a9f7..59736bb810cd 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -40,6 +40,12 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) drm_mode_config_init(drm); + err = tegra_drm_fb_prepare(drm); + if (err < 0) + return err; + + drm_kms_helper_poll_init(drm); + err = host1x_device_init(device); if (err < 0) return err; @@ -59,8 +65,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) if (err < 0) return err; - drm_kms_helper_poll_init(drm); - return 0; } @@ -128,6 +132,45 @@ host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle) return &bo->base; } +static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, + struct drm_tegra_reloc __user *src, + struct drm_device *drm, + struct drm_file *file) +{ + u32 cmdbuf, target; + int err; + + err = get_user(cmdbuf, &src->cmdbuf.handle); + if (err < 0) + return err; + + err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset); + if (err < 0) + 
return err; + + err = get_user(target, &src->target.handle); + if (err < 0) + return err; + + err = get_user(dest->target.offset, &src->cmdbuf.offset); + if (err < 0) + return err; + + err = get_user(dest->shift, &src->shift); + if (err < 0) + return err; + + dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf); + if (!dest->cmdbuf.bo) + return -ENOENT; + + dest->target.bo = host1x_bo_lookup(drm, file, target); + if (!dest->target.bo) + return -ENOENT; + + return 0; +} + int tegra_drm_submit(struct tegra_drm_context *context, struct drm_tegra_submit *args, struct drm_device *drm, struct drm_file *file) @@ -180,26 +223,13 @@ int tegra_drm_submit(struct tegra_drm_context *context, cmdbufs++; } - if (copy_from_user(job->relocarray, relocs, - sizeof(*relocs) * num_relocs)) { - err = -EFAULT; - goto fail; - } - + /* copy and resolve relocations from submit */ while (num_relocs--) { - struct host1x_reloc *reloc = &job->relocarray[num_relocs]; - struct host1x_bo *cmdbuf, *target; - - cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf); - target = host1x_bo_lookup(drm, file, (u32)reloc->target); - - reloc->cmdbuf = cmdbuf; - reloc->target = target; - - if (!reloc->target || !reloc->cmdbuf) { - err = -ENOENT; + err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs], + &relocs[num_relocs], drm, + file); + if (err < 0) goto fail; - } } if (copy_from_user(job->waitchk, waitchks, @@ -451,11 +481,151 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data, return 0; } + +static int tegra_gem_set_tiling(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_set_tiling *args = data; + enum tegra_bo_tiling_mode mode; + struct drm_gem_object *gem; + unsigned long value = 0; + struct tegra_bo *bo; + + switch (args->mode) { + case DRM_TEGRA_GEM_TILING_MODE_PITCH: + mode = TEGRA_BO_TILING_MODE_PITCH; + + if (args->value != 0) + return -EINVAL; + + break; + + case DRM_TEGRA_GEM_TILING_MODE_TILED: + mode = TEGRA_BO_TILING_MODE_TILED; + + if (args->value != 0) + return -EINVAL; + + break; + + case DRM_TEGRA_GEM_TILING_MODE_BLOCK: + mode = TEGRA_BO_TILING_MODE_BLOCK; + + if (args->value > 5) + return -EINVAL; + + value = args->value; + break; + + default: + return -EINVAL; + } + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + + bo->tiling.mode = mode; + bo->tiling.value = value; + + drm_gem_object_unreference(gem); + + return 0; +} + +static int tegra_gem_get_tiling(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_get_tiling *args = data; + struct drm_gem_object *gem; + struct tegra_bo *bo; + int err = 0; + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + + switch (bo->tiling.mode) { + case TEGRA_BO_TILING_MODE_PITCH: + args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH; + args->value = 0; + break; + + case TEGRA_BO_TILING_MODE_TILED: + args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED; + args->value = 0; + break; + + case TEGRA_BO_TILING_MODE_BLOCK: + args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK; + args->value = bo->tiling.value; + break; + + default: + err = -EINVAL; + break; + } + + drm_gem_object_unreference(gem); + + return err; +} + +static int tegra_gem_set_flags(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_set_flags *args = data; + struct drm_gem_object *gem; + struct tegra_bo *bo; + + if (args->flags & ~DRM_TEGRA_GEM_FLAGS) + return -EINVAL; + + 
gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + bo->flags = 0; + + if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP) + bo->flags |= TEGRA_BO_BOTTOM_UP; + + drm_gem_object_unreference(gem); + + return 0; +} + +static int tegra_gem_get_flags(struct drm_device *drm, void *data, + struct drm_file *file) +{ + struct drm_tegra_gem_get_flags *args = data; + struct drm_gem_object *gem; + struct tegra_bo *bo; + + gem = drm_gem_object_lookup(drm, file, args->handle); + if (!gem) + return -ENOENT; + + bo = to_tegra_bo(gem); + args->flags = 0; + + if (bo->flags & TEGRA_BO_BOTTOM_UP) + args->flags |= DRM_TEGRA_GEM_BOTTOM_UP; + + drm_gem_object_unreference(gem); + + return 0; +} #endif static const struct drm_ioctl_desc tegra_drm_ioctls[] = { #ifdef CONFIG_DRM_TEGRA_STAGING - DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED), @@ -465,6 +635,10 @@ static const struct drm_ioctl_desc tegra_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED), #endif }; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 6b8fe9d86ed4..e89c70fa82d5 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -19,6 +19,8 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_fixed.h> +#include "gem.h" + struct reset_control; struct tegra_fb { @@ -43,6 +45,8 @@ struct tegra_drm { #ifdef CONFIG_DRM_TEGRA_FBDEV struct tegra_fbdev *fbdev; #endif + + unsigned int pitch_align; }; struct tegra_drm_client; @@ -160,7 +164,8 @@ struct tegra_dc_window { unsigned int stride[2]; unsigned long base[3]; bool bottom_up; - bool tiled; + + struct tegra_bo_tiling tiling; }; /* from dc.c */ @@ -279,7 +284,9 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link, struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, unsigned int index); bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); -bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer); +int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, + struct tegra_bo_tiling *tiling); +int tegra_drm_fb_prepare(struct drm_device *drm); int tegra_drm_fb_init(struct drm_device *drm); void tegra_drm_fb_exit(struct drm_device *drm); #ifdef CONFIG_DRM_TEGRA_FBDEV diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index bd56f2affa78..f7874458926a 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -474,7 +474,8 @@ static int tegra_output_dsi_enable(struct tegra_output *output) tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL); value = tegra_dsi_readl(dsi, DSI_CONTROL); - value |= DSI_CONTROL_HS_CLK_CTRL; + if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + value |= DSI_CONTROL_HS_CLK_CTRL; value &= ~DSI_CONTROL_TX_TRIG(3); value 
&= ~DSI_CONTROL_DCS_ENABLE; value |= DSI_CONTROL_VIDEO_ENABLE; @@ -982,6 +983,7 @@ static const struct of_device_id tegra_dsi_of_match[] = { { .compatible = "nvidia,tegra114-dsi", }, { }, }; +MODULE_DEVICE_TABLE(of, tegra_dsi_of_match); struct platform_driver tegra_dsi_driver = { .driver = { diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 9798a7080322..3513d12d5aa1 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -46,14 +46,15 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer) return false; } -bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer) +int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, + struct tegra_bo_tiling *tiling) { struct tegra_fb *fb = to_tegra_fb(framebuffer); - if (fb->planes[0]->flags & TEGRA_BO_TILED) - return true; + /* TODO: handle YUV formats? */ + *tiling = fb->planes[0]->tiling; - return false; + return 0; } static void tegra_fb_destroy(struct drm_framebuffer *framebuffer) @@ -193,6 +194,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct tegra_fbdev *fbdev = to_tegra_fbdev(helper); + struct tegra_drm *tegra = helper->dev->dev_private; struct drm_device *drm = helper->dev; struct drm_mode_fb_cmd2 cmd = { 0 }; unsigned int bytes_per_pixel; @@ -207,7 +209,8 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, cmd.width = sizes->surface_width; cmd.height = sizes->surface_height; - cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; + cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel, + tegra->pitch_align); cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); @@ -267,18 +270,13 @@ release: return err; } -static struct drm_fb_helper_funcs tegra_fb_helper_funcs = { +static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = { .fb_probe = tegra_fbdev_probe, }; -static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm, - unsigned int preferred_bpp, - unsigned int num_crtc, - unsigned int max_connectors) +static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm) { - struct drm_fb_helper *helper; struct tegra_fbdev *fbdev; - int err; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); if (!fbdev) { @@ -286,13 +284,23 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm, return ERR_PTR(-ENOMEM); } - fbdev->base.funcs = &tegra_fb_helper_funcs; - helper = &fbdev->base; + drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs); + + return fbdev; +} + +static int tegra_fbdev_init(struct tegra_fbdev *fbdev, + unsigned int preferred_bpp, + unsigned int num_crtc, + unsigned int max_connectors) +{ + struct drm_device *drm = fbdev->base.dev; + int err; err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors); if (err < 0) { dev_err(drm->dev, "failed to initialize DRM FB helper\n"); - goto free; + return err; } err = drm_fb_helper_single_add_all_connectors(&fbdev->base); @@ -301,21 +309,17 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm, goto fini; } - drm_helper_disable_unused_functions(drm); - err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp); if (err < 0) { dev_err(drm->dev, "failed to set initial configuration\n"); goto fini; } - return fbdev; + return 0; fini: drm_fb_helper_fini(&fbdev->base); -free: - kfree(fbdev); - return ERR_PTR(err); + return err; } static void tegra_fbdev_free(struct tegra_fbdev *fbdev) @@ -366,7 +370,7 @@ static const struct drm_mode_config_funcs 
tegra_drm_mode_funcs = { #endif }; -int tegra_drm_fb_init(struct drm_device *drm) +int tegra_drm_fb_prepare(struct drm_device *drm) { #ifdef CONFIG_DRM_TEGRA_FBDEV struct tegra_drm *tegra = drm->dev_private; @@ -381,8 +385,7 @@ int tegra_drm_fb_init(struct drm_device *drm) drm->mode_config.funcs = &tegra_drm_mode_funcs; #ifdef CONFIG_DRM_TEGRA_FBDEV - tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc, - drm->mode_config.num_connector); + tegra->fbdev = tegra_fbdev_create(drm); if (IS_ERR(tegra->fbdev)) return PTR_ERR(tegra->fbdev); #endif @@ -390,6 +393,21 @@ int tegra_drm_fb_init(struct drm_device *drm) return 0; } +int tegra_drm_fb_init(struct drm_device *drm) +{ +#ifdef CONFIG_DRM_TEGRA_FBDEV + struct tegra_drm *tegra = drm->dev_private; + int err; + + err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc, + drm->mode_config.num_connector); + if (err < 0) + return err; +#endif + + return 0; +} + void tegra_drm_fb_exit(struct drm_device *drm) { #ifdef CONFIG_DRM_TEGRA_FBDEV diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 78cc8143760a..ce023fa3e8ae 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -16,6 +16,7 @@ #include <linux/dma-buf.h> #include <drm/tegra_drm.h> +#include "drm.h" #include "gem.h" static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo) @@ -126,7 +127,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size, goto err_mmap; if (flags & DRM_TEGRA_GEM_CREATE_TILED) - bo->flags |= TEGRA_BO_TILED; + bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) bo->flags |= TEGRA_BO_BOTTOM_UP; @@ -259,8 +260,10 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, struct drm_mode_create_dumb *args) { int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + struct tegra_drm *tegra = drm->dev_private; struct tegra_bo *bo; + min_pitch = round_up(min_pitch, tegra->pitch_align); if (args->pitch < min_pitch) args->pitch = min_pitch; diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index 2f3fe96c5154..43a25c853357 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -16,8 +16,18 @@ #include <drm/drm.h> #include <drm/drmP.h> -#define TEGRA_BO_TILED (1 << 0) -#define TEGRA_BO_BOTTOM_UP (1 << 1) +#define TEGRA_BO_BOTTOM_UP (1 << 0) + +enum tegra_bo_tiling_mode { + TEGRA_BO_TILING_MODE_PITCH, + TEGRA_BO_TILING_MODE_TILED, + TEGRA_BO_TILING_MODE_BLOCK, +}; + +struct tegra_bo_tiling { + enum tegra_bo_tiling_mode mode; + unsigned long value; +}; struct tegra_bo { struct drm_gem_object gem; @@ -26,6 +36,8 @@ struct tegra_bo { struct sg_table *sgt; dma_addr_t paddr; void *vaddr; + + struct tegra_bo_tiling tiling; }; static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem) diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index 7c53941f2a9e..02cd3e37a6ec 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -121,6 +121,7 @@ static const struct of_device_id gr2d_match[] = { { .compatible = "nvidia,tegra20-gr2d" }, { }, }; +MODULE_DEVICE_TABLE(of, gr2d_match); static const u32 gr2d_addr_regs[] = { GR2D_UA_BASE_ADDR, diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 30f5ba9bd6d0..0b3f2b977ba0 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -12,7 +12,8 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/reset.h> -#include 
<linux/tegra-powergate.h> + +#include <soc/tegra/pmc.h> #include "drm.h" #include "gem.h" @@ -130,6 +131,7 @@ static const struct of_device_id tegra_gr3d_match[] = { { .compatible = "nvidia,tegra20-gr3d" }, { } }; +MODULE_DEVICE_TABLE(of, tegra_gr3d_match); static const u32 gr3d_addr_regs[] = { GR3D_IDX_ATTRIBUTE( 0), diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index ba067bb767e3..ffe26547328d 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1450,6 +1450,7 @@ static const struct of_device_id tegra_hdmi_of_match[] = { { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config }, { }, }; +MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match); static int tegra_hdmi_probe(struct platform_device *pdev) { diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index a3e4f1eca6f7..0c67d7eebc94 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -105,7 +105,7 @@ static void drm_connector_clear(struct drm_connector *connector) static void tegra_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); drm_connector_clear(connector); } @@ -140,7 +140,9 @@ static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode) if (mode != DRM_MODE_DPMS_ON) { drm_panel_disable(panel); tegra_output_disable(output); + drm_panel_unprepare(panel); } else { + drm_panel_prepare(panel); tegra_output_enable(output); drm_panel_enable(panel); } @@ -318,7 +320,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); drm_mode_connector_attach_encoder(&output->connector, &output->encoder); - drm_sysfs_connector_add(&output->connector); + drm_connector_register(&output->connector); output->encoder.possible_crtcs = 0x3; diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 27c979b50111..7829e81f065d 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -11,7 +11,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/reset.h> -#include <linux/tegra-powergate.h> + +#include <soc/tegra/pmc.h> #include <drm/drm_dp_helper.h> @@ -516,7 +517,7 @@ static int tegra_output_sor_enable(struct tegra_output *output) if (err < 0) { dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - return err; + goto unlock; } } @@ -525,7 +526,7 @@ static int tegra_output_sor_enable(struct tegra_output *output) dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); memset(&config, 0, sizeof(config)); - config.bits_per_pixel = 24; /* XXX: don't hardcode? 
*/ + config.bits_per_pixel = output->connector.display_info.bpc * 3; err = tegra_sor_calc_config(sor, mode, &config, &link); if (err < 0) @@ -815,12 +816,22 @@ static int tegra_output_sor_enable(struct tegra_output *output) * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete * raster, associate with display controller) */ - value = SOR_STATE_ASY_VSYNCPOL | - SOR_STATE_ASY_HSYNCPOL | - SOR_STATE_ASY_PROTOCOL_DP_A | + value = SOR_STATE_ASY_PROTOCOL_DP_A | SOR_STATE_ASY_CRC_MODE_COMPLETE | SOR_STATE_ASY_OWNER(dc->pipe + 1); + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + value &= ~SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + value |= SOR_STATE_ASY_HSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + value &= ~SOR_STATE_ASY_VSYNCPOL; + + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + value |= SOR_STATE_ASY_VSYNCPOL; + switch (config.bits_per_pixel) { case 24: value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444; @@ -1455,6 +1466,7 @@ static const struct of_device_id tegra_sor_of_match[] = { { .compatible = "nvidia,tegra124-sor", }, { }, }; +MODULE_DEVICE_TABLE(of, tegra_sor_of_match); struct platform_driver tegra_sor_driver = { .driver = { diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index b20b69488dc9..6be623b4a86f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -120,8 +120,8 @@ static int cpufreq_transition(struct notifier_block *nb, static int tilcdc_unload(struct drm_device *dev) { struct tilcdc_drm_private *priv = dev->dev_private; - struct tilcdc_module *mod, *cur; + drm_fbdev_cma_fini(priv->fbdev); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); @@ -148,11 +148,6 @@ static int tilcdc_unload(struct drm_device *dev) pm_runtime_disable(dev->dev); - list_for_each_entry_safe(mod, cur, &module_list, list) { - DBG("destroying module: %s", mod->name); - mod->funcs->destroy(mod); - } - kfree(priv); return 0; @@ -628,13 +623,13 @@ static int __init tilcdc_drm_init(void) static void __exit tilcdc_drm_fini(void) { DBG("fini"); - tilcdc_tfp410_fini(); - tilcdc_slave_fini(); - tilcdc_panel_fini(); platform_driver_unregister(&tilcdc_platform_driver); + tilcdc_panel_fini(); + tilcdc_slave_fini(); + tilcdc_tfp410_fini(); } -late_initcall(tilcdc_drm_init); +module_init(tilcdc_drm_init); module_exit(tilcdc_drm_fini); MODULE_AUTHOR("Rob Clark <robdclark@gmail.com"); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index 093803683b25..7596c144a9fb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -98,7 +98,6 @@ struct tilcdc_module; struct tilcdc_module_ops { /* create appropriate encoders/connectors: */ int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev); - void (*destroy)(struct tilcdc_module *mod); #ifdef CONFIG_DEBUG_FS /* create debugfs nodes (can be NULL): */ int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index 86c67329b605..4c7aa1d8134f 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -151,6 +151,7 @@ struct panel_connector { static void panel_connector_destroy(struct drm_connector *connector) { struct panel_connector *panel_connector = to_panel_connector(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(panel_connector); } @@ -247,7 +248,7 @@ static 
struct drm_connector *panel_connector_create(struct drm_device *dev, if (ret) goto fail; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; @@ -281,23 +282,8 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev) return 0; } -static void panel_destroy(struct tilcdc_module *mod) -{ - struct panel_module *panel_mod = to_panel_module(mod); - - if (panel_mod->timings) { - display_timings_release(panel_mod->timings); - kfree(panel_mod->timings); - } - - tilcdc_module_cleanup(mod); - kfree(panel_mod->info); - kfree(panel_mod); -} - static const struct tilcdc_module_ops panel_module_ops = { .modeset_init = panel_modeset_init, - .destroy = panel_destroy, }; /* @@ -373,6 +359,7 @@ static int panel_probe(struct platform_device *pdev) return -ENOMEM; mod = &panel_mod->base; + pdev->dev.platform_data = mod; tilcdc_module_init(mod, "panel", &panel_module_ops); @@ -380,17 +367,16 @@ static int panel_probe(struct platform_device *pdev) if (IS_ERR(pinctrl)) dev_warn(&pdev->dev, "pins are not configured\n"); - panel_mod->timings = of_get_display_timings(node); if (!panel_mod->timings) { dev_err(&pdev->dev, "could not get panel timings\n"); - goto fail; + goto fail_free; } panel_mod->info = of_get_panel_info(node); if (!panel_mod->info) { dev_err(&pdev->dev, "could not get panel info\n"); - goto fail; + goto fail_timings; } mod->preferred_bpp = panel_mod->info->bpp; @@ -401,13 +387,26 @@ static int panel_probe(struct platform_device *pdev) return 0; -fail: - panel_destroy(mod); +fail_timings: + display_timings_release(panel_mod->timings); + +fail_free: + kfree(panel_mod); + tilcdc_module_cleanup(mod); return ret; } static int panel_remove(struct platform_device *pdev) { + struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); + struct panel_module *panel_mod = to_panel_module(mod); + + display_timings_release(panel_mod->timings); + + tilcdc_module_cleanup(mod); + kfree(panel_mod->info); + kfree(panel_mod); + return 0; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c index 595068ba2d5e..3775fd49dac4 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c @@ -166,6 +166,7 @@ struct slave_connector { static void slave_connector_destroy(struct drm_connector *connector) { struct slave_connector *slave_connector = to_slave_connector(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(slave_connector); } @@ -261,7 +262,7 @@ static struct drm_connector *slave_connector_create(struct drm_device *dev, if (ret) goto fail; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; @@ -295,17 +296,8 @@ static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev) return 0; } -static void slave_destroy(struct tilcdc_module *mod) -{ - struct slave_module *slave_mod = to_slave_module(mod); - - tilcdc_module_cleanup(mod); - kfree(slave_mod); -} - static const struct tilcdc_module_ops slave_module_ops = { .modeset_init = slave_modeset_init, - .destroy = slave_destroy, }; /* @@ -355,10 +347,13 @@ static int slave_probe(struct platform_device *pdev) } slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL); - if (!slave_mod) - return -ENOMEM; + if (!slave_mod) { + ret = -ENOMEM; + goto fail_adapter; + } mod = &slave_mod->base; + pdev->dev.platform_data = mod; mod->preferred_bpp = slave_info.bpp; @@ -373,10 +368,20 @@ static int slave_probe(struct platform_device *pdev) 
tilcdc_slave_probedefer(false); return 0; + +fail_adapter: + i2c_put_adapter(slavei2c); + return ret; } static int slave_remove(struct platform_device *pdev) { + struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); + struct slave_module *slave_mod = to_slave_module(mod); + + tilcdc_module_cleanup(mod); + kfree(slave_mod); + return 0; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index c38b56b268ac..354c47ca6374 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -167,6 +167,7 @@ struct tfp410_connector { static void tfp410_connector_destroy(struct drm_connector *connector) { struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(tfp410_connector); } @@ -261,7 +262,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev, if (ret) goto fail; - drm_sysfs_connector_add(connector); + drm_connector_register(connector); return connector; @@ -295,23 +296,8 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev return 0; } -static void tfp410_destroy(struct tilcdc_module *mod) -{ - struct tfp410_module *tfp410_mod = to_tfp410_module(mod); - - if (tfp410_mod->i2c) - i2c_put_adapter(tfp410_mod->i2c); - - if (!IS_ERR_VALUE(tfp410_mod->gpio)) - gpio_free(tfp410_mod->gpio); - - tilcdc_module_cleanup(mod); - kfree(tfp410_mod); -} - static const struct tilcdc_module_ops tfp410_module_ops = { .modeset_init = tfp410_modeset_init, - .destroy = tfp410_destroy, }; /* @@ -341,6 +327,7 @@ static int tfp410_probe(struct platform_device *pdev) return -ENOMEM; mod = &tfp410_mod->base; + pdev->dev.platform_data = mod; tilcdc_module_init(mod, "tfp410", &tfp410_module_ops); @@ -364,6 +351,7 @@ static int tfp410_probe(struct platform_device *pdev) tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node); if (!tfp410_mod->i2c) { dev_err(&pdev->dev, "could not get i2c\n"); + of_node_put(i2c_node); goto fail; } @@ -377,19 +365,32 @@ static int tfp410_probe(struct platform_device *pdev) ret = gpio_request(tfp410_mod->gpio, "DVI_PDn"); if (ret) { dev_err(&pdev->dev, "could not get DVI_PDn gpio\n"); - goto fail; + goto fail_adapter; } } return 0; +fail_adapter: + i2c_put_adapter(tfp410_mod->i2c); + fail: - tfp410_destroy(mod); + kfree(tfp410_mod); + tilcdc_module_cleanup(mod); return ret; } static int tfp410_remove(struct platform_device *pdev) { + struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); + struct tfp410_module *tfp410_mod = to_tfp410_module(mod); + + i2c_put_adapter(tfp410_mod->i2c); + gpio_free(tfp410_mod->gpio); + + tilcdc_module_cleanup(mod); + kfree(tfp410_mod); + return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4ab9f7171c4f..3da89d5dab60 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) int ret; spin_lock(&glob->lru_lock); - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); spin_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, true); @@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, return ret; spin_lock(&glob->lru_lock); - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); /* * We raced, and lost, someone else holds the 
reservation now, @@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) kref_get(&nentry->list_kref); } - ret = __ttm_bo_reserve(entry, false, true, false, 0); + ret = __ttm_bo_reserve(entry, false, true, false, NULL); if (remove_all && ret) { spin_unlock(&glob->lru_lock); ret = __ttm_bo_reserve(entry, false, false, - false, 0); + false, NULL); spin_lock(&glob->lru_lock); } @@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); list_for_each_entry(bo, &man->lru, lru) { - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); if (!ret) break; } @@ -784,7 +784,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, int ret; do { - ret = (*man->func->get_node)(man, bo, placement, mem); + ret = (*man->func->get_node)(man, bo, placement, 0, mem); if (unlikely(ret != 0)) return ret; if (mem->mm_node) @@ -897,7 +897,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (man->has_type && man->use_type) { type_found = true; - ret = (*man->func->get_node)(man, bo, placement, mem); + ret = (*man->func->get_node)(man, bo, placement, + cur_flags, mem); if (unlikely(ret)) return ret; } @@ -937,7 +938,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ttm_flag_masked(&cur_flags, placement->busy_placement[i], ~TTM_PL_MASK_MEMTYPE); - if (mem_type == TTM_PL_SYSTEM) { mem->mem_type = mem_type; mem->placement = cur_flags; @@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) * Using ttm_bo_reserve makes sure the lru lists are updated. */ - ret = ttm_bo_reserve(bo, true, no_wait, false, 0); + ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); if (unlikely(ret != 0)) return ret; spin_lock(&bdev->fence_lock); @@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) spin_lock(&glob->lru_lock); list_for_each_entry(bo, &glob->swap_lru, swap) { - ret = __ttm_bo_reserve(bo, false, true, false, 0); + ret = __ttm_bo_reserve(bo, false, true, false, NULL); if (!ret) break; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index bd850c9f4bca..9e103a4875c8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -50,6 +50,7 @@ struct ttm_range_manager { static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; @@ -67,7 +68,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, if (!node) return -ENOMEM; - if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN) + if (flags & TTM_PL_FLAG_TOPDOWN) aflags = DRM_MM_CREATE_TOP; spin_lock(&rman->lock); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 1df856f78568..30e5d90cb7bc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) pgprot_val(tmp) |= _PAGE_GUARDED; } #endif -#if defined(__ia64__) +#if defined(__ia64__) || defined(__arm__) if (caching_flags & TTM_PL_FLAG_WC) tmp = pgprot_writecombine(tmp); else diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index d7f92fe9d904..66fc6395eb54 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -35,7 +35,7 @@ 
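The ttm_bo_man_get_node() hunk above is the reason the new flags parameter exists: whether to allocate top-down has to be decided from the placement the caller is currently requesting (cur_flags), not from bo->mem.placement, which describes where the buffer already sits and can disagree during a first validation or an eviction. A hedged sketch of that distinction; PL_FLAG_TOPDOWN below is an illustrative bit, not the real TTM_PL_FLAG_TOPDOWN value.

#include <stdio.h>

/* Illustrative bit value only, not the kernel's TTM_PL_FLAG_TOPDOWN encoding. */
#define PL_FLAG_TOPDOWN (1u << 22)

/* Old behaviour: consult the buffer's *current* placement. */
static int alloc_topdown_old(unsigned cur_placement, unsigned requested)
{
	(void)requested;
	return !!(cur_placement & PL_FLAG_TOPDOWN);
}

/* New behaviour: consult the placement the caller is asking for. */
static int alloc_topdown_new(unsigned cur_placement, unsigned requested)
{
	(void)cur_placement;
	return !!(requested & PL_FLAG_TOPDOWN);
}

int main(void)
{
	/* A buffer currently in a bottom-up placement being validated into a
	 * top-down one: only the new variant honours the request. */
	unsigned cur = 0, req = PL_FLAG_TOPDOWN;

	printf("old: %d  new: %d\n",
	       alloc_topdown_old(cur, req), alloc_topdown_new(cur, req));
	return 0;
}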
#include <drm/drm_sysfs.h> static DECLARE_WAIT_QUEUE_HEAD(exit_q); -atomic_t device_released; +static atomic_t device_released; static struct device_type ttm_drm_class_type = { .name = "ttm", diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 863bef9f9234..09874d695188 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, * * @pool: to free the pages from * @free_all: If set to true will free all pages in pool + * @gfp: GFP flags. **/ -static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) +static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, + gfp_t gfp) { unsigned long irq_flags; struct page *p; @@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), - GFP_KERNEL); + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); if (!pages_to_free) { pr_err("Failed to allocate memory for pool free operation\n"); return 0; @@ -382,32 +383,35 @@ out: * * XXX: (dchinner) Deadlock warning! * - * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means - * this can deadlock when called a sc->gfp_mask that is not equal to - * GFP_KERNEL. + * We need to pass sc->gfp_mask to ttm_page_pool_free(). * * This code is crying out for a shrinker per pool.... */ static unsigned long ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - static atomic_t start_pool = ATOMIC_INIT(0); + static DEFINE_MUTEX(lock); + static unsigned start_pool; unsigned i; - unsigned pool_offset = atomic_add_return(1, &start_pool); + unsigned pool_offset; struct ttm_page_pool *pool; int shrink_pages = sc->nr_to_scan; unsigned long freed = 0; - pool_offset = pool_offset % NUM_POOLS; + if (!mutex_trylock(&lock)) + return SHRINK_STOP; + pool_offset = ++start_pool % NUM_POOLS; /* select start pool in round robin fashion */ for (i = 0; i < NUM_POOLS; ++i) { unsigned nr_free = shrink_pages; if (shrink_pages == 0) break; pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; - shrink_pages = ttm_page_pool_free(pool, nr_free); + shrink_pages = ttm_page_pool_free(pool, nr_free, + sc->gfp_mask); freed += nr_free - shrink_pages; } + mutex_unlock(&lock); return freed; } @@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, } spin_unlock_irqrestore(&pool->lock, irq_flags); if (npages) - ttm_page_pool_free(pool, npages); + ttm_page_pool_free(pool, npages, GFP_KERNEL); } /* @@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, return 0; } -static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, +static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, char *name) { spin_lock_init(&pool->lock); @@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void) ttm_pool_mm_shrink_fini(_manager); for (i = 0; i < NUM_POOLS; ++i) - ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); + ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, + GFP_KERNEL); kobject_put(&_manager->kobj); _manager = NULL; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index fb8259f69839..c96db433f8af 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ 
b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) * * @pool: to free the pages from * @nr_free: If set to true will free all pages in pool + * @gfp: GFP flags. **/ -static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) +static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, + gfp_t gfp) { unsigned long irq_flags; struct dma_page *dma_p, *tmp; @@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) npages_to_free, nr_free); } #endif - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), - GFP_KERNEL); + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); if (!pages_to_free) { pr_err("%s: Failed to allocate memory for pool free operation\n", @@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type) if (pool->type != type) continue; /* Takes a spinlock.. */ - ttm_dma_page_pool_free(pool, FREE_ALL_PAGES); + ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL); WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); /* This code path is called after _all_ references to the * struct device has been dropped - so nobody should be @@ -847,6 +848,7 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool, if (count) { d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); ttm->pages[index] = d_page->p; + ttm_dma->cpu_address[index] = d_page->vaddr; ttm_dma->dma_address[index] = d_page->dma; list_move_tail(&d_page->page_list, &ttm_dma->pages_list); r = 0; @@ -978,12 +980,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) INIT_LIST_HEAD(&ttm_dma->pages_list); for (i = 0; i < ttm->num_pages; i++) { ttm->pages[i] = NULL; + ttm_dma->cpu_address[i] = 0; ttm_dma->dma_address[i] = 0; } /* shrink pool if necessary (only on !is_cached pools)*/ if (npages) - ttm_dma_page_pool_free(pool, npages); + ttm_dma_page_pool_free(pool, npages, GFP_KERNEL); ttm->state = tt_unpopulated; } EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); @@ -993,10 +996,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); * * XXX: (dchinner) Deadlock warning! * - * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention - * needs to be paid to sc->gfp_mask to determine if this can be done or not. - * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really - * bad. + * We need to pass sc->gfp_mask to ttm_dma_page_pool_free(). 
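The ttm_page_alloc.c shrinker above, and the ttm_page_alloc_dma.c one just below, converge on the same shape: give up with SHRINK_STOP when the lock is contended (mutex_trylock), pick the starting pool round-robin from a plain static counter now that the lock serialises it, and thread the caller's sc->gfp_mask down to the pool free routine so reclaim never does a GFP_KERNEL allocation from a restricted context. The standalone sketch below models that shape with pthreads; the pool names and page counts are invented, and note that pthread_mutex_trylock() inverts the kernel convention (it returns 0 when the lock was taken).

/* Build e.g. with: cc -pthread shrink_sketch.c (file name is arbitrary). */
#include <pthread.h>
#include <stdio.h>

#define NUM_POOLS 4

struct pool { const char *name; unsigned npages_free; };

static struct pool pools[NUM_POOLS] = {
	{ "wc", 32 }, { "uc", 8 }, { "cached", 64 }, { "dma32", 16 },
};

/* Stand-in for ttm_page_pool_free(pool, nr_free, gfp): returns pages still owed.
 * The real routine uses @gfp for its temporary page-pointer array. */
static unsigned pool_free(struct pool *p, unsigned nr_free, unsigned gfp_mask)
{
	unsigned freed = nr_free < p->npages_free ? nr_free : p->npages_free;

	(void)gfp_mask;
	p->npages_free -= freed;
	return nr_free - freed;
}

static unsigned long shrink_scan(unsigned nr_to_scan, unsigned gfp_mask)
{
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned start_pool;
	unsigned shrink_pages = nr_to_scan;
	unsigned long freed = 0;
	unsigned pool_offset, i;

	/* pthread_mutex_trylock() returns non-zero on contention; the kernel's
	 * mutex_trylock() returns 0 in that case and the shrinker bails with
	 * SHRINK_STOP. */
	if (pthread_mutex_trylock(&lock))
		return 0;
	pool_offset = ++start_pool % NUM_POOLS;
	for (i = 0; i < NUM_POOLS && shrink_pages; ++i) {
		unsigned nr_free = shrink_pages;
		struct pool *p = &pools[(i + pool_offset) % NUM_POOLS];

		shrink_pages = pool_free(p, nr_free, gfp_mask);
		freed += nr_free - shrink_pages;
	}
	pthread_mutex_unlock(&lock);
	return freed;
}

int main(void)
{
	printf("freed %lu pages\n", shrink_scan(40, 0));
	return 0;
}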
* * I'm getting sadder as I hear more pathetical whimpers about needing per-pool * shrinkers @@ -1004,9 +1004,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); static unsigned long ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - static atomic_t start_pool = ATOMIC_INIT(0); + static unsigned start_pool; unsigned idx = 0; - unsigned pool_offset = atomic_add_return(1, &start_pool); + unsigned pool_offset; unsigned shrink_pages = sc->nr_to_scan; struct device_pools *p; unsigned long freed = 0; @@ -1014,8 +1014,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (list_empty(&_manager->pools)) return SHRINK_STOP; - mutex_lock(&_manager->lock); - pool_offset = pool_offset % _manager->npools; + if (!mutex_trylock(&_manager->lock)) + return SHRINK_STOP; + if (!_manager->npools) + goto out; + pool_offset = ++start_pool % _manager->npools; list_for_each_entry(p, &_manager->pools, pools) { unsigned nr_free; @@ -1027,13 +1030,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (++idx < pool_offset) continue; nr_free = shrink_pages; - shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); + shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, + sc->gfp_mask); freed += nr_free - shrink_pages; pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", p->pool->dev_name, p->pool->name, current->pid, nr_free, shrink_pages); } +out: mutex_unlock(&_manager->lock); return freed; } @@ -1044,7 +1049,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc) struct device_pools *p; unsigned long count = 0; - mutex_lock(&_manager->lock); + if (!mutex_trylock(&_manager->lock)) + return 0; list_for_each_entry(p, &_manager->pools, pools) count += p->pool->npages_free; mutex_unlock(&_manager->lock); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 75f319090043..bf080abc86d1 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -55,9 +55,12 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) { - ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*)); - ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages, - sizeof(*ttm->dma_address)); + ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, + sizeof(*ttm->ttm.pages) + + sizeof(*ttm->dma_address) + + sizeof(*ttm->cpu_address)); + ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); + ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages); } #ifdef CONFIG_X86 @@ -228,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, INIT_LIST_HEAD(&ttm_dma->pages_list); ttm_dma_tt_alloc_page_directory(ttm_dma); - if (!ttm->pages || !ttm_dma->dma_address) { + if (!ttm->pages) { ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; @@ -243,7 +246,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) drm_free_large(ttm->pages); ttm->pages = NULL; - drm_free_large(ttm_dma->dma_address); + ttm_dma->cpu_address = NULL; ttm_dma->dma_address = NULL; } EXPORT_SYMBOL(ttm_dma_tt_fini); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index b44d548c56f8..e026a9e2942a 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -105,14 +105,7 @@ static struct drm_encoder* udl_best_single_encoder(struct drm_connector *connector) { int enc_id = 
connector->encoder_ids[0]; - struct drm_mode_object *obj; - struct drm_encoder *encoder; - - obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); - if (!obj) - return NULL; - encoder = obj_to_encoder(obj); - return encoder; + return drm_encoder_find(connector->dev, enc_id); } static int udl_connector_set_property(struct drm_connector *connector, @@ -124,7 +117,7 @@ static int udl_connector_set_property(struct drm_connector *connector, static void udl_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } @@ -154,7 +147,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII); drm_connector_helper_add(connector, &udl_connector_helper_funcs); - drm_sysfs_connector_add(connector); + drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); drm_object_attach_property(&connector->base, diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 377176372da8..d1da339843ca 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -550,7 +550,7 @@ out: return ret; } -static struct drm_fb_helper_funcs udl_fb_helper_funcs = { +static const struct drm_fb_helper_funcs udl_fb_helper_funcs = { .fb_probe = udlfb_create, }; @@ -583,7 +583,8 @@ int udl_fbdev_init(struct drm_device *dev) return -ENOMEM; udl->fbdev = ufbdev; - ufbdev->helper.funcs = &udl_fb_helper_funcs; + + drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs); ret = drm_fb_helper_init(dev, &ufbdev->helper, 1, 1); diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index c041cd73f399..8044f5fb7c49 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } } -static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask) +static int udl_gem_get_pages(struct udl_gem_object *obj) { struct page **pages; if (obj->pages) return 0; - pages = drm_gem_get_pages(&obj->base, gfpmask); + pages = drm_gem_get_pages(&obj->base); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj) return 0; } - ret = udl_gem_get_pages(obj, GFP_KERNEL); + ret = udl_gem_get_pages(obj); if (ret) return ret; @@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, } gobj = to_udl_bo(obj); - ret = udl_gem_get_pages(gobj, GFP_KERNEL); + ret = udl_gem_get_pages(gobj); if (ret) goto out; ret = drm_gem_create_mmap_offset(obj); diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 7094b92d1ec7..42795674bc07 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -306,10 +306,23 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags) DRM_DEBUG("\n"); ret = udl_modeset_init(dev); + if (ret) + goto err; ret = udl_fbdev_init(dev); + if (ret) + goto err; + + ret = drm_vblank_init(dev, 1); + if (ret) + goto err_fb; + return 0; +err_fb: + udl_fbdev_cleanup(dev); err: + if (udl->urbs.count) + udl_free_urb_list(dev); kfree(udl); DRM_ERROR("%d\n", ret); return ret; @@ -325,6 +338,8 @@ int udl_driver_unload(struct drm_device *dev) { struct udl_device *udl = dev->dev_private; + drm_vblank_cleanup(dev); + if (udl->urbs.count) udl_free_urb_list(dev); diff --git 
a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index cddc4fcf35cf..dc145d320b25 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -363,6 +363,26 @@ static void udl_crtc_destroy(struct drm_crtc *crtc) kfree(crtc); } +static int udl_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) +{ + struct udl_framebuffer *ufb = to_udl_fb(fb); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + udl_handle_damage(ufb, 0, 0, fb->width, fb->height); + + spin_lock_irqsave(&dev->event_lock, flags); + if (event) + drm_send_vblank_event(dev, 0, event); + spin_unlock_irqrestore(&dev->event_lock, flags); + crtc->primary->fb = fb; + + return 0; +} + static void udl_crtc_prepare(struct drm_crtc *crtc) { } @@ -384,6 +404,7 @@ static struct drm_crtc_helper_funcs udl_helper_funcs = { static const struct drm_crtc_funcs udl_crtc_funcs = { .set_config = drm_crtc_helper_set_config, .destroy = udl_crtc_destroy, + .page_flip = udl_crtc_page_flip, }; static int udl_crtc_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 458cdf6d81e8..ce0ab951f507 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ - vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o + vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ + vmwgfx_cmdbuf_res.o \ obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c new file mode 100644 index 000000000000..bfeb4b1f2acf --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -0,0 +1,341 @@ +/************************************************************************** + * + * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "vmwgfx_drv.h" + +#define VMW_CMDBUF_RES_MAN_HT_ORDER 12 + +enum vmw_cmdbuf_res_state { + VMW_CMDBUF_RES_COMMITED, + VMW_CMDBUF_RES_ADD, + VMW_CMDBUF_RES_DEL +}; + +/** + * struct vmw_cmdbuf_res - Command buffer managed resource entry. + * + * @res: Refcounted pointer to a struct vmw_resource. + * @hash: Hash entry for the manager hash table. + * @head: List head used either by the staging list or the manager list + * of commited resources. + * @state: Staging state of this resource entry. + * @man: Pointer to a resource manager for this entry. + */ +struct vmw_cmdbuf_res { + struct vmw_resource *res; + struct drm_hash_item hash; + struct list_head head; + enum vmw_cmdbuf_res_state state; + struct vmw_cmdbuf_res_manager *man; +}; + +/** + * struct vmw_cmdbuf_res_manager - Command buffer resource manager. + * + * @resources: Hash table containing staged and commited command buffer + * resources + * @list: List of commited command buffer resources. + * @dev_priv: Pointer to a device private structure. + * + * @resources and @list are protected by the cmdbuf mutex for now. + */ +struct vmw_cmdbuf_res_manager { + struct drm_open_hash resources; + struct list_head list; + struct vmw_private *dev_priv; +}; + + +/** + * vmw_cmdbuf_res_lookup - Look up a command buffer resource + * + * @man: Pointer to the command buffer resource manager + * @resource_type: The resource type, that combined with the user key + * identifies the resource. + * @user_key: The user key. + * + * Returns a valid refcounted struct vmw_resource pointer on success, + * an error pointer on failure. + */ +struct vmw_resource * +vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key) +{ + struct drm_hash_item *hash; + int ret; + unsigned long key = user_key | (res_type << 24); + + ret = drm_ht_find_item(&man->resources, key, &hash); + if (unlikely(ret != 0)) + return ERR_PTR(ret); + + return vmw_resource_reference + (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res); +} + +/** + * vmw_cmdbuf_res_free - Free a command buffer resource. + * + * @man: Pointer to the command buffer resource manager + * @entry: Pointer to a struct vmw_cmdbuf_res. + * + * Frees a struct vmw_cmdbuf_res entry and drops its reference to the + * struct vmw_resource. + */ +static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man, + struct vmw_cmdbuf_res *entry) +{ + list_del(&entry->head); + WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); + vmw_resource_unreference(&entry->res); + kfree(entry); +} + +/** + * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions + * + * @list: Caller's list of command buffer resource actions. + * + * This function commits a list of command buffer resource + * additions or removals. + * It is typically called when the execbuf ioctl call triggering these + * actions has commited the fifo contents to the device. 
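vmw_cmdbuf_res_lookup() above folds the resource type into the hash key (user_key | (res_type << 24)), so a single drm_open_hash table can hold every command-buffer-managed resource type without keys from different namespaces colliding. A minimal sketch of that keying scheme over a plain array instead of the DRM hash table; the entry layout and helper names are illustrative only.

#include <stdio.h>

enum res_type { RES_COMPAT_SHADER = 0, RES_OTHER = 1 };

struct entry { unsigned long key; const char *payload; };

/* Same packing as the lookup above: user key in the low 24 bits, type above. */
static unsigned long pack_key(unsigned user_key, enum res_type type)
{
	return (unsigned long)user_key | ((unsigned long)type << 24);
}

static const char *lookup(const struct entry *tbl, unsigned n,
			  unsigned user_key, enum res_type type)
{
	unsigned long key = pack_key(user_key, type);
	unsigned i;

	for (i = 0; i < n; i++)
		if (tbl[i].key == key)
			return tbl[i].payload;
	return NULL;	/* the real lookup propagates an error pointer instead */
}

int main(void)
{
	struct entry tbl[] = {
		{ pack_key(7, RES_COMPAT_SHADER), "shader 7" },
		{ pack_key(7, RES_OTHER),         "other 7"  },
	};

	/* Same user key, different type: two distinct entries. */
	printf("%s / %s\n",
	       lookup(tbl, 2, 7, RES_COMPAT_SHADER),
	       lookup(tbl, 2, 7, RES_OTHER));
	return 0;
}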
+ */ +void vmw_cmdbuf_res_commit(struct list_head *list) +{ + struct vmw_cmdbuf_res *entry, *next; + + list_for_each_entry_safe(entry, next, list, head) { + list_del(&entry->head); + switch (entry->state) { + case VMW_CMDBUF_RES_ADD: + entry->state = VMW_CMDBUF_RES_COMMITED; + list_add_tail(&entry->head, &entry->man->list); + break; + case VMW_CMDBUF_RES_DEL: + vmw_resource_unreference(&entry->res); + kfree(entry); + break; + default: + BUG(); + break; + } + } +} + +/** + * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions + * + * @man: Pointer to the command buffer resource manager + * @list: Caller's list of command buffer resource action + * + * This function reverts a list of command buffer resource + * additions or removals. + * It is typically called when the execbuf ioctl call triggering these + * actions failed for some reason, and the command stream was never + * submitted. + */ +void vmw_cmdbuf_res_revert(struct list_head *list) +{ + struct vmw_cmdbuf_res *entry, *next; + int ret; + + list_for_each_entry_safe(entry, next, list, head) { + switch (entry->state) { + case VMW_CMDBUF_RES_ADD: + vmw_cmdbuf_res_free(entry->man, entry); + break; + case VMW_CMDBUF_RES_DEL: + ret = drm_ht_insert_item(&entry->man->resources, + &entry->hash); + list_del(&entry->head); + list_add_tail(&entry->head, &entry->man->list); + entry->state = VMW_CMDBUF_RES_COMMITED; + break; + default: + BUG(); + break; + } + } +} + +/** + * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition. + * + * @man: Pointer to the command buffer resource manager. + * @res_type: The resource type. + * @user_key: The user-space id of the resource. + * @res: Valid (refcount != 0) pointer to a struct vmw_resource. + * @list: The staging list. + * + * This function allocates a struct vmw_cmdbuf_res entry and adds the + * resource to the hash table of the manager identified by @man. The + * entry is then put on the staging list identified by @list. + */ +int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key, + struct vmw_resource *res, + struct list_head *list) +{ + struct vmw_cmdbuf_res *cres; + int ret; + + cres = kzalloc(sizeof(*cres), GFP_KERNEL); + if (unlikely(cres == NULL)) + return -ENOMEM; + + cres->hash.key = user_key | (res_type << 24); + ret = drm_ht_insert_item(&man->resources, &cres->hash); + if (unlikely(ret != 0)) + goto out_invalid_key; + + cres->state = VMW_CMDBUF_RES_ADD; + cres->res = vmw_resource_reference(res); + cres->man = man; + list_add_tail(&cres->head, list); + +out_invalid_key: + return ret; +} + +/** + * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal. + * + * @man: Pointer to the command buffer resource manager. + * @res_type: The resource type. + * @user_key: The user-space id of the resource. + * @list: The staging list. + * + * This function looks up the struct vmw_cmdbuf_res entry from the manager + * hash table and, if it exists, removes it. Depending on its current staging + * state it then either removes the entry from the staging list or adds it + * to it with a staging state of removal. 
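The commit/revert pair documented above gives execbuf a two-phase update: additions and removals are first staged on a private list, and only once the command stream has actually been handed to the device does commit make them visible on the manager list (revert throws them away if submission fails). Below is a compressed userspace model of that staging state machine, using flat arrays rather than the kernel's lists and hash table; it sketches the idea, not the real data structures.

#include <stdio.h>

enum state { RES_COMMITTED, RES_ADD, RES_DEL };

struct res { const char *name; enum state state; int live; };

/* Stage an addition: visible to the manager only after commit(). */
static void stage_add(struct res *r) { r->state = RES_ADD; }
/* Stage removal of something already committed. */
static void stage_del(struct res *r) { r->state = RES_DEL; }

static void commit(struct res *staged[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (staged[i]->state == RES_ADD) {
			staged[i]->state = RES_COMMITTED;
			staged[i]->live = 1;	/* moved onto the manager list */
		} else if (staged[i]->state == RES_DEL) {
			staged[i]->live = 0;	/* reference dropped, entry freed */
		}
	}
}

static void revert(struct res *staged[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (staged[i]->state == RES_ADD) {
			staged[i]->live = 0;	/* as if it had never been added */
		} else if (staged[i]->state == RES_DEL) {
			staged[i]->state = RES_COMMITTED;	/* back on the manager list */
		}
	}
}

int main(void)
{
	struct res new_shader = { "new", RES_COMMITTED, 0 };
	struct res old_shader = { "old", RES_COMMITTED, 1 };
	struct res *staged[] = { &new_shader, &old_shader };

	stage_add(&new_shader);
	stage_del(&old_shader);
	revert(staged, 2);	/* e.g. execbuf failed: nothing changed */
	printf("after revert: new=%d old=%d\n", new_shader.live, old_shader.live);

	stage_add(&new_shader);
	stage_del(&old_shader);
	commit(staged, 2);	/* fifo submitted: the update takes effect */
	printf("after commit: new=%d old=%d\n", new_shader.live, old_shader.live);
	return 0;
}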
+ */ +int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key, + struct list_head *list) +{ + struct vmw_cmdbuf_res *entry; + struct drm_hash_item *hash; + int ret; + + ret = drm_ht_find_item(&man->resources, user_key, &hash); + if (likely(ret != 0)) + return -EINVAL; + + entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash); + + switch (entry->state) { + case VMW_CMDBUF_RES_ADD: + vmw_cmdbuf_res_free(man, entry); + break; + case VMW_CMDBUF_RES_COMMITED: + (void) drm_ht_remove_item(&man->resources, &entry->hash); + list_del(&entry->head); + entry->state = VMW_CMDBUF_RES_DEL; + list_add_tail(&entry->head, list); + break; + default: + BUG(); + break; + } + + return 0; +} + +/** + * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource + * manager. + * + * @dev_priv: Pointer to a struct vmw_private + * + * Allocates and initializes a command buffer managed resource manager. Returns + * an error pointer on failure. + */ +struct vmw_cmdbuf_res_manager * +vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) +{ + struct vmw_cmdbuf_res_manager *man; + int ret; + + man = kzalloc(sizeof(*man), GFP_KERNEL); + if (man == NULL) + return ERR_PTR(-ENOMEM); + + man->dev_priv = dev_priv; + INIT_LIST_HEAD(&man->list); + ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); + if (ret == 0) + return man; + + kfree(man); + return ERR_PTR(ret); +} + +/** + * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource + * manager. + * + * @man: Pointer to the manager to destroy. + * + * This function destroys a command buffer managed resource manager and + * unreferences / frees all command buffer managed resources and -entries + * associated with it. + */ +void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) +{ + struct vmw_cmdbuf_res *entry, *next; + + list_for_each_entry_safe(entry, next, &man->list, head) + vmw_cmdbuf_res_free(man, entry); + + kfree(man); +} + +/** + * + * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed + * resource manager + * + * Returns the approximate allocation size of a command buffer managed + * resource manager. 
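vmw_cmdbuf_res_man_size(), whose body follows just below, charges the TTM memory accounting for the manager structure plus its hash table; with VMW_CMDBUF_RES_MAN_HT_ORDER = 12 that is 1 << 12 = 4096 bucket heads. The sketch below redoes that arithmetic with a crude stand-in for ttm_round_pot() (for sizes above a page the real helper rounds to page multiples rather than to the next power of two, but for the power-of-two bucket array the result is the same); the struct layouts and printed byte counts are only whatever sizeof yields on the host.

#include <stdio.h>
#include <stddef.h>

#define HT_ORDER 12	/* 1 << 12 == 4096 hash buckets */

struct fake_hlist_head { void *first; };
struct fake_manager { void *hash_table; void *list_prev, *list_next; void *dev_priv; };

/* Crude stand-in for ttm_round_pot(): round up to the next power of two. */
static size_t round_pot(size_t size)
{
	size_t pot = 1;

	while (pot < size)
		pot <<= 1;
	return pot;
}

int main(void)
{
	size_t size = round_pot(sizeof(struct fake_manager)) +
		      round_pot(sizeof(struct fake_hlist_head) << HT_ORDER);

	printf("buckets: %d, accounted size: %zu bytes\n", 1 << HT_ORDER, size);
	return 0;
}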
+ */ +size_t vmw_cmdbuf_res_man_size(void) +{ + static size_t res_man_size; + + if (unlikely(res_man_size == 0)) + res_man_size = + ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) + + ttm_round_pot(sizeof(struct hlist_head) << + VMW_CMDBUF_RES_MAN_HT_ORDER); + + return res_man_size; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 8bb26dcd9eae..5ac92874404d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -33,6 +33,7 @@ struct vmw_user_context { struct ttm_base_object base; struct vmw_resource res; struct vmw_ctx_binding_state cbs; + struct vmw_cmdbuf_res_manager *man; }; @@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { static void vmw_hw_context_destroy(struct vmw_resource *res) { - + struct vmw_user_context *uctx = + container_of(res, struct vmw_user_context, res); struct vmw_private *dev_priv = res->dev_priv; struct { SVGA3dCmdHeader header; @@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) if (res->func->destroy == vmw_gb_context_destroy) { mutex_lock(&dev_priv->cmdbuf_mutex); + vmw_cmdbuf_res_man_destroy(uctx->man); mutex_lock(&dev_priv->binding_mutex); - (void) vmw_context_binding_state_kill - (&container_of(res, struct vmw_user_context, res)->cbs); + (void) vmw_context_binding_state_kill(&uctx->cbs); (void) vmw_gb_context_destroy(res); mutex_unlock(&dev_priv->binding_mutex); if (dev_priv->pinned_bo != NULL && @@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, ret = vmw_resource_init(dev_priv, res, true, res_free, &vmw_gb_context_func); res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; + if (unlikely(ret != 0)) + goto out_err; - if (unlikely(ret != 0)) { - if (res_free) - res_free(res); - else - kfree(res); - return ret; + if (dev_priv->has_mob) { + uctx->man = vmw_cmdbuf_res_man_create(dev_priv); + if (unlikely(IS_ERR(uctx->man))) { + ret = PTR_ERR(uctx->man); + uctx->man = NULL; + goto out_err; + } } memset(&uctx->cbs, 0, sizeof(uctx->cbs)); @@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, vmw_resource_activate(res, vmw_hw_context_destroy); return 0; + +out_err: + if (res_free) + res_free(res); + else + kfree(res); + return ret; } static int vmw_context_init(struct vmw_private *dev_priv, @@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data, */ if (unlikely(vmw_user_context_size == 0)) - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128; + vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 + + ((dev_priv->has_mob) ? 
vmw_cmdbuf_res_man_size() : 0); ret = ttm_read_lock(&dev_priv->reservation_sem, true); if (unlikely(ret != 0)) @@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) { return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); } + +struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) +{ + return container_of(ctx, struct vmw_user_context, res)->man; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index 70ddce8358b0..ed1d51006ab1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -61,7 +61,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv, vmw_execbuf_release_pinned_bo(dev_priv); - ret = ttm_bo_reserve(bo, interruptible, false, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); if (unlikely(ret != 0)) goto err; @@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv, if (pin) vmw_execbuf_release_pinned_bo(dev_priv); - ret = ttm_bo_reserve(bo, interruptible, false, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); if (unlikely(ret != 0)) goto err; @@ -212,7 +212,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, if (pin) vmw_execbuf_release_pinned_bo(dev_priv); - ret = ttm_bo_reserve(bo, interruptible, false, false, 0); + ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); if (unlikely(ret != 0)) goto err_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 246a62bab378..18b54acacfbb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -316,7 +316,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) if (unlikely(ret != 0)) return ret; - ret = ttm_bo_reserve(bo, false, true, false, 0); + ret = ttm_bo_reserve(bo, false, true, false, NULL); BUG_ON(ret != 0); ret = ttm_bo_kmap(bo, 0, 1, &map); @@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev, drm_master_put(&vmw_fp->locked_master); } - vmw_compat_shader_man_destroy(vmw_fp->shman); ttm_object_file_release(&vmw_fp->tfile); kfree(vmw_fp); } @@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; - vmw_fp->shman = vmw_compat_shader_man_create(dev_priv); - if (IS_ERR(vmw_fp->shman)) - goto out_no_shman; - file_priv->driver_priv = vmw_fp; return 0; -out_no_shman: - ttm_object_file_release(&vmw_fp->tfile); out_no_tfile: kfree(vmw_fp); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index c886c024c637..99f731757c4b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -40,10 +40,10 @@ #include <drm/ttm/ttm_module.h> #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20140325" +#define VMWGFX_DRIVER_DATE "20140704" #define VMWGFX_DRIVER_MAJOR 2 #define VMWGFX_DRIVER_MINOR 6 -#define VMWGFX_DRIVER_PATCHLEVEL 0 +#define VMWGFX_DRIVER_PATCHLEVEL 1 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 @@ -75,14 +75,11 @@ #define VMW_RES_FENCE ttm_driver_type3 #define VMW_RES_SHADER ttm_driver_type4 -struct vmw_compat_shader_manager; - struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; struct list_head fence_events; bool gb_aware; - struct vmw_compat_shader_manager *shman; }; 
struct vmw_dma_buffer { @@ -124,6 +121,10 @@ struct vmw_resource { void (*hw_destroy) (struct vmw_resource *res); }; + +/* + * Resources that are managed using ioctls. + */ enum vmw_res_type { vmw_res_context, vmw_res_surface, @@ -132,6 +133,15 @@ enum vmw_res_type { vmw_res_max }; +/* + * Resources that are managed using command streams. + */ +enum vmw_cmdbuf_res_type { + vmw_cmdbuf_res_compat_shader +}; + +struct vmw_cmdbuf_res_manager; + struct vmw_cursor_snooper { struct drm_crtc *crtc; size_t age; @@ -341,7 +351,7 @@ struct vmw_sw_context{ bool needs_post_query_barrier; struct vmw_resource *error_resource; struct vmw_ctx_binding_state staged_bindings; - struct list_head staged_shaders; + struct list_head staged_cmd_res; }; struct vmw_legacy_display; @@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head); extern void vmw_context_binding_res_list_scrub(struct list_head *head); extern int vmw_context_rebind_all(struct vmw_resource *ctx); extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); - +extern struct vmw_cmdbuf_res_manager * +vmw_context_res_man(struct vmw_resource *ctx); /* * Surface management - vmwgfx_surface.c */ @@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, - SVGA3dShaderType shader_type, - u32 *user_key); -extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, - struct list_head *list); -extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, - struct list_head *list); -extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, - u32 user_key, - SVGA3dShaderType shader_type, - struct list_head *list); -extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, +extern int vmw_compat_shader_add(struct vmw_private *dev_priv, + struct vmw_cmdbuf_res_manager *man, u32 user_key, const void *bytecode, SVGA3dShaderType shader_type, size_t size, - struct ttm_object_file *tfile, struct list_head *list); -extern struct vmw_compat_shader_manager * -vmw_compat_shader_man_create(struct vmw_private *dev_priv); -extern void -vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); +extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, + u32 user_key, SVGA3dShaderType shader_type, + struct list_head *list); +extern struct vmw_resource * +vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, + u32 user_key, SVGA3dShaderType shader_type); + +/* + * Command buffer managed resources - vmwgfx_cmdbuf_res.c + */ + +extern struct vmw_cmdbuf_res_manager * +vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv); +extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man); +extern size_t vmw_cmdbuf_res_man_size(void); +extern struct vmw_resource * +vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key); +extern void vmw_cmdbuf_res_revert(struct list_head *list); +extern void vmw_cmdbuf_res_commit(struct list_head *list); +extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key, + struct vmw_resource *res, + struct list_head *list); +extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, + enum vmw_cmdbuf_res_type res_type, + u32 user_key, + 
struct list_head *list); /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 87df0b3674fd..36b871686d3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -422,28 +422,90 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) return 0; } + /** - * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it + * vmw_cmd_res_reloc_add - Add a resource to a software context's + * relocation- and validation lists. + * + * @dev_priv: Pointer to a struct vmw_private identifying the device. + * @sw_context: Pointer to the software context. + * @res_type: Resource type. + * @id_loc: Pointer to where the id that needs translation is located. + * @res: Valid pointer to a struct vmw_resource. + * @p_val: If non null, a pointer to the struct vmw_resource_validate_node + * used for this resource is returned here. + */ +static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + uint32_t *id_loc, + struct vmw_resource *res, + struct vmw_resource_val_node **p_val) +{ + int ret; + struct vmw_resource_val_node *node; + + *p_val = NULL; + ret = vmw_resource_relocation_add(&sw_context->res_relocations, + res, + id_loc - sw_context->buf_start); + if (unlikely(ret != 0)) + return ret; + + ret = vmw_resource_val_add(sw_context, res, &node); + if (unlikely(ret != 0)) + return ret; + + if (res_type == vmw_res_context && dev_priv->has_mob && + node->first_usage) { + + /* + * Put contexts first on the list to be able to exit + * list traversal for contexts early. + */ + list_del(&node->head); + list_add(&node->head, &sw_context->resource_list); + + ret = vmw_resource_context_res_add(dev_priv, sw_context, res); + if (unlikely(ret != 0)) + return ret; + node->staged_bindings = + kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); + if (node->staged_bindings == NULL) { + DRM_ERROR("Failed to allocate context binding " + "information.\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&node->staged_bindings->list); + } + + if (p_val) + *p_val = node; + + return 0; +} + + +/** + * vmw_cmd_res_check - Check that a resource is present and if so, put it * on the resource validate list unless it's already there. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @res_type: Resource type. * @converter: User-space visisble type specific information. - * @id: user-space resource id handle. * @id_loc: Pointer to the location in the command buffer currently being * parsed from where the user-space resource id handle is located. * @p_val: Pointer to pointer to resource validalidation node. Populated * on exit. 
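vmw_cmd_res_check(), whose body follows, keeps a small per-resource-type cache (rcache) so a command stream that keeps naming the same handle does not pay for a full vmw_user_resource_lookup_handle() every time: when rcache->valid and the handle matches, the cached validation node is reused. A minimal sketch of such a single-entry cache in front of a deliberately slow lookup; slow_lookup() and its counter are invented for illustration.

#include <stdio.h>

struct res { unsigned handle; };

static struct res resources[] = { { 10 }, { 11 }, { 12 } };
static unsigned slow_lookups;	/* counts how often the cache was missed */

/* Stand-in for vmw_user_resource_lookup_handle(): pretend it is expensive. */
static struct res *slow_lookup(unsigned handle)
{
	unsigned i;

	slow_lookups++;
	for (i = 0; i < 3; i++)
		if (resources[i].handle == handle)
			return &resources[i];
	return NULL;
}

struct res_cache { int valid; unsigned handle; struct res *res; };

static struct res *res_check(struct res_cache *rcache, unsigned handle)
{
	struct res *res;

	if (rcache->valid && handle == rcache->handle)
		return rcache->res;	/* fast path, like reusing rcache->node */

	res = slow_lookup(handle);
	if (!res)
		return NULL;
	rcache->valid = 1;
	rcache->handle = handle;
	rcache->res = res;
	return res;
}

int main(void)
{
	struct res_cache rcache = { 0 };
	unsigned stream[] = { 10, 10, 10, 11, 11, 10 };
	unsigned i;

	for (i = 0; i < 6; i++)
		res_check(&rcache, stream[i]);
	printf("6 references, %u slow lookups\n", slow_lookups);
	return 0;
}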
*/ static int -vmw_cmd_compat_res_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t id, - uint32_t *id_loc, - struct vmw_resource_val_node **p_val) +vmw_cmd_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) { struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; @@ -451,7 +513,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv, struct vmw_resource_val_node *node; int ret; - if (id == SVGA3D_INVALID_ID) { + if (*id_loc == SVGA3D_INVALID_ID) { if (p_val) *p_val = NULL; if (res_type == vmw_res_context) { @@ -466,7 +528,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv, * resource */ - if (likely(rcache->valid && id == rcache->handle)) { + if (likely(rcache->valid && *id_loc == rcache->handle)) { const struct vmw_resource *res = rcache->res; rcache->node->first_usage = false; @@ -480,49 +542,28 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv, ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile, - id, + *id_loc, converter, &res); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use resource 0x%08x.\n", - (unsigned) id); + (unsigned) *id_loc); dump_stack(); return ret; } rcache->valid = true; rcache->res = res; - rcache->handle = id; - - ret = vmw_resource_relocation_add(&sw_context->res_relocations, - res, - id_loc - sw_context->buf_start); - if (unlikely(ret != 0)) - goto out_no_reloc; + rcache->handle = *id_loc; - ret = vmw_resource_val_add(sw_context, res, &node); + ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc, + res, &node); if (unlikely(ret != 0)) goto out_no_reloc; rcache->node = node; if (p_val) *p_val = node; - - if (dev_priv->has_mob && node->first_usage && - res_type == vmw_res_context) { - ret = vmw_resource_context_res_add(dev_priv, sw_context, res); - if (unlikely(ret != 0)) - goto out_no_reloc; - node->staged_bindings = - kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); - if (node->staged_bindings == NULL) { - DRM_ERROR("Failed to allocate context binding " - "information.\n"); - goto out_no_reloc; - } - INIT_LIST_HEAD(&node->staged_bindings->list); - } - vmw_resource_unreference(&res); return 0; @@ -534,31 +575,6 @@ out_no_reloc: } /** - * vmw_cmd_res_check - Check that a resource is present and if so, put it - * on the resource validate list unless it's already there. - * - * @dev_priv: Pointer to a device private structure. - * @sw_context: Pointer to the software context. - * @res_type: Resource type. - * @converter: User-space visisble type specific information. - * @id_loc: Pointer to the location in the command buffer currently being - * parsed from where the user-space resource id handle is located. - * @p_val: Pointer to pointer to resource validalidation node. Populated - * on exit. - */ -static int -vmw_cmd_res_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t *id_loc, - struct vmw_resource_val_node **p_val) -{ - return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, - converter, *id_loc, id_loc, p_val); -} - -/** * vmw_rebind_contexts - Rebind all resources previously bound to * referenced contexts. 
* @@ -572,8 +588,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) int ret; list_for_each_entry(val, &sw_context->resource_list, head) { - if (likely(!val->staged_bindings)) - continue; + if (unlikely(!val->staged_bindings)) + break; ret = vmw_context_rebind_all(val->res); if (unlikely(ret != 0)) { @@ -1626,13 +1642,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, } *cmd; int ret; size_t size; + struct vmw_resource_val_node *val; cmd = container_of(header, struct vmw_shader_define_cmd, header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, user_context_converter, &cmd->body.cid, - NULL); + &val); if (unlikely(ret != 0)) return ret; @@ -1640,11 +1657,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, return 0; size = cmd->header.size - sizeof(cmd->body); - ret = vmw_compat_shader_add(sw_context->fp->shman, + ret = vmw_compat_shader_add(dev_priv, + vmw_context_res_man(val->res), cmd->body.shid, cmd + 1, cmd->body.type, size, - sw_context->fp->tfile, - &sw_context->staged_shaders); + &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret; @@ -1672,23 +1689,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, SVGA3dCmdDestroyShader body; } *cmd; int ret; + struct vmw_resource_val_node *val; cmd = container_of(header, struct vmw_shader_destroy_cmd, header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, user_context_converter, &cmd->body.cid, - NULL); + &val); if (unlikely(ret != 0)) return ret; if (unlikely(!dev_priv->has_mob)) return 0; - ret = vmw_compat_shader_remove(sw_context->fp->shman, + ret = vmw_compat_shader_remove(vmw_context_res_man(val->res), cmd->body.shid, cmd->body.type, - &sw_context->staged_shaders); + &sw_context->staged_cmd_res); if (unlikely(ret != 0)) return ret; @@ -1715,7 +1733,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, SVGA3dCmdHeader header; SVGA3dCmdSetShader body; } *cmd; - struct vmw_resource_val_node *ctx_node; + struct vmw_resource_val_node *ctx_node, *res_node = NULL; + struct vmw_ctx_bindinfo bi; + struct vmw_resource *res = NULL; int ret; cmd = container_of(header, struct vmw_set_shader_cmd, @@ -1727,32 +1747,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; - if (dev_priv->has_mob) { - struct vmw_ctx_bindinfo bi; - struct vmw_resource_val_node *res_node; - u32 shid = cmd->body.shid; - - if (shid != SVGA3D_INVALID_ID) - (void) vmw_compat_shader_lookup(sw_context->fp->shman, - cmd->body.type, - &shid); - - ret = vmw_cmd_compat_res_check(dev_priv, sw_context, - vmw_res_shader, - user_shader_converter, - shid, - &cmd->body.shid, &res_node); + if (!dev_priv->has_mob) + return 0; + + if (cmd->body.shid != SVGA3D_INVALID_ID) { + res = vmw_compat_shader_lookup + (vmw_context_res_man(ctx_node->res), + cmd->body.shid, + cmd->body.type); + + if (!IS_ERR(res)) { + ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, + vmw_res_shader, + &cmd->body.shid, res, + &res_node); + vmw_resource_unreference(&res); + if (unlikely(ret != 0)) + return ret; + } + } + + if (!res_node) { + ret = vmw_cmd_res_check(dev_priv, sw_context, + vmw_res_shader, + user_shader_converter, + &cmd->body.shid, &res_node); if (unlikely(ret != 0)) return ret; - - bi.ctx = ctx_node->res; - bi.res = res_node ? 
res_node->res : NULL; - bi.bt = vmw_ctx_binding_shader; - bi.i1.shader_type = cmd->body.type; - return vmw_context_binding_add(ctx_node->staged_bindings, &bi); } - return 0; + bi.ctx = ctx_node->res; + bi.res = res_node ? res_node->res : NULL; + bi.bt = vmw_ctx_binding_shader; + bi.i1.shader_type = cmd->body.type; + return vmw_context_binding_add(ctx_node->staged_bindings, &bi); } /** @@ -2394,6 +2422,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, } } + + int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_private *dev_priv, void __user *user_commands, @@ -2453,7 +2483,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, goto out_unlock; sw_context->res_ht_initialized = true; } - INIT_LIST_HEAD(&sw_context->staged_shaders); + INIT_LIST_HEAD(&sw_context->staged_cmd_res); INIT_LIST_HEAD(&resource_list); ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, @@ -2548,8 +2578,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, } list_splice_init(&sw_context->resource_list, &resource_list); - vmw_compat_shaders_commit(sw_context->fp->shman, - &sw_context->staged_shaders); + vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res); mutex_unlock(&dev_priv->cmdbuf_mutex); /* @@ -2576,8 +2605,7 @@ out_unlock: list_splice_init(&sw_context->resource_list, &resource_list); error_resource = sw_context->error_resource; sw_context->error_resource = NULL; - vmw_compat_shaders_revert(sw_context->fp->shman, - &sw_context->staged_shaders); + vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res); mutex_unlock(&dev_priv->cmdbuf_mutex); /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 6ccd993e26bf..6eae14d2a3f7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -180,8 +180,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); + ; dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b1273e8e9a69..26f8bdde3529 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -47,6 +47,7 @@ struct vmwgfx_gmrid_man { static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, + uint32_t flags, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8f3edc4710f2..d2bc2b03d4c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -75,7 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du) vmw_surface_unreference(&du->cursor_surface); if (du->cursor_dmabuf) vmw_dmabuf_unreference(&du->cursor_dmabuf); - drm_sysfs_connector_remove(&du->connector); + drm_connector_unregister(&du->connector); drm_crtc_cleanup(&du->crtc); drm_encoder_cleanup(&du->encoder); drm_connector_cleanup(&du->connector); @@ -136,7 +136,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, kmap_offset = 0; kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; - ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0); + ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL); if (unlikely(ret 
!= 0)) { DRM_ERROR("reserve failed\n"); return -EINVAL; @@ -343,7 +343,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; kmap_num = (64*64*4) >> PAGE_SHIFT; - ret = ttm_bo_reserve(bo, true, false, false, 0); + ret = ttm_bo_reserve(bo, true, false, false, NULL); if (unlikely(ret != 0)) { DRM_ERROR("reserve failed\n"); return; @@ -1501,7 +1501,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, { struct drm_vmw_cursor_bypass_arg *arg = data; struct vmw_display_unit *du; - struct drm_mode_object *obj; struct drm_crtc *crtc; int ret = 0; @@ -1519,13 +1518,12 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, return 0; } - obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); - if (!obj) { + crtc = drm_crtc_find(dev, arg->crtc_id); + if (!crtc) { ret = -ENOENT; goto out; } - crtc = obj_to_crtc(obj); du = vmw_crtc_to_du(crtc); du->hotspot_x = arg->xhot; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index b2b9bd23aeee..15e185ae4c99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -371,7 +371,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; - (void) drm_sysfs_connector_add(connector); + (void) drm_connector_register(connector); drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 01d68f0a69dc..a432c0db257c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -127,7 +127,7 @@ static void vmw_resource_release(struct kref *kref) if (res->backup) { struct ttm_buffer_object *bo = &res->backup->base; - ttm_bo_reserve(bo, false, false, false, 0); + ttm_bo_reserve(bo, false, false, false, NULL); if (!list_empty(&res->mob_head) && res->func->unbind != NULL) { struct ttm_validate_buffer val_buf; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index a95d3a0cabe4..b295463a60b3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -467,7 +467,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) encoder->possible_crtcs = (1 << unit); encoder->possible_clones = 0; - (void) drm_sysfs_connector_add(connector); + (void) drm_connector_register(connector); drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index c1559eeaffe9..8719fb3cccc9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -29,8 +29,6 @@ #include "vmwgfx_resource_priv.h" #include "ttm/ttm_placement.h" -#define VMW_COMPAT_SHADER_HT_ORDER 12 - struct vmw_shader { struct vmw_resource res; SVGA3dShaderType type; @@ -42,49 +40,8 @@ struct vmw_user_shader { struct vmw_shader shader; }; -/** - * enum vmw_compat_shader_state - Staging state for compat shaders - */ -enum vmw_compat_shader_state { - VMW_COMPAT_COMMITED, - VMW_COMPAT_ADD, - VMW_COMPAT_DEL -}; - -/** - * struct vmw_compat_shader - Metadata for compat shaders. - * - * @handle: The TTM handle of the guest backed shader. - * @tfile: The struct ttm_object_file the guest backed shader is registered - * with. - * @hash: Hash item for lookup. - * @head: List head for staging lists or the compat shader manager list. 
- * @state: Staging state. - * - * The structure is protected by the cmdbuf lock. - */ -struct vmw_compat_shader { - u32 handle; - struct ttm_object_file *tfile; - struct drm_hash_item hash; - struct list_head head; - enum vmw_compat_shader_state state; -}; - -/** - * struct vmw_compat_shader_manager - Compat shader manager. - * - * @shaders: Hash table containing staged and commited compat shaders - * @list: List of commited shaders. - * @dev_priv: Pointer to a device private structure. - * - * @shaders and @list are protected by the cmdbuf mutex for now. - */ -struct vmw_compat_shader_manager { - struct drm_open_hash shaders; - struct list_head list; - struct vmw_private *dev_priv; -}; +static uint64_t vmw_user_shader_size; +static uint64_t vmw_shader_size; static void vmw_user_shader_free(struct vmw_resource *res); static struct vmw_resource * @@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_gb_shader_destroy(struct vmw_resource *res); -static uint64_t vmw_user_shader_size; - static const struct vmw_user_resource_conv user_shader_conv = { .object_type = VMW_RES_SHADER, .base_obj_to_res = vmw_user_shader_base_to_res, @@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res) vmw_user_shader_size); } +static void vmw_shader_free(struct vmw_resource *res) +{ + struct vmw_shader *shader = vmw_res_to_shader(res); + struct vmw_private *dev_priv = res->dev_priv; + + kfree(shader); + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_shader_size); +} + /** * This function is called when user space has no more references on the * base object. It releases the base-object's reference on the resource object. @@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, TTM_REF_USAGE); } -static int vmw_shader_alloc(struct vmw_private *dev_priv, - struct vmw_dma_buffer *buffer, - size_t shader_size, - size_t offset, - SVGA3dShaderType shader_type, - struct ttm_object_file *tfile, - u32 *handle) +static int vmw_user_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type, + struct ttm_object_file *tfile, + u32 *handle) { struct vmw_user_shader *ushader; struct vmw_resource *res, *tmp; @@ -442,6 +407,56 @@ out: } +struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type) +{ + struct vmw_shader *shader; + struct vmw_resource *res; + int ret; + + /* + * Approximate idr memory usage with 128 bytes. It will be limited + * by maximum number_of shaders anyway. + */ + if (unlikely(vmw_shader_size == 0)) + vmw_shader_size = + ttm_round_pot(sizeof(struct vmw_shader)) + 128; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + vmw_shader_size, + false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for shader " + "creation.\n"); + goto out_err; + } + + shader = kzalloc(sizeof(*shader), GFP_KERNEL); + if (unlikely(shader == NULL)) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_shader_size); + ret = -ENOMEM; + goto out_err; + } + + res = &shader->res; + + /* + * From here on, the destructor takes over resource freeing. + */ + ret = vmw_gb_shader_init(dev_priv, res, shader_size, + offset, shader_type, buffer, + vmw_shader_free); + +out_err: + return ret ? 
ERR_PTR(ret) : res; +} + + int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_bad_arg; - ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, - shader_type, tfile, &arg->shader_handle); + ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, + shader_type, tfile, &arg->shader_handle); ttm_read_unlock(&dev_priv->reservation_sem); out_bad_arg: @@ -500,202 +515,83 @@ out_bad_arg: } /** - * vmw_compat_shader_lookup - Look up a compat shader - * - * @man: Pointer to the compat shader manager. - * @shader_type: The shader type, that combined with the user_key identifies - * the shader. - * @user_key: On entry, this should be a pointer to the user_key. - * On successful exit, it will contain the guest-backed shader's TTM handle. + * vmw_compat_shader_id_ok - Check whether a compat shader user key and + * shader type are within valid bounds. * - * Returns 0 on success. Non-zero on failure, in which case the value pointed - * to by @user_key is unmodified. - */ -int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, - SVGA3dShaderType shader_type, - u32 *user_key) -{ - struct drm_hash_item *hash; - int ret; - unsigned long key = *user_key | (shader_type << 24); - - ret = drm_ht_find_item(&man->shaders, key, &hash); - if (unlikely(ret != 0)) - return ret; - - *user_key = drm_hash_entry(hash, struct vmw_compat_shader, - hash)->handle; - - return 0; -} - -/** - * vmw_compat_shader_free - Free a compat shader. - * - * @man: Pointer to the compat shader manager. - * @entry: Pointer to a struct vmw_compat_shader. - * - * Frees a struct vmw_compat_shder entry and drops its reference to the - * guest backed shader. - */ -static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, - struct vmw_compat_shader *entry) -{ - list_del(&entry->head); - WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); - WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, - TTM_REF_USAGE)); - kfree(entry); -} - -/** - * vmw_compat_shaders_commit - Commit a list of compat shader actions. - * - * @man: Pointer to the compat shader manager. - * @list: Caller's list of compat shader actions. + * @user_key: User space id of the shader. + * @shader_type: Shader type. * - * This function commits a list of compat shader additions or removals. - * It is typically called when the execbuf ioctl call triggering these - * actions has commited the fifo contents to the device. + * Returns true if valid false if not. */ -void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, - struct list_head *list) +static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) { - struct vmw_compat_shader *entry, *next; - - list_for_each_entry_safe(entry, next, list, head) { - list_del(&entry->head); - switch (entry->state) { - case VMW_COMPAT_ADD: - entry->state = VMW_COMPAT_COMMITED; - list_add_tail(&entry->head, &man->list); - break; - case VMW_COMPAT_DEL: - ttm_ref_object_base_unref(entry->tfile, entry->handle, - TTM_REF_USAGE); - kfree(entry); - break; - default: - BUG(); - break; - } - } + return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; } /** - * vmw_compat_shaders_revert - Revert a list of compat shader actions + * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. * - * @man: Pointer to the compat shader manager. 
- * @list: Caller's list of compat shader actions. + * @user_key: User space id of the shader. + * @shader_type: Shader type. * - * This function reverts a list of compat shader additions or removals. - * It is typically called when the execbuf ioctl call triggering these - * actions failed for some reason, and the command stream was never - * submitted. + * Returns a hash key suitable for a command buffer managed resource + * manager hash table. */ -void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, - struct list_head *list) +static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) { - struct vmw_compat_shader *entry, *next; - int ret; - - list_for_each_entry_safe(entry, next, list, head) { - switch (entry->state) { - case VMW_COMPAT_ADD: - vmw_compat_shader_free(man, entry); - break; - case VMW_COMPAT_DEL: - ret = drm_ht_insert_item(&man->shaders, &entry->hash); - list_del(&entry->head); - list_add_tail(&entry->head, &man->list); - entry->state = VMW_COMPAT_COMMITED; - break; - default: - BUG(); - break; - } - } + return user_key | (shader_type << 20); } /** * vmw_compat_shader_remove - Stage a compat shader for removal. * - * @man: Pointer to the compat shader manager + * @man: Pointer to the compat shader manager identifying the shader namespace. * @user_key: The key that is used to identify the shader. The key is * unique to the shader type. * @shader_type: Shader type. - * @list: Caller's list of staged shader actions. - * - * This function stages a compat shader for removal and removes the key from - * the shader manager's hash table. If the shader was previously only staged - * for addition it is completely removed (But the execbuf code may keep a - * reference if it was bound to a context between addition and removal). If - * it was previously commited to the manager, it is staged for removal. + * @list: Caller's list of staged command buffer resource actions. */ -int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, +int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, u32 user_key, SVGA3dShaderType shader_type, struct list_head *list) { - struct vmw_compat_shader *entry; - struct drm_hash_item *hash; - int ret; - - ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), - &hash); - if (likely(ret != 0)) + if (!vmw_compat_shader_id_ok(user_key, shader_type)) return -EINVAL; - entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); - - switch (entry->state) { - case VMW_COMPAT_ADD: - vmw_compat_shader_free(man, entry); - break; - case VMW_COMPAT_COMMITED: - (void) drm_ht_remove_item(&man->shaders, &entry->hash); - list_del(&entry->head); - entry->state = VMW_COMPAT_DEL; - list_add_tail(&entry->head, list); - break; - default: - BUG(); - break; - } - - return 0; + return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, + vmw_compat_shader_key(user_key, + shader_type), + list); } /** - * vmw_compat_shader_add - Create a compat shader and add the - * key to the manager + * vmw_compat_shader_add - Create a compat shader and stage it for addition + * as a command buffer managed resource. * - * @man: Pointer to the compat shader manager + * @man: Pointer to the compat shader manager identifying the shader namespace. * @user_key: The key that is used to identify the shader. The key is * unique to the shader type. * @bytecode: Pointer to the bytecode of the shader. * @shader_type: Shader type. * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is * to be created with. 
- * @list: Caller's list of staged shader actions. + * @list: Caller's list of staged command buffer resource actions. * - * Note that only the key is added to the shader manager's hash table. - * The shader is not yet added to the shader manager's list of shaders. */ -int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, +int vmw_compat_shader_add(struct vmw_private *dev_priv, + struct vmw_cmdbuf_res_manager *man, u32 user_key, const void *bytecode, SVGA3dShaderType shader_type, size_t size, - struct ttm_object_file *tfile, struct list_head *list) { struct vmw_dma_buffer *buf; struct ttm_bo_kmap_obj map; bool is_iomem; - struct vmw_compat_shader *compat; - u32 handle; int ret; + struct vmw_resource *res; - if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) + if (!vmw_compat_shader_id_ok(user_key, shader_type)) return -EINVAL; /* Allocate and pin a DMA buffer */ @@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, if (unlikely(buf == NULL)) return -ENOMEM; - ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, + ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, true, vmw_dmabuf_bo_free); if (unlikely(ret != 0)) goto out; @@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, WARN_ON(ret != 0); ttm_bo_unreserve(&buf->base); - /* Create a guest-backed shader container backed by the dma buffer */ - ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type, - tfile, &handle); - vmw_dmabuf_unreference(&buf); + res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type); if (unlikely(ret != 0)) goto no_reserve; - /* - * Create a compat shader structure and stage it for insertion - * in the manager - */ - compat = kzalloc(sizeof(*compat), GFP_KERNEL); - if (compat == NULL) - goto no_compat; - - compat->hash.key = user_key | (shader_type << 24); - ret = drm_ht_insert_item(&man->shaders, &compat->hash); - if (unlikely(ret != 0)) - goto out_invalid_key; - - compat->state = VMW_COMPAT_ADD; - compat->handle = handle; - compat->tfile = tfile; - list_add_tail(&compat->head, list); - - return 0; -out_invalid_key: - kfree(compat); -no_compat: - ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, + vmw_compat_shader_key(user_key, shader_type), + res, list); + vmw_resource_unreference(&res); no_reserve: + vmw_dmabuf_unreference(&buf); out: return ret; } /** - * vmw_compat_shader_man_create - Create a compat shader manager - * - * @dev_priv: Pointer to a device private structure. - * - * Typically done at file open time. If successful returns a pointer to a - * compat shader manager. Otherwise returns an error pointer. - */ -struct vmw_compat_shader_manager * -vmw_compat_shader_man_create(struct vmw_private *dev_priv) -{ - struct vmw_compat_shader_manager *man; - int ret; - - man = kzalloc(sizeof(*man), GFP_KERNEL); - if (man == NULL) - return ERR_PTR(-ENOMEM); - - man->dev_priv = dev_priv; - INIT_LIST_HEAD(&man->list); - ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER); - if (ret == 0) - return man; - - kfree(man); - return ERR_PTR(ret); -} - -/** - * vmw_compat_shader_man_destroy - Destroy a compat shader manager + * vmw_compat_shader_lookup - Look up a compat shader * - * @man: Pointer to the shader manager to destroy. + * @man: Pointer to the command buffer managed resource manager identifying + * the shader namespace. + * @user_key: The user space id of the shader. + * @shader_type: The shader type. 
* - * Typically done at file close time. + * Returns a refcounted pointer to a struct vmw_resource if the shader was + * found. An error pointer otherwise. */ -void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) +struct vmw_resource * +vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, + u32 user_key, + SVGA3dShaderType shader_type) { - struct vmw_compat_shader *entry, *next; - - mutex_lock(&man->dev_priv->cmdbuf_mutex); - list_for_each_entry_safe(entry, next, &man->list, head) - vmw_compat_shader_free(man, entry); + if (!vmw_compat_shader_id_ok(user_key, shader_type)) + return ERR_PTR(-EINVAL); - mutex_unlock(&man->dev_priv->cmdbuf_mutex); - kfree(man); + return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, + vmw_compat_shader_key(user_key, + shader_type)); } diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 112f27e51bc7..63bd63f3c7df 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -185,16 +185,16 @@ static unsigned int pin_job(struct host1x_job *job) struct sg_table *sgt; dma_addr_t phys_addr; - reloc->target = host1x_bo_get(reloc->target); - if (!reloc->target) + reloc->target.bo = host1x_bo_get(reloc->target.bo); + if (!reloc->target.bo) goto unpin; - phys_addr = host1x_bo_pin(reloc->target, &sgt); + phys_addr = host1x_bo_pin(reloc->target.bo, &sgt); if (!phys_addr) goto unpin; job->addr_phys[job->num_unpins] = phys_addr; - job->unpins[job->num_unpins].bo = reloc->target; + job->unpins[job->num_unpins].bo = reloc->target.bo; job->unpins[job->num_unpins].sgt = sgt; job->num_unpins++; } @@ -235,21 +235,21 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf) for (i = 0; i < job->num_relocs; i++) { struct host1x_reloc *reloc = &job->relocarray[i]; u32 reloc_addr = (job->reloc_addr_phys[i] + - reloc->target_offset) >> reloc->shift; + reloc->target.offset) >> reloc->shift; u32 *target; /* skip all other gathers */ - if (cmdbuf != reloc->cmdbuf) + if (cmdbuf != reloc->cmdbuf.bo) continue; - if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) { + if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) { if (cmdbuf_page_addr) host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr); cmdbuf_page_addr = host1x_bo_kmap(cmdbuf, - reloc->cmdbuf_offset >> PAGE_SHIFT); - last_page = reloc->cmdbuf_offset >> PAGE_SHIFT; + reloc->cmdbuf.offset >> PAGE_SHIFT); + last_page = reloc->cmdbuf.offset >> PAGE_SHIFT; if (unlikely(!cmdbuf_page_addr)) { pr_err("Could not map cmdbuf for relocation\n"); @@ -257,7 +257,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf) } } - target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK); + target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK); *target = reloc_addr; } @@ -272,7 +272,7 @@ static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf, { offset *= sizeof(u32); - if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset) + if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset) return false; return true; diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 6866448083b2..37ac7b5dbd06 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain * } EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops); +void vga_switcheroo_fini_domain_pm_ops(struct device *dev) +{ + dev->pm_domain = NULL; +} 
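The helper added just above has no caller in this hunk, so here is a hedged usage sketch: a hypothetical PCI GPU driver (all example_* names are made up, not taken from this patch) pairing the existing vga_switcheroo_init_domain_pm_ops() at probe time with the new vga_switcheroo_fini_domain_pm_ops() on its remove path.

/*
 * Hypothetical usage sketch, not part of the patch: install a
 * vga_switcheroo PM domain at probe and detach it again at remove.
 */
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/vga_switcheroo.h>

static int example_gpu_runtime_suspend(struct device *dev)
{
	return 0;		/* a real driver would power the GPU down here */
}

static int example_gpu_runtime_resume(struct device *dev)
{
	return 0;		/* a real driver would power the GPU back up here */
}

static struct dev_pm_domain example_gpu_pm_domain = {
	.ops = {
		.runtime_suspend = example_gpu_runtime_suspend,
		.runtime_resume  = example_gpu_runtime_resume,
	},
};

static int example_gpu_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	/* ... device setup elided ... */
	return vga_switcheroo_init_domain_pm_ops(&pdev->dev,
						 &example_gpu_pm_domain);
}

static void example_gpu_remove(struct pci_dev *pdev)
{
	/* new helper: drop the PM domain installed at probe time */
	vga_switcheroo_fini_domain_pm_ops(&pdev->dev);
	/* ... rest of teardown elided ... */
}

Clearing the domain on remove is presumably what motivates the new export: it keeps dev->pm_domain from pointing into a driver module that has since been unbound or unloaded.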
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops); + static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index af0259708358..77711623b973 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -41,6 +41,7 @@ #include <linux/poll.h> #include <linux/miscdevice.h> #include <linux/slab.h> +#include <linux/screen_info.h> #include <linux/uaccess.h> @@ -112,10 +113,8 @@ both: return 1; } -#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE /* this is only used a cookie - it should not be dereferenced */ static struct pci_dev *vga_default; -#endif static void vga_arb_device_card_gone(struct pci_dev *pdev); @@ -131,7 +130,6 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev) } /* Returns the default VGA device (vgacon's babe) */ -#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE struct pci_dev *vga_default_device(void) { return vga_default; @@ -147,7 +145,6 @@ void vga_set_default_device(struct pci_dev *pdev) pci_dev_put(vga_default); vga_default = pci_dev_get(pdev); } -#endif static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) { @@ -237,12 +234,10 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, if (conflict->locks & lwants) return conflict; - /* Ok, now check if he owns the resource we want. We don't need - * to check "decodes" since it should be impossible to own - * own legacy resources you don't decode unless I have a bug - * in this code... + /* Ok, now check if it owns the resource we want. We can + * lock resources that are not decoded, therefore a device + * can own resources it doesn't decode. */ - WARN_ON(conflict->owns & ~conflict->decodes); match = lwants & conflict->owns; if (!match) continue; @@ -254,13 +249,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, flags = 0; pci_bits = 0; + /* If we can't control legacy resources via the bridge, we + * also need to disable normal decoding. + */ if (!conflict->bridge_has_one_vga) { - vga_irq_set_state(conflict, false); - flags |= PCI_VGA_STATE_CHANGE_DECODES; - if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) + if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM) pci_bits |= PCI_COMMAND_MEMORY; - if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) + if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO) pci_bits |= PCI_COMMAND_IO; + + if (pci_bits) { + vga_irq_set_state(conflict, false); + flags |= PCI_VGA_STATE_CHANGE_DECODES; + } } if (change_bridge) @@ -268,18 +269,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev, pci_set_vga_state(conflict->pdev, false, pci_bits, flags); conflict->owns &= ~match; - /* If he also owned non-legacy, that is no longer the case */ - if (match & VGA_RSRC_LEGACY_MEM) + + /* If we disabled normal decoding, reflect it in owns */ + if (pci_bits & PCI_COMMAND_MEMORY) conflict->owns &= ~VGA_RSRC_NORMAL_MEM; - if (match & VGA_RSRC_LEGACY_IO) + if (pci_bits & PCI_COMMAND_IO) conflict->owns &= ~VGA_RSRC_NORMAL_IO; } enable_them: /* ok dude, we got it, everybody conflicting has been disabled, let's - * enable us. Make sure we don't mark a bit in "owns" that we don't - * also have in "decodes". We can lock resources we don't decode but - * not own them. + * enable us. Mark any bits in "owns" regardless of whether we + * decoded them. We can lock resources we don't decode, therefore + * we must track them via "owns". 
*/ flags = 0; pci_bits = 0; @@ -291,7 +293,7 @@ enable_them: if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) pci_bits |= PCI_COMMAND_IO; } - if (!!(wants & VGA_RSRC_LEGACY_MASK)) + if (wants & VGA_RSRC_LEGACY_MASK) flags |= PCI_VGA_STATE_CHANGE_BRIDGE; pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); @@ -299,7 +301,7 @@ enable_them: if (!vgadev->bridge_has_one_vga) { vga_irq_set_state(vgadev, true); } - vgadev->owns |= (wants & vgadev->decodes); + vgadev->owns |= wants; lock_them: vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); if (rsrc & VGA_RSRC_LEGACY_IO) @@ -578,11 +580,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) /* Deal with VGA default device. Use first enabled one * by default if arch doesn't have it's own hook */ -#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE if (vga_default == NULL && - ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) + ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) { + pr_info("vgaarb: setting as boot device: PCI:%s\n", + pci_name(pdev)); vga_set_default_device(pdev); -#endif + } vga_arbiter_check_bridge_sharing(vgadev); @@ -616,10 +619,8 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev) goto bail; } -#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE if (vga_default == pdev) vga_set_default_device(NULL); -#endif if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) vga_decode_count--; @@ -649,7 +650,6 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, old_decodes = vgadev->decodes; decodes_removed = ~new_decodes & old_decodes; decodes_unlocked = vgadev->locks & decodes_removed; - vgadev->owns &= ~decodes_removed; vgadev->decodes = new_decodes; pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", @@ -1316,6 +1316,38 @@ static int __init vga_arb_device_init(void) pr_info("vgaarb: loaded\n"); list_for_each_entry(vgadev, &vga_list, list) { +#if defined(CONFIG_X86) || defined(CONFIG_IA64) + /* Override I/O based detection done by vga_arbiter_add_pci_device() + * as it may take the wrong device (e.g. on Apple system under EFI). + * + * Select the device owning the boot framebuffer if there is one. + */ + resource_size_t start, end; + int i; + + /* Does firmware framebuffer belong to us? */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM)) + continue; + + start = pci_resource_start(vgadev->pdev, i); + end = pci_resource_end(vgadev->pdev, i); + + if (!start || !end) + continue; + + if (screen_info.lfb_base < start || + (screen_info.lfb_base + screen_info.lfb_size) >= end) + continue; + if (!vga_default_device()) + pr_info("vgaarb: setting as boot device: PCI:%s\n", + pci_name(vgadev->pdev)); + else if (vgadev->pdev != vga_default_device()) + pr_info("vgaarb: overriding boot device: PCI:%s\n", + pci_name(vgadev->pdev)); + vga_set_default_device(vgadev->pdev); + } +#endif if (vgadev->bridge_has_one_vga) pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev)); else diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index e02cf59b048d..c18d5d71062d 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -261,6 +261,20 @@ config HOLTEK_FF Say Y here if you have a Holtek On Line Grip based game controller and want to have force feedback support for it. 
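Stepping back to the vgaarb.c hunk above: vga_arb_device_init() now overrides the I/O-based guess and prefers the device whose memory BAR contains the firmware framebuffer described by screen_info. A minimal sketch of just that containment test follows; the function name and the u64 parameter types are hypothetical, only the comparison mirrors the hunk.

/*
 * Hypothetical helper, not from the patch: true when the firmware
 * framebuffer [lfb_base, lfb_base + lfb_size) sits inside a memory
 * BAR spanning [start, end], as tested in vga_arb_device_init().
 */
#include <linux/types.h>

static bool fb_inside_bar(u64 lfb_base, u64 lfb_size,
			  resource_size_t start, resource_size_t end)
{
	if (!start || !end)
		return false;	/* unimplemented BAR, skip it */
	if (lfb_base < start || (lfb_base + lfb_size) >= end)
		return false;	/* framebuffer not fully inside this BAR */
	return true;
}

In the hunk this test runs for every memory resource of every registered VGA device, and the last device that matches becomes the default via vga_set_default_device().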
+config HID_GT683R + tristate "MSI GT68xR LED support" + depends on LEDS_CLASS && USB_HID + ---help--- + Say Y here if you want to enable support for the three MSI GT68xR LEDs + + This driver support following modes: + - Normal: LEDs are fully on when enabled + - Audio: LEDs brightness depends on sound level + - Breathing: LEDs brightness varies at human breathing rate + + Currently the following devices are know to be supported: + - MSI GT683R + config HID_HUION tristate "Huion tablets" depends on USB_HID @@ -750,12 +764,17 @@ config THRUSTMASTER_FF Rumble Force or Force Feedback Wheel. config HID_WACOM - tristate "Wacom Bluetooth devices support" + tristate "Wacom Intuos/Graphire tablet support (USB)" depends on HID - depends on LEDS_CLASS select POWER_SUPPLY - ---help--- - Support for Wacom Graphire Bluetooth and Intuos4 WL tablets. + select NEW_LEDS + select LEDS_CLASS + help + Say Y here if you want to use the USB or BT version of the Wacom Intuos + or Graphire tablet. + + To compile this driver as a module, choose M here: the + module will be called wacom. config HID_WIIMOTE tristate "Nintendo Wii / Wii U peripherals" diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 5e96be3ab280..4dbac7f8530c 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o obj-$(CONFIG_HID_ELECOM) += hid-elecom.o obj-$(CONFIG_HID_ELO) += hid-elo.o obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o +obj-$(CONFIG_HID_GT683R) += hid-gt683r.o obj-$(CONFIG_HID_GYRATION) += hid-gyration.o obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o obj-$(CONFIG_HID_HOLTEK) += hid-holtek-mouse.o @@ -115,7 +116,9 @@ obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o obj-$(CONFIG_HID_XINMO) += hid-xinmo.o obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o -obj-$(CONFIG_HID_WACOM) += hid-wacom.o + +wacom-objs := wacom_wac.o wacom_sys.o +obj-$(CONFIG_HID_WACOM) += wacom.o obj-$(CONFIG_HID_WALTOP) += hid-waltop.o obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o obj-$(CONFIG_HID_SENSOR_HUB) += hid-sensor-hub.o diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c index 1bdcccc54a1d..f745d2c1325e 100644 --- a/drivers/hid/hid-cherry.c +++ b/drivers/hid/hid-cherry.c @@ -28,7 +28,7 @@ static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { + if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); rdesc[11] = rdesc[16] = 0xff; rdesc[12] = rdesc[17] = 0x03; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 6c813c6092f8..12b6e67d9de0 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -789,6 +789,15 @@ static int hid_scan_report(struct hid_device *hid) /* hid-rmi should take care of them, not hid-generic */ hid->group = HID_GROUP_RMI; + /* + * Vendor specific handlings + */ + switch (hid->vendor) { + case USB_VENDOR_ID_WACOM: + hid->group = HID_GROUP_WACOM; + break; + } + vfree(parser); return 0; } @@ -1849,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) }, { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 
USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, @@ -1937,8 +1947,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, @@ -2344,7 +2352,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WACOM, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) }, diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c new file mode 100644 index 000000000000..0d6f135e266c --- /dev/null +++ b/drivers/hid/hid-gt683r.c @@ -0,0 +1,321 @@ +/* + * MSI GT683R led driver + * + * Copyright (c) 2014 Janne Kanniainen <janne.kanniainen@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/kernel.h> +#include <linux/leds.h> +#include <linux/module.h> + +#include "hid-ids.h" + +#define GT683R_BUFFER_SIZE 8 + +/* + * GT683R_LED_OFF: all LEDs are off + * GT683R_LED_AUDIO: LEDs brightness depends on sound level + * GT683R_LED_BREATHING: LEDs brightness varies at human breathing rate + * GT683R_LED_NORMAL: LEDs are fully on when enabled + */ +enum gt683r_led_mode { + GT683R_LED_OFF = 0, + GT683R_LED_AUDIO = 2, + GT683R_LED_BREATHING = 3, + GT683R_LED_NORMAL = 5 +}; + +enum gt683r_panels { + GT683R_LED_BACK = 0, + GT683R_LED_SIDE = 1, + GT683R_LED_FRONT = 2, + GT683R_LED_COUNT, +}; + +static const char * const gt683r_panel_names[] = { + "back", + "side", + "front", +}; + +struct gt683r_led { + struct hid_device *hdev; + struct led_classdev led_devs[GT683R_LED_COUNT]; + struct mutex lock; + struct work_struct work; + enum led_brightness brightnesses[GT683R_LED_COUNT]; + enum gt683r_led_mode mode; +}; + +static const struct hid_device_id gt683r_led_id[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, + { } +}; + +static void gt683r_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + int i; + struct device *dev = led_cdev->dev->parent; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct gt683r_led *led = hid_get_drvdata(hdev); + + for (i = 0; i < GT683R_LED_COUNT; i++) { + if (led_cdev == &led->led_devs[i]) + break; + } + + if (i < GT683R_LED_COUNT) { + led->brightnesses[i] = brightness; + schedule_work(&led->work); + } +} + +static ssize_t mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u8 sysfs_mode; + struct hid_device *hdev = container_of(dev->parent, + struct hid_device, dev); + struct gt683r_led *led = hid_get_drvdata(hdev); + + if (led->mode == GT683R_LED_NORMAL) + sysfs_mode = 0; + else if (led->mode == GT683R_LED_AUDIO) + sysfs_mode = 1; + else + sysfs_mode = 2; + + return scnprintf(buf, PAGE_SIZE, "%u\n", sysfs_mode); +} + +static ssize_t mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u8 sysfs_mode; + struct hid_device *hdev = container_of(dev->parent, + struct hid_device, dev); + struct gt683r_led *led = hid_get_drvdata(hdev); + + + if (kstrtou8(buf, 10, &sysfs_mode) || sysfs_mode > 2) + return -EINVAL; + + mutex_lock(&led->lock); + + if (sysfs_mode == 0) + led->mode = GT683R_LED_NORMAL; + else if (sysfs_mode == 1) + led->mode = GT683R_LED_AUDIO; + else + led->mode = GT683R_LED_BREATHING; + + mutex_unlock(&led->lock); + schedule_work(&led->work); + + return count; +} + +static int gt683r_led_snd_msg(struct gt683r_led *led, u8 *msg) +{ + int ret; + + ret = hid_hw_raw_request(led->hdev, msg[0], msg, GT683R_BUFFER_SIZE, + HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + if (ret != GT683R_BUFFER_SIZE) { + hid_err(led->hdev, + "failed to send set report request: %i\n", ret); + if (ret < 0) + return ret; + return -EIO; + } + + return 0; +} + +static int gt683r_leds_set(struct gt683r_led *led, u8 leds) +{ + int ret; + u8 *buffer; + + buffer = kzalloc(GT683R_BUFFER_SIZE, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + buffer[0] = 0x01; + buffer[1] = 0x02; + buffer[2] = 0x30; + buffer[3] = leds; + ret = gt683r_led_snd_msg(led, buffer); + + kfree(buffer); + return ret; +} + +static int gt683r_mode_set(struct gt683r_led *led, u8 mode) +{ + int ret; + u8 *buffer; + + buffer = kzalloc(GT683R_BUFFER_SIZE, GFP_KERNEL); + if (!buffer) + 
return -ENOMEM; + + buffer[0] = 0x01; + buffer[1] = 0x02; + buffer[2] = 0x20; + buffer[3] = mode; + buffer[4] = 0x01; + ret = gt683r_led_snd_msg(led, buffer); + + kfree(buffer); + return ret; +} + +static void gt683r_led_work(struct work_struct *work) +{ + int i; + u8 leds = 0; + u8 mode; + struct gt683r_led *led = container_of(work, struct gt683r_led, work); + + mutex_lock(&led->lock); + + for (i = 0; i < GT683R_LED_COUNT; i++) { + if (led->brightnesses[i]) + leds |= BIT(i); + } + + if (gt683r_leds_set(led, leds)) + goto fail; + + if (leds) + mode = led->mode; + else + mode = GT683R_LED_OFF; + + gt683r_mode_set(led, mode); +fail: + mutex_unlock(&led->lock); +} + +static DEVICE_ATTR_RW(mode); + +static struct attribute *gt683r_led_attrs[] = { + &dev_attr_mode.attr, + NULL +}; + +static const struct attribute_group gt683r_led_group = { + .name = "gt683r", + .attrs = gt683r_led_attrs, +}; + +static const struct attribute_group *gt683r_led_groups[] = { + >683r_led_group, + NULL +}; + +static int gt683r_led_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int i; + int ret; + int name_sz; + char *name; + struct gt683r_led *led; + + led = devm_kzalloc(&hdev->dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + + mutex_init(&led->lock); + INIT_WORK(&led->work, gt683r_led_work); + + led->mode = GT683R_LED_NORMAL; + led->hdev = hdev; + hid_set_drvdata(hdev, led); + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "hid parsing failed\n"); + return ret; + } + + ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); + if (ret) { + hid_err(hdev, "hw start failed\n"); + return ret; + } + + for (i = 0; i < GT683R_LED_COUNT; i++) { + name_sz = strlen(dev_name(&hdev->dev)) + + strlen(gt683r_panel_names[i]) + 3; + + name = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); + if (!name) { + ret = -ENOMEM; + goto fail; + } + + snprintf(name, name_sz, "%s::%s", + dev_name(&hdev->dev), gt683r_panel_names[i]); + led->led_devs[i].name = name; + led->led_devs[i].max_brightness = 1; + led->led_devs[i].brightness_set = gt683r_brightness_set; + led->led_devs[i].groups = gt683r_led_groups; + + ret = led_classdev_register(&hdev->dev, &led->led_devs[i]); + if (ret) { + hid_err(hdev, "could not register led device\n"); + goto fail; + } + } + + return 0; + +fail: + for (i = i - 1; i >= 0; i--) + led_classdev_unregister(&led->led_devs[i]); + hid_hw_stop(hdev); + return ret; +} + +static void gt683r_led_remove(struct hid_device *hdev) +{ + int i; + struct gt683r_led *led = hid_get_drvdata(hdev); + + for (i = 0; i < GT683R_LED_COUNT; i++) + led_classdev_unregister(&led->led_devs[i]); + flush_work(&led->work); + hid_hw_stop(hdev); +} + +static struct hid_driver gt683r_led_driver = { + .probe = gt683r_led_probe, + .remove = gt683r_led_remove, + .name = "gt683r_led", + .id_table = gt683r_led_id, +}; + +module_hid_driver(gt683r_led_driver); + +MODULE_AUTHOR("Janne Kanniainen"); +MODULE_DESCRIPTION("MSI GT683R led driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-huion.c b/drivers/hid/hid-huion.c index 60f44cd1b0ed..61b68ca27790 100644 --- a/drivers/hid/hid-huion.c +++ b/drivers/hid/hid-huion.c @@ -84,6 +84,15 @@ static const __u8 huion_tablet_rdesc_template[] = { 0xC0 /* End Collection */ }; +/* Parameter indices */ +enum huion_prm { + HUION_PRM_X_LM = 1, + HUION_PRM_Y_LM = 2, + HUION_PRM_PRESSURE_LM = 4, + HUION_PRM_RESOLUTION = 5, + HUION_PRM_NUM +}; + /* Driver data */ struct huion_drvdata { __u8 *rdesc; @@ -115,7 +124,12 @@ static int huion_tablet_enable(struct hid_device *hdev) int rc; 
struct usb_device *usb_dev = hid_to_usb_dev(hdev); struct huion_drvdata *drvdata = hid_get_drvdata(hdev); - __le16 buf[6]; + __le16 *buf = NULL; + size_t len; + s32 params[HUION_PH_ID_NUM]; + s32 resolution; + __u8 *p; + s32 v; /* * Read string descriptor containing tablet parameters. The specific @@ -123,65 +137,79 @@ static int huion_tablet_enable(struct hid_device *hdev) * driver traffic. * NOTE: This enables fully-functional tablet mode. */ + len = HUION_PRM_NUM * sizeof(*buf); + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) { + hid_err(hdev, "failed to allocate parameter buffer\n"); + rc = -ENOMEM; + goto cleanup; + } rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (USB_DT_STRING << 8) + 0x64, - 0x0409, buf, sizeof(buf), + 0x0409, buf, len, USB_CTRL_GET_TIMEOUT); - if (rc == -EPIPE) - hid_warn(hdev, "device parameters not found\n"); - else if (rc < 0) - hid_warn(hdev, "failed to get device parameters: %d\n", rc); - else if (rc != sizeof(buf)) - hid_warn(hdev, "invalid device parameters\n"); - else { - s32 params[HUION_PH_ID_NUM]; - s32 resolution; - __u8 *p; - s32 v; + if (rc == -EPIPE) { + hid_err(hdev, "device parameters not found\n"); + rc = -ENODEV; + goto cleanup; + } else if (rc < 0) { + hid_err(hdev, "failed to get device parameters: %d\n", rc); + rc = -ENODEV; + goto cleanup; + } else if (rc != len) { + hid_err(hdev, "invalid device parameters\n"); + rc = -ENODEV; + goto cleanup; + } - /* Extract device parameters */ - params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[1]); - params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[2]); - params[HUION_PH_ID_PRESSURE_LM] = le16_to_cpu(buf[4]); - resolution = le16_to_cpu(buf[5]); - if (resolution == 0) { - params[HUION_PH_ID_X_PM] = 0; - params[HUION_PH_ID_Y_PM] = 0; - } else { - params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] * - 1000 / resolution; - params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] * - 1000 / resolution; - } + /* Extract device parameters */ + params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[HUION_PRM_X_LM]); + params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[HUION_PRM_Y_LM]); + params[HUION_PH_ID_PRESSURE_LM] = + le16_to_cpu(buf[HUION_PRM_PRESSURE_LM]); + resolution = le16_to_cpu(buf[HUION_PRM_RESOLUTION]); + if (resolution == 0) { + params[HUION_PH_ID_X_PM] = 0; + params[HUION_PH_ID_Y_PM] = 0; + } else { + params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] * + 1000 / resolution; + params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] * + 1000 / resolution; + } - /* Allocate fixed report descriptor */ - drvdata->rdesc = devm_kmalloc(&hdev->dev, - sizeof(huion_tablet_rdesc_template), - GFP_KERNEL); - if (drvdata->rdesc == NULL) { - hid_err(hdev, "failed to allocate fixed rdesc\n"); - return -ENOMEM; - } - drvdata->rsize = sizeof(huion_tablet_rdesc_template); + /* Allocate fixed report descriptor */ + drvdata->rdesc = devm_kmalloc(&hdev->dev, + sizeof(huion_tablet_rdesc_template), + GFP_KERNEL); + if (drvdata->rdesc == NULL) { + hid_err(hdev, "failed to allocate fixed rdesc\n"); + rc = -ENOMEM; + goto cleanup; + } + drvdata->rsize = sizeof(huion_tablet_rdesc_template); - /* Format fixed report descriptor */ - memcpy(drvdata->rdesc, huion_tablet_rdesc_template, - drvdata->rsize); - for (p = drvdata->rdesc; - p <= drvdata->rdesc + drvdata->rsize - 4;) { - if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && - p[3] < sizeof(params)) { - v = params[p[3]]; - put_unaligned(cpu_to_le32(v), (s32 *)p); - p += 4; - } else { - p++; - } + /* Format fixed report descriptor */ + memcpy(drvdata->rdesc, 
huion_tablet_rdesc_template, + drvdata->rsize); + for (p = drvdata->rdesc; + p <= drvdata->rdesc + drvdata->rsize - 4;) { + if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && + p[3] < sizeof(params)) { + v = params[p[3]]; + put_unaligned(cpu_to_le32(v), (s32 *)p); + p += 4; + } else { + p++; } } - return 0; + rc = 0; + +cleanup: + kfree(buf); + return rc; } static int huion_probe(struct hid_device *hdev, const struct hid_device_id *id) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index d53bdda26207..25cd674d6064 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -650,7 +650,7 @@ #define USB_DEVICE_ID_GENIUS_KB29E 0x3004 #define USB_VENDOR_ID_MSI 0x1770 -#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00 +#define USB_DEVICE_ID_MSI_GT683R_LED_PANEL 0xff00 #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 #define USB_DEVICE_ID_N_S_HARMONY 0xc359 diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index e77696367591..b92bf01a1ae8 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, * - change the button usage range to 4-7 for the extra * buttons */ - if (*rsize >= 74 && + if (*rsize >= 75 && rdesc[61] == 0x05 && rdesc[62] == 0x08 && rdesc[63] == 0x19 && rdesc[64] == 0x08 && rdesc[65] == 0x29 && rdesc[66] == 0x0f && diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index a976f48263f6..f91ff145db9a 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, struct usb_device_descriptor *udesc; __u16 bcdDevice, rev_maj, rev_min; - if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 && + if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); rdesc[84] = rdesc[89] = 0x4d; rdesc[85] = rdesc[90] = 0x10; } - if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 && + if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && rdesc[49] == 0x81 && rdesc[50] == 0x06) { hid_info(hdev, diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index cc2bd2022198..7835717bc020 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -451,13 +451,13 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at drv_data = hid_get_drvdata(hid); if (!drv_data) { hid_err(hid, "Private driver data not found!\n"); - return 0; + return -EINVAL; } entry = drv_data->device_props; if (!entry) { hid_err(hid, "Device properties not found!\n"); - return 0; + return -EINVAL; } if (range == 0) diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 486dbde2ba2d..9bf8637747a5 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -238,13 +238,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, return; } - if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || - (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { - dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n", - __func__, dj_report->device_index); - return; - } - if (djrcv_dev->paired_dj_devices[dj_report->device_index]) { /* The device is already known. No need to reallocate it. 
*/ dbg_hid("%s: device is already known\n", __func__); @@ -557,7 +550,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid, if (!out_buf) return -ENOMEM; - if (count < DJREPORT_SHORT_LENGTH - 2) + if (count > DJREPORT_SHORT_LENGTH - 2) count = DJREPORT_SHORT_LENGTH - 2; out_buf[0] = REPORT_ID_DJ_SHORT; @@ -663,7 +656,6 @@ static int logi_dj_raw_event(struct hid_device *hdev, struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_report *dj_report = (struct dj_report *) data; unsigned long flags; - bool report_processed = false; dbg_hid("%s, size:%d\n", __func__, size); @@ -691,27 +683,41 @@ static int logi_dj_raw_event(struct hid_device *hdev, * anything else with it. */ + /* case 1) */ + if (data[0] != REPORT_ID_DJ_SHORT) + return false; + + if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || + (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { + /* + * Device index is wrong, bail out. + * This driver can ignore safely the receiver notifications, + * so ignore those reports too. + */ + if (dj_report->device_index != DJ_RECEIVER_INDEX) + dev_err(&hdev->dev, "%s: invalid device index:%d\n", + __func__, dj_report->device_index); + return false; + } + spin_lock_irqsave(&djrcv_dev->lock, flags); - if (dj_report->report_id == REPORT_ID_DJ_SHORT) { - switch (dj_report->report_type) { - case REPORT_TYPE_NOTIF_DEVICE_PAIRED: - case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: - logi_dj_recv_queue_notification(djrcv_dev, dj_report); - break; - case REPORT_TYPE_NOTIF_CONNECTION_STATUS: - if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == - STATUS_LINKLOSS) { - logi_dj_recv_forward_null_report(djrcv_dev, dj_report); - } - break; - default: - logi_dj_recv_forward_report(djrcv_dev, dj_report); + switch (dj_report->report_type) { + case REPORT_TYPE_NOTIF_DEVICE_PAIRED: + case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: + logi_dj_recv_queue_notification(djrcv_dev, dj_report); + break; + case REPORT_TYPE_NOTIF_CONNECTION_STATUS: + if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == + STATUS_LINKLOSS) { + logi_dj_recv_forward_null_report(djrcv_dev, dj_report); } - report_processed = true; + break; + default: + logi_dj_recv_forward_report(djrcv_dev, dj_report); } spin_unlock_irqrestore(&djrcv_dev->lock, flags); - return report_processed; + return true; } static int logi_dj_probe(struct hid_device *hdev, diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h index 4a4000340ce1..daeb0aa4bee9 100644 --- a/drivers/hid/hid-logitech-dj.h +++ b/drivers/hid/hid-logitech-dj.h @@ -27,6 +27,7 @@ #define DJ_MAX_PAIRED_DEVICES 6 #define DJ_MAX_NUMBER_NOTIFICATIONS 8 +#define DJ_RECEIVER_INDEX 0 #define DJ_DEVICE_INDEX_MIN 1 #define DJ_DEVICE_INDEX_MAX 6 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index ecc2cbf300cc..29a74c1efcb8 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, if (size < 4 || ((size - 4) % 9) != 0) return 0; npoints = (size - 4) / 9; + if (npoints > 15) { + hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", + size); + return 0; + } msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); @@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, if (size < 6 || ((size - 6) % 8) != 0) return 0; npoints = (size - 6) / 8; + if (npoints > 15) { + hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", + size); + return 0; + } 
msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c index 9e14c00eb1b6..25daf28b26bd 100644 --- a/drivers/hid/hid-monterey.c +++ b/drivers/hid/hid-monterey.c @@ -24,7 +24,7 @@ static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { + if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c index 736b2502df4f..6aca4f2554bf 100644 --- a/drivers/hid/hid-petalynx.c +++ b/drivers/hid/hid-petalynx.c @@ -25,7 +25,7 @@ static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && + if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && rdesc[41] == 0x00 && rdesc[59] == 0x26 && rdesc[60] == 0xf9 && rdesc[61] == 0x00) { hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n"); diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c index acbb021065ec..020df3c2e8b4 100644 --- a/drivers/hid/hid-picolcd_core.c +++ b/drivers/hid/hid-picolcd_core.c @@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev, if (!data) return 1; + if (size > 64) { + hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n", + size); + return 0; + } + if (report->id == REPORT_KEY_STATE) { if (data->input_keys) ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 0dc25142f451..8389e8109218 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -909,10 +909,15 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } - if (!test_bit(RMI_STARTED, &data->flags)) { - hid_hw_stop(hdev); - return -EIO; - } + if (!test_bit(RMI_STARTED, &data->flags)) + /* + * The device maybe in the bootloader if rmi_input_configured + * failed to find F11 in the PDT. Print an error, but don't + * return an error from rmi_probe so that hidraw will be + * accessible from userspace. That way a userspace tool + * can be used to reload working firmware on the touchpad. 
+ */ + hid_err(hdev, "Device failed to be properly configured\n"); return 0; } diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index e244e449cbba..2ac25760a9a9 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -604,9 +604,9 @@ static int sensor_hub_probe(struct hid_device *hdev, ret = -EINVAL; goto err_stop_hw; } - sd->hid_sensor_hub_client_devs = kzalloc(dev_cnt * - sizeof(struct mfd_cell), - GFP_KERNEL); + sd->hid_sensor_hub_client_devs = devm_kzalloc(&hdev->dev, dev_cnt * + sizeof(struct mfd_cell), + GFP_KERNEL); if (sd->hid_sensor_hub_client_devs == NULL) { hid_err(hdev, "Failed to allocate memory for mfd cells\n"); ret = -ENOMEM; @@ -618,11 +618,12 @@ static int sensor_hub_probe(struct hid_device *hdev, if (collection->type == HID_COLLECTION_PHYSICAL) { - hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL); + hsdev = devm_kzalloc(&hdev->dev, sizeof(*hsdev), + GFP_KERNEL); if (!hsdev) { hid_err(hdev, "cannot allocate hid_sensor_hub_device\n"); ret = -ENOMEM; - goto err_no_mem; + goto err_stop_hw; } hsdev->hdev = hdev; hsdev->vendor_id = hdev->vendor; @@ -631,13 +632,13 @@ static int sensor_hub_probe(struct hid_device *hdev, if (last_hsdev) last_hsdev->end_collection_index = i; last_hsdev = hsdev; - name = kasprintf(GFP_KERNEL, "HID-SENSOR-%x", - collection->usage); + name = devm_kasprintf(&hdev->dev, GFP_KERNEL, + "HID-SENSOR-%x", + collection->usage); if (name == NULL) { hid_err(hdev, "Failed MFD device name\n"); ret = -ENOMEM; - kfree(hsdev); - goto err_no_mem; + goto err_stop_hw; } sd->hid_sensor_hub_client_devs[ sd->hid_sensor_client_cnt].id = @@ -661,16 +662,10 @@ static int sensor_hub_probe(struct hid_device *hdev, ret = mfd_add_devices(&hdev->dev, 0, sd->hid_sensor_hub_client_devs, sd->hid_sensor_client_cnt, NULL, 0, NULL); if (ret < 0) - goto err_no_mem; + goto err_stop_hw; return ret; -err_no_mem: - for (i = 0; i < sd->hid_sensor_client_cnt; ++i) { - kfree(sd->hid_sensor_hub_client_devs[i].name); - kfree(sd->hid_sensor_hub_client_devs[i].platform_data); - } - kfree(sd->hid_sensor_hub_client_devs); err_stop_hw: hid_hw_stop(hdev); @@ -681,7 +676,6 @@ static void sensor_hub_remove(struct hid_device *hdev) { struct sensor_hub_data *data = hid_get_drvdata(hdev); unsigned long flags; - int i; hid_dbg(hdev, " hardware removed\n"); hid_hw_close(hdev); @@ -691,11 +685,6 @@ static void sensor_hub_remove(struct hid_device *hdev) complete(&data->pending.ready); spin_unlock_irqrestore(&data->lock, flags); mfd_remove_devices(&hdev->dev); - for (i = 0; i < data->hid_sensor_client_cnt; ++i) { - kfree(data->hid_sensor_hub_client_devs[i].name); - kfree(data->hid_sensor_hub_client_devs[i].platform_data); - } - kfree(data->hid_sensor_hub_client_devs); hid_set_drvdata(hdev, NULL); mutex_destroy(&data->mutex); } diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c index 87fc91e1c8de..91072fa54663 100644 --- a/drivers/hid/hid-sunplus.c +++ b/drivers/hid/hid-sunplus.c @@ -24,7 +24,7 @@ static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && + if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && rdesc[106] == 0x03) { hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n"); rdesc[105] = rdesc[110] = 0x03; diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c deleted file mode 100644 index 902013ec041b..000000000000 --- a/drivers/hid/hid-wacom.c +++ /dev/null @@ -1,973 +0,0 @@ -/* - * Bluetooth Wacom Tablet 
support - * - * Copyright (c) 1999 Andreas Gal - * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> - * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc - * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com> - * Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru> - * Copyright (c) 2009 Bastien Nocera <hadess@hadess.net> - * Copyright (c) 2011 PrzemysÅ‚aw Firszt <przemo@firszt.eu> - */ - -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/device.h> -#include <linux/hid.h> -#include <linux/module.h> -#include <linux/leds.h> -#include <linux/slab.h> -#include <linux/power_supply.h> - -#include "hid-ids.h" - -#define PAD_DEVICE_ID 0x0F - -#define WAC_CMD_LED_CONTROL 0x20 -#define WAC_CMD_ICON_START_STOP 0x21 -#define WAC_CMD_ICON_TRANSFER 0x26 - -struct wacom_data { - __u16 tool; - __u16 butstate; - __u8 whlstate; - __u8 features; - __u32 id; - __u32 serial; - unsigned char high_speed; - __u8 battery_capacity; - __u8 power_raw; - __u8 ps_connected; - __u8 bat_charging; - struct power_supply battery; - struct power_supply ac; - __u8 led_selector; - struct led_classdev *leds[4]; -}; - -/*percent of battery capacity for Graphire - 8th value means AC online and show 100% capacity */ -static unsigned short batcap_gr[8] = { 1, 15, 25, 35, 50, 70, 100, 100 }; -/*percent of battery capacity for Intuos4 WL, AC has a separate bit*/ -static unsigned short batcap_i4[8] = { 1, 15, 30, 45, 60, 70, 85, 100 }; - -static enum power_supply_property wacom_battery_props[] = { - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_SCOPE, - POWER_SUPPLY_PROP_STATUS, -}; - -static enum power_supply_property wacom_ac_props[] = { - POWER_SUPPLY_PROP_PRESENT, - POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_SCOPE, -}; - -static void wacom_scramble(__u8 *image) -{ - __u16 mask; - __u16 s1; - __u16 s2; - __u16 r1 ; - __u16 r2 ; - __u16 r; - __u8 buf[256]; - int i, w, x, y, z; - - for (x = 0; x < 32; x++) { - for (y = 0; y < 8; y++) - buf[(8 * x) + (7 - y)] = image[(8 * x) + y]; - } - - /* Change 76543210 into GECA6420 as required by Intuos4 WL - * HGFEDCBA HFDB7531 - */ - for (x = 0; x < 4; x++) { - for (y = 0; y < 4; y++) { - for (z = 0; z < 8; z++) { - mask = 0x0001; - r1 = 0; - r2 = 0; - i = (x << 6) + (y << 4) + z; - s1 = buf[i]; - s2 = buf[i+8]; - for (w = 0; w < 8; w++) { - r1 |= (s1 & mask); - r2 |= (s2 & mask); - s1 <<= 1; - s2 <<= 1; - mask <<= 2; - } - r = r1 | (r2 << 1); - i = (x << 6) + (y << 4) + (z << 1); - image[i] = 0xFF & r; - image[i+1] = (0xFF00 & r) >> 8; - } - } - } -} - -static void wacom_set_image(struct hid_device *hdev, const char *image, - __u8 icon_no) -{ - __u8 rep_data[68]; - __u8 p[256]; - int ret, i, j; - - for (i = 0; i < 256; i++) - p[i] = image[i]; - - rep_data[0] = WAC_CMD_ICON_START_STOP; - rep_data[1] = 0; - ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret < 0) - goto err; - - rep_data[0] = WAC_CMD_ICON_TRANSFER; - rep_data[1] = icon_no & 0x07; - - wacom_scramble(p); - - for (i = 0; i < 4; i++) { - for (j = 0; j < 64; j++) - rep_data[j + 3] = p[(i << 6) + j]; - - rep_data[2] = i; - ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 67, - 
HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - } - - rep_data[0] = WAC_CMD_ICON_START_STOP; - rep_data[1] = 0; - - ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - -err: - return; -} - -static void wacom_leds_set_brightness(struct led_classdev *led_dev, - enum led_brightness value) -{ - struct device *dev = led_dev->dev->parent; - struct hid_device *hdev; - struct wacom_data *wdata; - unsigned char *buf; - __u8 led = 0; - int i; - - hdev = container_of(dev, struct hid_device, dev); - wdata = hid_get_drvdata(hdev); - for (i = 0; i < 4; ++i) { - if (wdata->leds[i] == led_dev) - wdata->led_selector = i; - } - - led = wdata->led_selector | 0x04; - buf = kzalloc(9, GFP_KERNEL); - if (buf) { - buf[0] = WAC_CMD_LED_CONTROL; - buf[1] = led; - buf[2] = value >> 2; - buf[3] = value; - /* use fixed brightness for OLEDs */ - buf[4] = 0x08; - hid_hw_raw_request(hdev, buf[0], buf, 9, HID_FEATURE_REPORT, - HID_REQ_SET_REPORT); - kfree(buf); - } - - return; -} - -static enum led_brightness wacom_leds_get_brightness(struct led_classdev *led_dev) -{ - struct wacom_data *wdata; - struct device *dev = led_dev->dev->parent; - int value = 0; - int i; - - wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); - - for (i = 0; i < 4; ++i) { - if (wdata->leds[i] == led_dev) { - value = wdata->leds[i]->brightness; - break; - } - } - - return value; -} - - -static int wacom_initialize_leds(struct hid_device *hdev) -{ - struct wacom_data *wdata = hid_get_drvdata(hdev); - struct led_classdev *led; - struct device *dev = &hdev->dev; - size_t namesz = strlen(dev_name(dev)) + 12; - char *name; - int i, ret; - - wdata->led_selector = 0; - - for (i = 0; i < 4; i++) { - led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL); - if (!led) { - hid_warn(hdev, - "can't allocate memory for LED selector\n"); - ret = -ENOMEM; - goto err; - } - - name = (void *)&led[1]; - snprintf(name, namesz, "%s:selector:%d", dev_name(dev), i); - led->name = name; - led->brightness = 0; - led->max_brightness = 127; - led->brightness_get = wacom_leds_get_brightness; - led->brightness_set = wacom_leds_set_brightness; - - wdata->leds[i] = led; - - ret = led_classdev_register(dev, wdata->leds[i]); - - if (ret) { - wdata->leds[i] = NULL; - kfree(led); - hid_warn(hdev, "can't register LED\n"); - goto err; - } - } - -err: - return ret; -} - -static void wacom_destroy_leds(struct hid_device *hdev) -{ - struct wacom_data *wdata = hid_get_drvdata(hdev); - struct led_classdev *led; - int i; - - for (i = 0; i < 4; ++i) { - if (wdata->leds[i]) { - led = wdata->leds[i]; - wdata->leds[i] = NULL; - led_classdev_unregister(led); - kfree(led); - } - } - -} - -static int wacom_battery_get_property(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) -{ - struct wacom_data *wdata = container_of(psy, - struct wacom_data, battery); - int ret = 0; - - switch (psp) { - case POWER_SUPPLY_PROP_PRESENT: - val->intval = 1; - break; - case POWER_SUPPLY_PROP_SCOPE: - val->intval = POWER_SUPPLY_SCOPE_DEVICE; - break; - case POWER_SUPPLY_PROP_CAPACITY: - val->intval = wdata->battery_capacity; - break; - case POWER_SUPPLY_PROP_STATUS: - if (wdata->bat_charging) - val->intval = POWER_SUPPLY_STATUS_CHARGING; - else - if (wdata->battery_capacity == 100 && wdata->ps_connected) - val->intval = POWER_SUPPLY_STATUS_FULL; - else - val->intval = POWER_SUPPLY_STATUS_DISCHARGING; - break; - default: - ret = -EINVAL; - break; - } - return ret; -} - -static int 
wacom_ac_get_property(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) -{ - struct wacom_data *wdata = container_of(psy, struct wacom_data, ac); - int ret = 0; - - switch (psp) { - case POWER_SUPPLY_PROP_PRESENT: - /* fall through */ - case POWER_SUPPLY_PROP_ONLINE: - val->intval = wdata->ps_connected; - break; - case POWER_SUPPLY_PROP_SCOPE: - val->intval = POWER_SUPPLY_SCOPE_DEVICE; - break; - default: - ret = -EINVAL; - break; - } - return ret; -} - -static void wacom_set_features(struct hid_device *hdev, u8 speed) -{ - struct wacom_data *wdata = hid_get_drvdata(hdev); - int limit, ret; - __u8 rep_data[2]; - - switch (hdev->product) { - case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH: - rep_data[0] = 0x03 ; rep_data[1] = 0x00; - limit = 3; - do { - ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - } while (ret < 0 && limit-- > 0); - - if (ret >= 0) { - if (speed == 0) - rep_data[0] = 0x05; - else - rep_data[0] = 0x06; - - rep_data[1] = 0x00; - limit = 3; - do { - ret = hid_hw_raw_request(hdev, rep_data[0], - rep_data, 2, HID_FEATURE_REPORT, - HID_REQ_SET_REPORT); - } while (ret < 0 && limit-- > 0); - - if (ret >= 0) { - wdata->high_speed = speed; - return; - } - } - - /* - * Note that if the raw queries fail, it's not a hard failure - * and it is safe to continue - */ - hid_warn(hdev, "failed to poke device, command %d, err %d\n", - rep_data[0], ret); - break; - case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH: - if (speed == 1) - wdata->features &= ~0x20; - else - wdata->features |= 0x20; - - rep_data[0] = 0x03; - rep_data[1] = wdata->features; - - ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2, - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret >= 0) - wdata->high_speed = speed; - break; - } - - return; -} - -static ssize_t wacom_show_speed(struct device *dev, - struct device_attribute - *attr, char *buf) -{ - struct wacom_data *wdata = dev_get_drvdata(dev); - - return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed); -} - -static ssize_t wacom_store_speed(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct hid_device *hdev = container_of(dev, struct hid_device, dev); - int new_speed; - - if (sscanf(buf, "%1d", &new_speed ) != 1) - return -EINVAL; - - if (new_speed == 0 || new_speed == 1) { - wacom_set_features(hdev, new_speed); - return strnlen(buf, PAGE_SIZE); - } else - return -EINVAL; -} - -static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP, - wacom_show_speed, wacom_store_speed); - -#define WACOM_STORE(OLED_ID) \ -static ssize_t wacom_oled##OLED_ID##_store(struct device *dev, \ - struct device_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - struct hid_device *hdev = container_of(dev, struct hid_device, \ - dev); \ - \ - if (count != 256) \ - return -EINVAL; \ - \ - wacom_set_image(hdev, buf, OLED_ID); \ - \ - return count; \ -} \ - \ -static DEVICE_ATTR(oled##OLED_ID##_img, S_IWUSR | S_IWGRP, NULL, \ - wacom_oled##OLED_ID##_store) - -WACOM_STORE(0); -WACOM_STORE(1); -WACOM_STORE(2); -WACOM_STORE(3); -WACOM_STORE(4); -WACOM_STORE(5); -WACOM_STORE(6); -WACOM_STORE(7); - -static int wacom_gr_parse_report(struct hid_device *hdev, - struct wacom_data *wdata, - struct input_dev *input, unsigned char *data) -{ - int tool, x, y, rw; - - tool = 0; - /* Get X & Y positions */ - x = le16_to_cpu(*(__le16 *) &data[2]); - y = le16_to_cpu(*(__le16 *) &data[4]); - - /* Get current tool identifier */ - if (data[1] & 0x90) { /* If pen 
is in the in/active area */ - switch ((data[1] >> 5) & 3) { - case 0: /* Pen */ - tool = BTN_TOOL_PEN; - break; - - case 1: /* Rubber */ - tool = BTN_TOOL_RUBBER; - break; - - case 2: /* Mouse with wheel */ - case 3: /* Mouse without wheel */ - tool = BTN_TOOL_MOUSE; - break; - } - - /* Reset tool if out of active tablet area */ - if (!(data[1] & 0x10)) - tool = 0; - } - - /* If tool changed, notify input subsystem */ - if (wdata->tool != tool) { - if (wdata->tool) { - /* Completely reset old tool state */ - if (wdata->tool == BTN_TOOL_MOUSE) { - input_report_key(input, BTN_LEFT, 0); - input_report_key(input, BTN_RIGHT, 0); - input_report_key(input, BTN_MIDDLE, 0); - input_report_abs(input, ABS_DISTANCE, - input_abs_get_max(input, ABS_DISTANCE)); - } else { - input_report_key(input, BTN_TOUCH, 0); - input_report_key(input, BTN_STYLUS, 0); - input_report_key(input, BTN_STYLUS2, 0); - input_report_abs(input, ABS_PRESSURE, 0); - } - input_report_key(input, wdata->tool, 0); - input_sync(input); - } - wdata->tool = tool; - if (tool) - input_report_key(input, tool, 1); - } - - if (tool) { - input_report_abs(input, ABS_X, x); - input_report_abs(input, ABS_Y, y); - - switch ((data[1] >> 5) & 3) { - case 2: /* Mouse with wheel */ - input_report_key(input, BTN_MIDDLE, data[1] & 0x04); - rw = (data[6] & 0x01) ? -1 : - (data[6] & 0x02) ? 1 : 0; - input_report_rel(input, REL_WHEEL, rw); - /* fall through */ - - case 3: /* Mouse without wheel */ - input_report_key(input, BTN_LEFT, data[1] & 0x01); - input_report_key(input, BTN_RIGHT, data[1] & 0x02); - /* Compute distance between mouse and tablet */ - rw = 44 - (data[6] >> 2); - if (rw < 0) - rw = 0; - else if (rw > 31) - rw = 31; - input_report_abs(input, ABS_DISTANCE, rw); - break; - - default: - input_report_abs(input, ABS_PRESSURE, - data[6] | (((__u16) (data[1] & 0x08)) << 5)); - input_report_key(input, BTN_TOUCH, data[1] & 0x01); - input_report_key(input, BTN_STYLUS, data[1] & 0x02); - input_report_key(input, BTN_STYLUS2, (tool == BTN_TOOL_PEN) && data[1] & 0x04); - break; - } - - input_sync(input); - } - - /* Report the state of the two buttons at the top of the tablet - * as two extra fingerpad keys (buttons 4 & 5). 
*/ - rw = data[7] & 0x03; - if (rw != wdata->butstate) { - wdata->butstate = rw; - input_report_key(input, BTN_0, rw & 0x02); - input_report_key(input, BTN_1, rw & 0x01); - input_report_key(input, BTN_TOOL_FINGER, 0xf0); - input_event(input, EV_MSC, MSC_SERIAL, 0xf0); - input_sync(input); - } - - /* Store current battery capacity and power supply state*/ - rw = (data[7] >> 2 & 0x07); - if (rw != wdata->power_raw) { - wdata->power_raw = rw; - wdata->battery_capacity = batcap_gr[rw]; - if (rw == 7) - wdata->ps_connected = 1; - else - wdata->ps_connected = 0; - } - return 1; -} - -static void wacom_i4_parse_button_report(struct wacom_data *wdata, - struct input_dev *input, unsigned char *data) -{ - __u16 new_butstate; - __u8 new_whlstate; - __u8 sync = 0; - - new_whlstate = data[1]; - if (new_whlstate != wdata->whlstate) { - wdata->whlstate = new_whlstate; - if (new_whlstate & 0x80) { - input_report_key(input, BTN_TOUCH, 1); - input_report_abs(input, ABS_WHEEL, (new_whlstate & 0x7f)); - input_report_key(input, BTN_TOOL_FINGER, 1); - } else { - input_report_key(input, BTN_TOUCH, 0); - input_report_abs(input, ABS_WHEEL, 0); - input_report_key(input, BTN_TOOL_FINGER, 0); - } - sync = 1; - } - - new_butstate = (data[3] << 1) | (data[2] & 0x01); - if (new_butstate != wdata->butstate) { - wdata->butstate = new_butstate; - input_report_key(input, BTN_0, new_butstate & 0x001); - input_report_key(input, BTN_1, new_butstate & 0x002); - input_report_key(input, BTN_2, new_butstate & 0x004); - input_report_key(input, BTN_3, new_butstate & 0x008); - input_report_key(input, BTN_4, new_butstate & 0x010); - input_report_key(input, BTN_5, new_butstate & 0x020); - input_report_key(input, BTN_6, new_butstate & 0x040); - input_report_key(input, BTN_7, new_butstate & 0x080); - input_report_key(input, BTN_8, new_butstate & 0x100); - input_report_key(input, BTN_TOOL_FINGER, 1); - sync = 1; - } - - if (sync) { - input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); - input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff); - input_sync(input); - } -} - -static void wacom_i4_parse_pen_report(struct wacom_data *wdata, - struct input_dev *input, unsigned char *data) -{ - __u16 x, y, pressure; - __u8 distance; - __u8 tilt_x, tilt_y; - - switch (data[1]) { - case 0x80: /* Out of proximity report */ - input_report_key(input, BTN_TOUCH, 0); - input_report_abs(input, ABS_PRESSURE, 0); - input_report_key(input, BTN_STYLUS, 0); - input_report_key(input, BTN_STYLUS2, 0); - input_report_key(input, wdata->tool, 0); - input_report_abs(input, ABS_MISC, 0); - input_event(input, EV_MSC, MSC_SERIAL, wdata->serial); - wdata->tool = 0; - input_sync(input); - break; - case 0xC2: /* Tool report */ - wdata->id = ((data[2] << 4) | (data[3] >> 4) | - ((data[7] & 0x0f) << 20) | - ((data[8] & 0xf0) << 12)); - wdata->serial = ((data[3] & 0x0f) << 28) + - (data[4] << 20) + (data[5] << 12) + - (data[6] << 4) + (data[7] >> 4); - - switch (wdata->id) { - case 0x100802: - wdata->tool = BTN_TOOL_PEN; - break; - case 0x10080A: - wdata->tool = BTN_TOOL_RUBBER; - break; - } - break; - default: /* Position/pressure report */ - x = data[2] << 9 | data[3] << 1 | ((data[9] & 0x02) >> 1); - y = data[4] << 9 | data[5] << 1 | (data[9] & 0x01); - pressure = (data[6] << 3) | ((data[7] & 0xC0) >> 5) - | (data[1] & 0x01); - distance = (data[9] >> 2) & 0x3f; - tilt_x = ((data[7] << 1) & 0x7e) | (data[8] >> 7); - tilt_y = data[8] & 0x7f; - - input_report_key(input, BTN_TOUCH, pressure > 1); - - input_report_key(input, BTN_STYLUS, data[1] & 0x02); - input_report_key(input, 
BTN_STYLUS2, data[1] & 0x04); - input_report_key(input, wdata->tool, 1); - input_report_abs(input, ABS_X, x); - input_report_abs(input, ABS_Y, y); - input_report_abs(input, ABS_PRESSURE, pressure); - input_report_abs(input, ABS_DISTANCE, distance); - input_report_abs(input, ABS_TILT_X, tilt_x); - input_report_abs(input, ABS_TILT_Y, tilt_y); - input_report_abs(input, ABS_MISC, wdata->id); - input_event(input, EV_MSC, MSC_SERIAL, wdata->serial); - input_report_key(input, wdata->tool, 1); - input_sync(input); - break; - } - - return; -} - -static void wacom_i4_parse_report(struct hid_device *hdev, - struct wacom_data *wdata, - struct input_dev *input, unsigned char *data) -{ - switch (data[0]) { - case 0x00: /* Empty report */ - break; - case 0x02: /* Pen report */ - wacom_i4_parse_pen_report(wdata, input, data); - break; - case 0x03: /* Features Report */ - wdata->features = data[2]; - break; - case 0x0C: /* Button report */ - wacom_i4_parse_button_report(wdata, input, data); - break; - default: - hid_err(hdev, "Unknown report: %d,%d\n", data[0], data[1]); - break; - } -} - -static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, - u8 *raw_data, int size) -{ - struct wacom_data *wdata = hid_get_drvdata(hdev); - struct hid_input *hidinput; - struct input_dev *input; - unsigned char *data = (unsigned char *) raw_data; - int i; - __u8 power_raw; - - if (!(hdev->claimed & HID_CLAIMED_INPUT)) - return 0; - - hidinput = list_entry(hdev->inputs.next, struct hid_input, list); - input = hidinput->input; - - switch (hdev->product) { - case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH: - if (data[0] == 0x03) { - return wacom_gr_parse_report(hdev, wdata, input, data); - } else { - hid_err(hdev, "Unknown report: %d,%d size:%d\n", - data[0], data[1], size); - return 0; - } - break; - case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH: - i = 1; - - switch (data[0]) { - case 0x04: - wacom_i4_parse_report(hdev, wdata, input, data + i); - i += 10; - /* fall through */ - case 0x03: - wacom_i4_parse_report(hdev, wdata, input, data + i); - i += 10; - wacom_i4_parse_report(hdev, wdata, input, data + i); - power_raw = data[i+10]; - if (power_raw != wdata->power_raw) { - wdata->power_raw = power_raw; - wdata->battery_capacity = batcap_i4[power_raw & 0x07]; - wdata->bat_charging = (power_raw & 0x08) ? 1 : 0; - wdata->ps_connected = (power_raw & 0x10) ? 
1 : 0; - } - - break; - default: - hid_err(hdev, "Unknown report: %d,%d size:%d\n", - data[0], data[1], size); - return 0; - } - } - return 1; -} - -static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi, - struct hid_field *field, struct hid_usage *usage, unsigned long **bit, - int *max) -{ - struct input_dev *input = hi->input; - - __set_bit(INPUT_PROP_POINTER, input->propbit); - - /* Basics */ - input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL); - - __set_bit(REL_WHEEL, input->relbit); - - __set_bit(BTN_TOOL_PEN, input->keybit); - __set_bit(BTN_TOUCH, input->keybit); - __set_bit(BTN_STYLUS, input->keybit); - __set_bit(BTN_STYLUS2, input->keybit); - __set_bit(BTN_LEFT, input->keybit); - __set_bit(BTN_RIGHT, input->keybit); - __set_bit(BTN_MIDDLE, input->keybit); - - /* Pad */ - input_set_capability(input, EV_MSC, MSC_SERIAL); - - __set_bit(BTN_0, input->keybit); - __set_bit(BTN_1, input->keybit); - __set_bit(BTN_TOOL_FINGER, input->keybit); - - /* Distance, rubber and mouse */ - __set_bit(BTN_TOOL_RUBBER, input->keybit); - __set_bit(BTN_TOOL_MOUSE, input->keybit); - - switch (hdev->product) { - case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH: - input_set_abs_params(input, ABS_X, 0, 16704, 4, 0); - input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0); - input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0); - input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0); - break; - case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH: - __set_bit(ABS_WHEEL, input->absbit); - __set_bit(ABS_MISC, input->absbit); - __set_bit(BTN_2, input->keybit); - __set_bit(BTN_3, input->keybit); - __set_bit(BTN_4, input->keybit); - __set_bit(BTN_5, input->keybit); - __set_bit(BTN_6, input->keybit); - __set_bit(BTN_7, input->keybit); - __set_bit(BTN_8, input->keybit); - input_set_abs_params(input, ABS_WHEEL, 0, 71, 0, 0); - input_set_abs_params(input, ABS_X, 0, 40640, 4, 0); - input_set_abs_params(input, ABS_Y, 0, 25400, 4, 0); - input_set_abs_params(input, ABS_PRESSURE, 0, 2047, 0, 0); - input_set_abs_params(input, ABS_DISTANCE, 0, 63, 0, 0); - input_set_abs_params(input, ABS_TILT_X, 0, 127, 0, 0); - input_set_abs_params(input, ABS_TILT_Y, 0, 127, 0, 0); - break; - } - - return 0; -} - -static int wacom_probe(struct hid_device *hdev, - const struct hid_device_id *id) -{ - struct wacom_data *wdata; - int ret; - - wdata = kzalloc(sizeof(*wdata), GFP_KERNEL); - if (wdata == NULL) { - hid_err(hdev, "can't alloc wacom descriptor\n"); - return -ENOMEM; - } - - hid_set_drvdata(hdev, wdata); - - /* Parse the HID report now */ - ret = hid_parse(hdev); - if (ret) { - hid_err(hdev, "parse failed\n"); - goto err_free; - } - - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); - if (ret) { - hid_err(hdev, "hw start failed\n"); - goto err_free; - } - - ret = device_create_file(&hdev->dev, &dev_attr_speed); - if (ret) - hid_warn(hdev, - "can't create sysfs speed attribute err: %d\n", ret); - -#define OLED_INIT(OLED_ID) \ - do { \ - ret = device_create_file(&hdev->dev, \ - &dev_attr_oled##OLED_ID##_img); \ - if (ret) \ - hid_warn(hdev, \ - "can't create sysfs oled attribute, err: %d\n", ret);\ - } while (0) - -OLED_INIT(0); -OLED_INIT(1); -OLED_INIT(2); -OLED_INIT(3); -OLED_INIT(4); -OLED_INIT(5); -OLED_INIT(6); -OLED_INIT(7); - - wdata->features = 0; - wacom_set_features(hdev, 1); - - if (hdev->product == USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) { - sprintf(hdev->name, "%s", "Wacom Intuos4 WL"); - ret = wacom_initialize_leds(hdev); - if (ret) - hid_warn(hdev, - "can't create led attribute, err: %d\n", ret); - } - - 
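/*
 * Minimal illustrative sketch of the power_supply pattern used by the
 * battery/AC reporting in this diff: the power_supply struct is embedded in
 * the driver's private data and the property callback recovers that data
 * with container_of().  All foo_* names below are hypothetical, not taken
 * from the wacom code.
 */
#include <linux/device.h>
#include <linux/power_supply.h>

struct foo_dev {
        struct power_supply battery;
        u8 capacity;                    /* 0..100, updated from raw reports */
};

static enum power_supply_property foo_battery_props[] = {
        POWER_SUPPLY_PROP_CAPACITY,
};

static int foo_battery_get_property(struct power_supply *psy,
                                    enum power_supply_property psp,
                                    union power_supply_propval *val)
{
        /* psy is embedded in struct foo_dev, so container_of() recovers it */
        struct foo_dev *foo = container_of(psy, struct foo_dev, battery);

        if (psp != POWER_SUPPLY_PROP_CAPACITY)
                return -EINVAL;
        val->intval = foo->capacity;
        return 0;
}

static int foo_register_battery(struct device *parent, struct foo_dev *foo)
{
        foo->battery.name = "foo_battery";
        foo->battery.type = POWER_SUPPLY_TYPE_BATTERY;
        foo->battery.properties = foo_battery_props;
        foo->battery.num_properties = ARRAY_SIZE(foo_battery_props);
        foo->battery.get_property = foo_battery_get_property;

        /* tear down with power_supply_unregister(&foo->battery) */
        return power_supply_register(parent, &foo->battery);
}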
wdata->battery.properties = wacom_battery_props; - wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props); - wdata->battery.get_property = wacom_battery_get_property; - wdata->battery.name = "wacom_battery"; - wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY; - wdata->battery.use_for_apm = 0; - - - ret = power_supply_register(&hdev->dev, &wdata->battery); - if (ret) { - hid_err(hdev, "can't create sysfs battery attribute, err: %d\n", - ret); - goto err_battery; - } - - power_supply_powers(&wdata->battery, &hdev->dev); - - wdata->ac.properties = wacom_ac_props; - wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props); - wdata->ac.get_property = wacom_ac_get_property; - wdata->ac.name = "wacom_ac"; - wdata->ac.type = POWER_SUPPLY_TYPE_MAINS; - wdata->ac.use_for_apm = 0; - - ret = power_supply_register(&hdev->dev, &wdata->ac); - if (ret) { - hid_err(hdev, - "can't create ac battery attribute, err: %d\n", ret); - goto err_ac; - } - - power_supply_powers(&wdata->ac, &hdev->dev); - return 0; - -err_ac: - power_supply_unregister(&wdata->battery); -err_battery: - wacom_destroy_leds(hdev); - device_remove_file(&hdev->dev, &dev_attr_oled0_img); - device_remove_file(&hdev->dev, &dev_attr_oled1_img); - device_remove_file(&hdev->dev, &dev_attr_oled2_img); - device_remove_file(&hdev->dev, &dev_attr_oled3_img); - device_remove_file(&hdev->dev, &dev_attr_oled4_img); - device_remove_file(&hdev->dev, &dev_attr_oled5_img); - device_remove_file(&hdev->dev, &dev_attr_oled6_img); - device_remove_file(&hdev->dev, &dev_attr_oled7_img); - device_remove_file(&hdev->dev, &dev_attr_speed); - hid_hw_stop(hdev); -err_free: - kfree(wdata); - return ret; -} - -static void wacom_remove(struct hid_device *hdev) -{ - struct wacom_data *wdata = hid_get_drvdata(hdev); - - wacom_destroy_leds(hdev); - device_remove_file(&hdev->dev, &dev_attr_oled0_img); - device_remove_file(&hdev->dev, &dev_attr_oled1_img); - device_remove_file(&hdev->dev, &dev_attr_oled2_img); - device_remove_file(&hdev->dev, &dev_attr_oled3_img); - device_remove_file(&hdev->dev, &dev_attr_oled4_img); - device_remove_file(&hdev->dev, &dev_attr_oled5_img); - device_remove_file(&hdev->dev, &dev_attr_oled6_img); - device_remove_file(&hdev->dev, &dev_attr_oled7_img); - device_remove_file(&hdev->dev, &dev_attr_speed); - hid_hw_stop(hdev); - - power_supply_unregister(&wdata->battery); - power_supply_unregister(&wdata->ac); - kfree(hid_get_drvdata(hdev)); -} - -static const struct hid_device_id wacom_devices[] = { - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) }, - - { } -}; -MODULE_DEVICE_TABLE(hid, wacom_devices); - -static struct hid_driver wacom_driver = { - .name = "wacom", - .id_table = wacom_devices, - .probe = wacom_probe, - .remove = wacom_remove, - .raw_event = wacom_raw_event, - .input_mapped = wacom_input_mapped, -}; -module_hid_driver(wacom_driver); - -MODULE_DESCRIPTION("Driver for Wacom Graphire Bluetooth and Wacom Intuos4 WL"); -MODULE_LICENSE("GPL"); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 0dd568170d6e..15225f3eaed1 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -74,7 +74,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 
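/*
 * Hypothetical skeleton of the HID driver interface that the removed
 * hid-wacom.c above (and the new drivers/hid/wacom_sys.c below) implements:
 * an id table, probe/remove, and a raw_event hook.  The foo_* names and the
 * 0x1234/0x5678 IDs are placeholders, not real Wacom IDs.
 */
#include <linux/hid.h>
#include <linux/module.h>

static int foo_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
        int ret;

        ret = hid_parse(hdev);          /* parse the report descriptor */
        if (ret)
                return ret;

        /* start the transport and connect the default handlers (hid-input) */
        return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static void foo_remove(struct hid_device *hdev)
{
        hid_hw_stop(hdev);
}

static int foo_raw_event(struct hid_device *hdev, struct hid_report *report,
                         u8 *data, int size)
{
        /* decode "data" here; returning 0 lets HID core keep processing */
        return 0;
}

static const struct hid_device_id foo_devices[] = {
        { HID_BLUETOOTH_DEVICE(0x1234, 0x5678) },
        { }
};
MODULE_DEVICE_TABLE(hid, foo_devices);

static struct hid_driver foo_driver = {
        .name           = "foo",
        .id_table       = foo_devices,
        .probe          = foo_probe,
        .remove         = foo_remove,
        .raw_event      = foo_raw_event,
};
module_hid_driver(foo_driver);

MODULE_LICENSE("GPL");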
- { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET }, diff --git a/drivers/input/tablet/wacom.h b/drivers/hid/wacom.h index 9ebf0ed3b3b3..64bc1b296d91 100644 --- a/drivers/input/tablet/wacom.h +++ b/drivers/hid/wacom.h @@ -12,6 +12,7 @@ * Copyright (c) 2001 Frederic Lepied <flepied@mandrakesoft.com> * Copyright (c) 2004 Panagiotis Issaris <panagiotis.issaris@mech.kuleuven.ac.be> * Copyright (c) 2002-2011 Ping Cheng <pingc@wacom.com> + * Copyright (c) 2014 Benjamin Tissoires <benjamin.tissoires@redhat.com> * * ChangeLog: * v0.1 (vp) - Initial release @@ -72,6 +73,8 @@ * v1.52 (pc) - Query Wacom data upon system resume * - add defines for features->type * - add new devices (0x9F, 0xE2, and 0XE3) + * v2.00 (bt) - conversion to a HID driver + * - integration of the Bluetooth devices */ /* @@ -93,35 +96,30 @@ /* * Version Information */ -#define DRIVER_VERSION "v1.53" +#define DRIVER_VERSION "v2.00" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>" #define DRIVER_DESC "USB Wacom tablet driver" #define DRIVER_LICENSE "GPL" -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE(DRIVER_LICENSE); - #define USB_VENDOR_ID_WACOM 0x056a #define USB_VENDOR_ID_LENOVO 0x17ef struct wacom { - dma_addr_t data_dma; struct usb_device *usbdev; struct usb_interface *intf; - struct urb *irq; struct wacom_wac wacom_wac; + struct hid_device *hdev; struct mutex lock; struct work_struct work; - bool open; - char phys[32]; struct wacom_led { u8 select[2]; /* status led selector (0..3) */ u8 llv; /* status led brightness no button (1..127) */ u8 hlv; /* status led brightness button pressed (1..127) */ u8 img_lum; /* OLED matrix display brightness */ } led; + bool led_initialized; struct power_supply battery; + struct power_supply ac; }; static inline void wacom_schedule_work(struct wacom_wac *wacom_wac) @@ -130,10 +128,19 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac) schedule_work(&wacom->work); } -extern const struct usb_device_id wacom_ids[]; +static inline void wacom_notify_battery(struct wacom_wac *wacom_wac) +{ + struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac); + + power_supply_changed(&wacom->battery); +} + +extern const struct hid_device_id wacom_ids[]; void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len); void wacom_setup_device_quirks(struct wacom_features *features); int wacom_setup_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac); +int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, + struct wacom_wac *wacom_wac); #endif diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/hid/wacom_sys.c index 2c613cd41dd6..f0db7eca9023 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -13,246 +13,106 @@ #include "wacom_wac.h" #include "wacom.h" +#include <linux/hid.h> -/* defines to get HID report descriptor */ -#define HID_DEVICET_HID (USB_TYPE_CLASS | 0x01) -#define HID_DEVICET_REPORT (USB_TYPE_CLASS | 0x02) -#define HID_USAGE_UNDEFINED 0x00 -#define HID_USAGE_PAGE 0x05 -#define HID_USAGE_PAGE_DIGITIZER 0x0d -#define HID_USAGE_PAGE_DESKTOP 0x01 -#define HID_USAGE 0x09 -#define HID_USAGE_X 
((HID_USAGE_PAGE_DESKTOP << 16) | 0x30) -#define HID_USAGE_Y ((HID_USAGE_PAGE_DESKTOP << 16) | 0x31) -#define HID_USAGE_PRESSURE ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x30) -#define HID_USAGE_X_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3d) -#define HID_USAGE_Y_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3e) -#define HID_USAGE_FINGER ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x22) -#define HID_USAGE_STYLUS ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x20) -#define HID_USAGE_CONTACTMAX ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x55) -#define HID_COLLECTION 0xa1 -#define HID_COLLECTION_LOGICAL 0x02 -#define HID_COLLECTION_END 0xc0 - -struct hid_descriptor { - struct usb_descriptor_header header; - __le16 bcdHID; - u8 bCountryCode; - u8 bNumDescriptors; - u8 bDescriptorType; - __le16 wDescriptorLength; -} __attribute__ ((packed)); - -/* defines to get/set USB message */ -#define USB_REQ_GET_REPORT 0x01 -#define USB_REQ_SET_REPORT 0x09 - -#define WAC_HID_FEATURE_REPORT 0x03 #define WAC_MSG_RETRIES 5 #define WAC_CMD_LED_CONTROL 0x20 #define WAC_CMD_ICON_START 0x21 #define WAC_CMD_ICON_XFER 0x23 +#define WAC_CMD_ICON_BT_XFER 0x26 #define WAC_CMD_RETRIES 10 -static int wacom_get_report(struct usb_interface *intf, u8 type, u8 id, +static int wacom_get_report(struct hid_device *hdev, u8 type, u8 id, void *buf, size_t size, unsigned int retries) { - struct usb_device *dev = interface_to_usbdev(intf); int retval; do { - retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), - USB_REQ_GET_REPORT, - USB_DIR_IN | USB_TYPE_CLASS | - USB_RECIP_INTERFACE, - (type << 8) + id, - intf->altsetting[0].desc.bInterfaceNumber, - buf, size, 100); + retval = hid_hw_raw_request(hdev, id, buf, size, type, + HID_REQ_GET_REPORT); } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries); return retval; } -static int wacom_set_report(struct usb_interface *intf, u8 type, u8 id, - void *buf, size_t size, unsigned int retries) +static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf, + size_t size, unsigned int retries) { - struct usb_device *dev = interface_to_usbdev(intf); int retval; do { - retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_SET_REPORT, - USB_TYPE_CLASS | USB_RECIP_INTERFACE, - (type << 8) + id, - intf->altsetting[0].desc.bInterfaceNumber, - buf, size, 1000); + retval = hid_hw_raw_request(hdev, buf[0], buf, size, type, + HID_REQ_SET_REPORT); } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries); return retval; } -static void wacom_sys_irq(struct urb *urb) +static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, + u8 *raw_data, int size) { - struct wacom *wacom = urb->context; - struct device *dev = &wacom->intf->dev; - int retval; + struct wacom *wacom = hid_get_drvdata(hdev); - switch (urb->status) { - case 0: - /* success */ - break; - case -ECONNRESET: - case -ENOENT: - case -ESHUTDOWN: - /* this urb is terminated, clean up */ - dev_dbg(dev, "%s - urb shutting down with status: %d\n", - __func__, urb->status); - return; - default: - dev_dbg(dev, "%s - nonzero urb status received: %d\n", - __func__, urb->status); - goto exit; - } + if (size > WACOM_PKGLEN_MAX) + return 1; + + memcpy(wacom->wacom_wac.data, raw_data, size); - wacom_wac_irq(&wacom->wacom_wac, urb->actual_length); + wacom_wac_irq(&wacom->wacom_wac, size); - exit: - usb_mark_last_busy(wacom->usbdev); - retval = usb_submit_urb(urb, GFP_ATOMIC); - if (retval) - dev_err(dev, "%s - usb_submit_urb failed with result %d\n", - __func__, retval); + return 0; } static int wacom_open(struct input_dev 
*dev) { struct wacom *wacom = input_get_drvdata(dev); - int retval = 0; - - if (usb_autopm_get_interface(wacom->intf) < 0) - return -EIO; + int retval; mutex_lock(&wacom->lock); - - if (usb_submit_urb(wacom->irq, GFP_KERNEL)) { - retval = -EIO; - goto out; - } - - wacom->open = true; - wacom->intf->needs_remote_wakeup = 1; - -out: + retval = hid_hw_open(wacom->hdev); mutex_unlock(&wacom->lock); - usb_autopm_put_interface(wacom->intf); + return retval; } static void wacom_close(struct input_dev *dev) { struct wacom *wacom = input_get_drvdata(dev); - int autopm_error; - - autopm_error = usb_autopm_get_interface(wacom->intf); mutex_lock(&wacom->lock); - usb_kill_urb(wacom->irq); - wacom->open = false; - wacom->intf->needs_remote_wakeup = 0; + hid_hw_close(wacom->hdev); mutex_unlock(&wacom->lock); - - if (!autopm_error) - usb_autopm_put_interface(wacom->intf); } /* - * Calculate the resolution of the X or Y axis, given appropriate HID data. - * This function is little more than hidinput_calc_abs_res stripped down. + * Calculate the resolution of the X or Y axis using hidinput_calc_abs_res. */ static int wacom_calc_hid_res(int logical_extents, int physical_extents, - unsigned char unit, unsigned char exponent) -{ - int prev, unit_exponent; - - /* Check if the extents are sane */ - if (logical_extents <= 0 || physical_extents <= 0) - return 0; - - /* Get signed value of nybble-sized twos-compliment exponent */ - unit_exponent = exponent; - if (unit_exponent > 7) - unit_exponent -= 16; - - /* Convert physical_extents to millimeters */ - if (unit == 0x11) { /* If centimeters */ - unit_exponent += 1; - } else if (unit == 0x13) { /* If inches */ - prev = physical_extents; - physical_extents *= 254; - if (physical_extents < prev) - return 0; - unit_exponent -= 1; - } else { - return 0; - } - - /* Apply negative unit exponent */ - for (; unit_exponent < 0; unit_exponent++) { - prev = logical_extents; - logical_extents *= 10; - if (logical_extents < prev) - return 0; - } - /* Apply positive unit exponent */ - for (; unit_exponent > 0; unit_exponent--) { - prev = physical_extents; - physical_extents *= 10; - if (physical_extents < prev) - return 0; - } - - /* Calculate resolution */ - return logical_extents / physical_extents; -} - -static int wacom_parse_logical_collection(unsigned char *report, - struct wacom_features *features) + unsigned unit, int exponent) { - int length = 0; - - if (features->type == BAMBOO_PT) { - - /* Logical collection is only used by 3rd gen Bamboo Touch */ - features->pktlen = WACOM_PKGLEN_BBTOUCH3; - features->device_type = BTN_TOOL_FINGER; - - features->x_max = features->y_max = - get_unaligned_le16(&report[10]); - - length = 11; - } - return length; + struct hid_field field = { + .logical_maximum = logical_extents, + .physical_maximum = physical_extents, + .unit = unit, + .unit_exponent = exponent, + }; + + return hidinput_calc_abs_res(&field, ABS_X); } -static void wacom_retrieve_report_data(struct usb_interface *intf, - struct wacom_features *features) +static void wacom_feature_mapping(struct hid_device *hdev, + struct hid_field *field, struct hid_usage *usage) { - int result = 0; - unsigned char *rep_data; - - rep_data = kmalloc(2, GFP_KERNEL); - if (rep_data) { - - rep_data[0] = 12; - result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT, - rep_data[0], rep_data, 2, - WAC_MSG_RETRIES); - - if (result >= 0 && rep_data[1] > 2) - features->touch_max = rep_data[1]; + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_features *features = 
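/*
 * Worked example (hypothetical numbers) for wacom_calc_hid_res() above,
 * which now fills a throw-away struct hid_field and lets the HID core
 * helper hidinput_calc_abs_res() do the unit conversion:
 *
 *   wacom_calc_hid_res(21648, 8520, 0x13, -3)
 *
 * means 21648 logical counts over 8520 * 10^-3 inch = 8.52 in = 216.4 mm
 * (unit 0x13 is inches, exponent -3), giving a resolution of about
 * 100 counts per millimeter -- the units/mm value the input layer expects
 * for ABS_X/ABS_Y.
 */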
&wacom->wacom_wac.features; - kfree(rep_data); + switch (usage->hid) { + case HID_DG_CONTACTMAX: + /* leave touch_max as is if predefined */ + if (!features->touch_max) + features->touch_max = field->value[0]; + break; } } @@ -285,243 +145,100 @@ static void wacom_retrieve_report_data(struct usb_interface *intf, * interfaces haven't supported pressure or distance, this is enough * information to override invalid values in the wacom_features table. * - * 3rd gen Bamboo Touch no longer define a Digitizer-Finger Pysical - * Collection. Instead they define a Logical Collection with a single - * Logical Maximum for both X and Y. - * - * Intuos5 touch interface does not contain useful data. We deal with - * this after returning from this function. + * Intuos5 touch interface and 3rd gen Bamboo Touch do not contain useful + * data. We deal with them after returning from this function. */ -static int wacom_parse_hid(struct usb_interface *intf, - struct hid_descriptor *hid_desc, - struct wacom_features *features) +static void wacom_usage_mapping(struct hid_device *hdev, + struct hid_field *field, struct hid_usage *usage) { - struct usb_device *dev = interface_to_usbdev(intf); - char limit = 0; - /* result has to be defined as int for some devices */ - int result = 0, touch_max = 0; - int i = 0, page = 0, finger = 0, pen = 0; - unsigned char *report; - - report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL); - if (!report) - return -ENOMEM; - - /* retrive report descriptors */ - do { - result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), - USB_REQ_GET_DESCRIPTOR, - USB_RECIP_INTERFACE | USB_DIR_IN, - HID_DEVICET_REPORT << 8, - intf->altsetting[0].desc.bInterfaceNumber, /* interface */ - report, - hid_desc->wDescriptorLength, - 5000); /* 5 secs */ - } while (result < 0 && limit++ < WAC_MSG_RETRIES); - - /* No need to parse the Descriptor. 
It isn't an error though */ - if (result < 0) - goto out; - - for (i = 0; i < hid_desc->wDescriptorLength; i++) { - - switch (report[i]) { - case HID_USAGE_PAGE: - page = report[i + 1]; - i++; - break; - - case HID_USAGE: - switch (page << 16 | report[i + 1]) { - case HID_USAGE_X: - if (finger) { - features->device_type = BTN_TOOL_FINGER; - /* touch device at least supports one touch point */ - touch_max = 1; - switch (features->type) { - case TABLETPC2FG: - features->pktlen = WACOM_PKGLEN_TPC2FG; - break; - - case MTSCREEN: - case WACOM_24HDT: - features->pktlen = WACOM_PKGLEN_MTOUCH; - break; - - case MTTPC: - case MTTPC_B: - features->pktlen = WACOM_PKGLEN_MTTPC; - break; - - case BAMBOO_PT: - features->pktlen = WACOM_PKGLEN_BBTOUCH; - break; - - default: - features->pktlen = WACOM_PKGLEN_GRAPHIRE; - break; - } - - switch (features->type) { - case BAMBOO_PT: - features->x_phy = - get_unaligned_le16(&report[i + 5]); - features->x_max = - get_unaligned_le16(&report[i + 8]); - i += 15; - break; - - case WACOM_24HDT: - features->x_max = - get_unaligned_le16(&report[i + 3]); - features->x_phy = - get_unaligned_le16(&report[i + 8]); - features->unit = report[i - 1]; - features->unitExpo = report[i - 3]; - i += 12; - break; - - case MTTPC_B: - features->x_max = - get_unaligned_le16(&report[i + 3]); - features->x_phy = - get_unaligned_le16(&report[i + 6]); - features->unit = report[i - 5]; - features->unitExpo = report[i - 3]; - i += 9; - break; - - default: - features->x_max = - get_unaligned_le16(&report[i + 3]); - features->x_phy = - get_unaligned_le16(&report[i + 6]); - features->unit = report[i + 9]; - features->unitExpo = report[i + 11]; - i += 12; - break; - } - } else if (pen) { - /* penabled only accepts exact bytes of data */ - if (features->type >= TABLETPC) - features->pktlen = WACOM_PKGLEN_GRAPHIRE; - features->device_type = BTN_TOOL_PEN; - features->x_max = - get_unaligned_le16(&report[i + 3]); - i += 4; - } - break; - - case HID_USAGE_Y: - if (finger) { - switch (features->type) { - case TABLETPC2FG: - case MTSCREEN: - case MTTPC: - features->y_max = - get_unaligned_le16(&report[i + 3]); - features->y_phy = - get_unaligned_le16(&report[i + 6]); - i += 7; - break; - - case WACOM_24HDT: - features->y_max = - get_unaligned_le16(&report[i + 3]); - features->y_phy = - get_unaligned_le16(&report[i - 2]); - i += 7; - break; - - case BAMBOO_PT: - features->y_phy = - get_unaligned_le16(&report[i + 3]); - features->y_max = - get_unaligned_le16(&report[i + 6]); - i += 12; - break; - - case MTTPC_B: - features->y_max = - get_unaligned_le16(&report[i + 3]); - features->y_phy = - get_unaligned_le16(&report[i + 6]); - i += 9; - break; - - default: - features->y_max = - features->x_max; - features->y_phy = - get_unaligned_le16(&report[i + 3]); - i += 4; - break; - } - } else if (pen) { - features->y_max = - get_unaligned_le16(&report[i + 3]); - i += 4; - } - break; - - case HID_USAGE_FINGER: - finger = 1; - i++; - break; + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_features *features = &wacom->wacom_wac.features; + bool finger = (field->logical == HID_DG_FINGER) || + (field->physical == HID_DG_FINGER); + bool pen = (field->logical == HID_DG_STYLUS) || + (field->physical == HID_DG_STYLUS); - /* - * Requiring Stylus Usage will ignore boot mouse - * X/Y values and some cases of invalid Digitizer X/Y - * values commonly reported. 
- */ - case HID_USAGE_STYLUS: - pen = 1; - i++; - break; + /* + * Requiring Stylus Usage will ignore boot mouse + * X/Y values and some cases of invalid Digitizer X/Y + * values commonly reported. + */ + if (!pen && !finger) + return; - case HID_USAGE_CONTACTMAX: - /* leave touch_max as is if predefined */ - if (!features->touch_max) - wacom_retrieve_report_data(intf, features); - i++; - break; + if (finger && !features->touch_max) + /* touch device at least supports one touch point */ + features->touch_max = 1; - case HID_USAGE_PRESSURE: - if (pen) { - features->pressure_max = - get_unaligned_le16(&report[i + 3]); - i += 4; - } - break; + switch (usage->hid) { + case HID_GD_X: + features->x_max = field->logical_maximum; + if (finger) { + features->device_type = BTN_TOOL_FINGER; + features->x_phy = field->physical_maximum; + if (features->type != BAMBOO_PT) { + features->unit = field->unit; + features->unitExpo = field->unit_exponent; } - break; - - case HID_COLLECTION_END: - /* reset UsagePage and Finger */ - finger = page = 0; - break; + } else { + features->device_type = BTN_TOOL_PEN; + } + break; + case HID_GD_Y: + features->y_max = field->logical_maximum; + if (finger) { + features->y_phy = field->physical_maximum; + if (features->type != BAMBOO_PT) { + features->unit = field->unit; + features->unitExpo = field->unit_exponent; + } + } + break; + case HID_DG_TIPPRESSURE: + if (pen) + features->pressure_max = field->logical_maximum; + break; + } +} - case HID_COLLECTION: - i++; - switch (report[i]) { - case HID_COLLECTION_LOGICAL: - i += wacom_parse_logical_collection(&report[i], - features); - break; +static void wacom_parse_hid(struct hid_device *hdev, + struct wacom_features *features) +{ + struct hid_report_enum *rep_enum; + struct hid_report *hreport; + int i, j; + + /* check features first */ + rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; + list_for_each_entry(hreport, &rep_enum->report_list, list) { + for (i = 0; i < hreport->maxfield; i++) { + /* Ignore if report count is out of bounds. 
*/ + if (hreport->field[i]->report_count < 1) + continue; + + for (j = 0; j < hreport->field[i]->maxusage; j++) { + wacom_feature_mapping(hdev, hreport->field[i], + hreport->field[i]->usage + j); } - break; } } - out: - if (!features->touch_max && touch_max) - features->touch_max = touch_max; - result = 0; - kfree(report); - return result; + /* now check the input usages */ + rep_enum = &hdev->report_enum[HID_INPUT_REPORT]; + list_for_each_entry(hreport, &rep_enum->report_list, list) { + + if (!hreport->maxfield) + continue; + + for (i = 0; i < hreport->maxfield; i++) + for (j = 0; j < hreport->field[i]->maxusage; j++) + wacom_usage_mapping(hdev, hreport->field[i], + hreport->field[i]->usage + j); + } } -static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int length, int mode) +static int wacom_set_device_mode(struct hid_device *hdev, int report_id, + int length, int mode) { unsigned char *rep_data; int error = -ENOMEM, limit = 0; @@ -534,8 +251,11 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int rep_data[0] = report_id; rep_data[1] = mode; - error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT, - report_id, rep_data, length, 1); + error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, + length, 1); + if (error >= 0) + error = wacom_get_report(hdev, HID_FEATURE_REPORT, + report_id, rep_data, length, 1); } while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES); kfree(rep_data); @@ -543,6 +263,59 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int return error < 0 ? error : 0; } +static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed, + struct wacom_features *features) +{ + struct wacom *wacom = hid_get_drvdata(hdev); + int ret; + u8 rep_data[2]; + + switch (features->type) { + case GRAPHIRE_BT: + rep_data[0] = 0x03; + rep_data[1] = 0x00; + ret = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 2, + 3); + + if (ret >= 0) { + rep_data[0] = speed == 0 ? 0x05 : 0x06; + rep_data[1] = 0x00; + + ret = wacom_set_report(hdev, HID_FEATURE_REPORT, + rep_data, 2, 3); + + if (ret >= 0) { + wacom->wacom_wac.bt_high_speed = speed; + return 0; + } + } + + /* + * Note that if the raw queries fail, it's not a hard failure + * and it is safe to continue + */ + hid_warn(hdev, "failed to poke device, command %d, err %d\n", + rep_data[0], ret); + break; + case INTUOS4WL: + if (speed == 1) + wacom->wacom_wac.bt_features &= ~0x20; + else + wacom->wacom_wac.bt_features |= 0x20; + + rep_data[0] = 0x03; + rep_data[1] = wacom->wacom_wac.bt_features; + + ret = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 2, + 1); + if (ret >= 0) + wacom->wacom_wac.bt_high_speed = speed; + break; + } + + return 0; +} + /* * Switch the tablet into its most-capable mode. Wacom tablets are * typically configured to power-up in a mode which sends mouse-like @@ -550,31 +323,34 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int * from the tablet, it is necessary to switch the tablet out of this * mode and into one which sends the full range of tablet data. 
*/ -static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features) +static int wacom_query_tablet_data(struct hid_device *hdev, + struct wacom_features *features) { + if (hdev->bus == BUS_BLUETOOTH) + return wacom_bt_query_tablet_data(hdev, 1, features); + if (features->device_type == BTN_TOOL_FINGER) { if (features->type > TABLETPC) { /* MT Tablet PC touch */ - return wacom_set_device_mode(intf, 3, 4, 4); + return wacom_set_device_mode(hdev, 3, 4, 4); } else if (features->type == WACOM_24HDT || features->type == CINTIQ_HYBRID) { - return wacom_set_device_mode(intf, 18, 3, 2); + return wacom_set_device_mode(hdev, 18, 3, 2); } } else if (features->device_type == BTN_TOOL_PEN) { if (features->type <= BAMBOO_PT && features->type != WIRELESS) { - return wacom_set_device_mode(intf, 2, 2, 2); + return wacom_set_device_mode(hdev, 2, 2, 2); } } return 0; } -static int wacom_retrieve_hid_descriptor(struct usb_interface *intf, +static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, struct wacom_features *features) { - int error = 0; - struct usb_host_interface *interface = intf->cur_altsetting; - struct hid_descriptor *hid_desc; + struct wacom *wacom = hid_get_drvdata(hdev); + struct usb_interface *intf = wacom->intf; /* default features */ features->device_type = BTN_TOOL_PEN; @@ -599,66 +375,54 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf, } /* only devices that support touch need to retrieve the info */ - if (features->type < BAMBOO_PT) { - goto out; - } - - error = usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc); - if (error) { - error = usb_get_extra_descriptor(&interface->endpoint[0], - HID_DEVICET_REPORT, &hid_desc); - if (error) { - dev_err(&intf->dev, - "can not retrieve extra class descriptor\n"); - goto out; - } - } - error = wacom_parse_hid(intf, hid_desc, features); + if (features->type < BAMBOO_PT) + return; - out: - return error; + wacom_parse_hid(hdev, features); } -struct wacom_usbdev_data { +struct wacom_hdev_data { struct list_head list; struct kref kref; - struct usb_device *dev; + struct hid_device *dev; struct wacom_shared shared; }; static LIST_HEAD(wacom_udev_list); static DEFINE_MUTEX(wacom_udev_list_lock); -static struct usb_device *wacom_get_sibling(struct usb_device *dev, int vendor, int product) +static bool wacom_are_sibling(struct hid_device *hdev, + struct hid_device *sibling) { - int port1; - struct usb_device *sibling; - - if (vendor == 0 && product == 0) - return dev; + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_features *features = &wacom->wacom_wac.features; + int vid = features->oVid; + int pid = features->oPid; + int n1,n2; - if (dev->parent == NULL) - return NULL; + if (vid == 0 && pid == 0) { + vid = hdev->vendor; + pid = hdev->product; + } - usb_hub_for_each_child(dev->parent, port1, sibling) { - struct usb_device_descriptor *d; - if (sibling == NULL) - continue; + if (vid != sibling->vendor || pid != sibling->product) + return false; - d = &sibling->descriptor; - if (d->idVendor == vendor && d->idProduct == product) - return sibling; - } + /* Compare the physical path. 
*/ + n1 = strrchr(hdev->phys, '.') - hdev->phys; + n2 = strrchr(sibling->phys, '.') - sibling->phys; + if (n1 != n2 || n1 <= 0 || n2 <= 0) + return false; - return NULL; + return !strncmp(hdev->phys, sibling->phys, n1); } -static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev) +static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev) { - struct wacom_usbdev_data *data; + struct wacom_hdev_data *data; list_for_each_entry(data, &wacom_udev_list, list) { - if (data->dev == dev) { + if (wacom_are_sibling(hdev, data->dev)) { kref_get(&data->kref); return data; } @@ -667,28 +431,29 @@ static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev) return NULL; } -static int wacom_add_shared_data(struct wacom_wac *wacom, - struct usb_device *dev) +static int wacom_add_shared_data(struct hid_device *hdev) { - struct wacom_usbdev_data *data; + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct wacom_hdev_data *data; int retval = 0; mutex_lock(&wacom_udev_list_lock); - data = wacom_get_usbdev_data(dev); + data = wacom_get_hdev_data(hdev); if (!data) { - data = kzalloc(sizeof(struct wacom_usbdev_data), GFP_KERNEL); + data = kzalloc(sizeof(struct wacom_hdev_data), GFP_KERNEL); if (!data) { retval = -ENOMEM; goto out; } kref_init(&data->kref); - data->dev = dev; + data->dev = hdev; list_add_tail(&data->list, &wacom_udev_list); } - wacom->shared = &data->shared; + wacom_wac->shared = &data->shared; out: mutex_unlock(&wacom_udev_list_lock); @@ -697,8 +462,8 @@ out: static void wacom_release_shared_data(struct kref *kref) { - struct wacom_usbdev_data *data = - container_of(kref, struct wacom_usbdev_data, kref); + struct wacom_hdev_data *data = + container_of(kref, struct wacom_hdev_data, kref); mutex_lock(&wacom_udev_list_lock); list_del(&data->list); @@ -709,10 +474,10 @@ static void wacom_release_shared_data(struct kref *kref) static void wacom_remove_shared_data(struct wacom_wac *wacom) { - struct wacom_usbdev_data *data; + struct wacom_hdev_data *data; if (wacom->shared) { - data = container_of(wacom->shared, struct wacom_usbdev_data, shared); + data = container_of(wacom->shared, struct wacom_hdev_data, shared); kref_put(&data->kref, wacom_release_shared_data); wacom->shared = NULL; } @@ -755,38 +520,40 @@ static int wacom_led_control(struct wacom *wacom) buf[4] = wacom->led.img_lum; } - retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_LED_CONTROL, - buf, 9, WAC_CMD_RETRIES); + retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 9, + WAC_CMD_RETRIES); kfree(buf); return retval; } -static int wacom_led_putimage(struct wacom *wacom, int button_id, const void *img) +static int wacom_led_putimage(struct wacom *wacom, int button_id, u8 xfer_id, + const unsigned len, const void *img) { unsigned char *buf; int i, retval; + const unsigned chunk_len = len / 4; /* 4 chunks are needed to be sent */ - buf = kzalloc(259, GFP_KERNEL); + buf = kzalloc(chunk_len + 3 , GFP_KERNEL); if (!buf) return -ENOMEM; /* Send 'start' command */ buf[0] = WAC_CMD_ICON_START; buf[1] = 1; - retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_START, - buf, 2, WAC_CMD_RETRIES); + retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 2, + WAC_CMD_RETRIES); if (retval < 0) goto out; - buf[0] = WAC_CMD_ICON_XFER; + buf[0] = xfer_id; buf[1] = button_id & 0x07; for (i = 0; i < 4; i++) { buf[2] = i; - memcpy(buf + 3, img + i * 256, 256); + memcpy(buf + 3, img + i * chunk_len, chunk_len); - retval = 
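/*
 * Minimal illustrative sketch of the kref pattern used by the shared
 * wacom_hdev_data list above: kref_init() when the block is created for the
 * first interface, kref_get() when a sibling interface attaches, and
 * kref_put() with a release callback when an interface goes away.  All
 * foo_* names are hypothetical.
 */
#include <linux/kref.h>
#include <linux/slab.h>

struct foo_shared {
        struct kref kref;
        int touch_max;          /* example of data shared between siblings */
};

static void foo_shared_release(struct kref *kref)
{
        struct foo_shared *shared = container_of(kref, struct foo_shared, kref);

        kfree(shared);
}

static struct foo_shared *foo_shared_alloc(void)
{
        struct foo_shared *shared = kzalloc(sizeof(*shared), GFP_KERNEL);

        if (shared)
                kref_init(&shared->kref);       /* refcount starts at 1 */
        return shared;
}

static void foo_shared_get(struct foo_shared *shared)
{
        kref_get(&shared->kref);        /* a sibling interface attaches */
}

static void foo_shared_put(struct foo_shared *shared)
{
        kref_put(&shared->kref, foo_shared_release);
}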
wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_XFER, - buf, 259, WAC_CMD_RETRIES); + retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, + buf, chunk_len + 3, WAC_CMD_RETRIES); if (retval < 0) break; } @@ -794,8 +561,8 @@ static int wacom_led_putimage(struct wacom *wacom, int button_id, const void *im /* Send 'stop' */ buf[0] = WAC_CMD_ICON_START; buf[1] = 0; - wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_START, - buf, 2, WAC_CMD_RETRIES); + wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 2, + WAC_CMD_RETRIES); out: kfree(buf); @@ -805,7 +572,8 @@ out: static ssize_t wacom_led_select_store(struct device *dev, int set_id, const char *buf, size_t count) { - struct wacom *wacom = dev_get_drvdata(dev); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct wacom *wacom = hid_get_drvdata(hdev); unsigned int id; int err; @@ -832,7 +600,8 @@ static ssize_t wacom_led##SET_ID##_select_store(struct device *dev, \ static ssize_t wacom_led##SET_ID##_select_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - struct wacom *wacom = dev_get_drvdata(dev); \ + struct hid_device *hdev = container_of(dev, struct hid_device, dev);\ + struct wacom *wacom = hid_get_drvdata(hdev); \ return snprintf(buf, 2, "%d\n", wacom->led.select[SET_ID]); \ } \ static DEVICE_ATTR(status_led##SET_ID##_select, S_IWUSR | S_IRUSR, \ @@ -866,7 +635,8 @@ static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest, static ssize_t wacom_##name##_luminance_store(struct device *dev, \ struct device_attribute *attr, const char *buf, size_t count) \ { \ - struct wacom *wacom = dev_get_drvdata(dev); \ + struct hid_device *hdev = container_of(dev, struct hid_device, dev);\ + struct wacom *wacom = hid_get_drvdata(hdev); \ \ return wacom_luminance_store(wacom, &wacom->led.field, \ buf, count); \ @@ -881,15 +651,26 @@ DEVICE_LUMINANCE_ATTR(buttons, img_lum); static ssize_t wacom_button_image_store(struct device *dev, int button_id, const char *buf, size_t count) { - struct wacom *wacom = dev_get_drvdata(dev); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct wacom *wacom = hid_get_drvdata(hdev); int err; + unsigned len; + u8 xfer_id; + + if (hdev->bus == BUS_BLUETOOTH) { + len = 256; + xfer_id = WAC_CMD_ICON_BT_XFER; + } else { + len = 1024; + xfer_id = WAC_CMD_ICON_XFER; + } - if (count != 1024) + if (count != len) return -EINVAL; mutex_lock(&wacom->lock); - err = wacom_led_putimage(wacom, button_id, buf); + err = wacom_led_putimage(wacom, button_id, xfer_id, len, buf); mutex_unlock(&wacom->lock); @@ -965,13 +746,14 @@ static int wacom_initialize_leds(struct wacom *wacom) switch (wacom->wacom_wac.features.type) { case INTUOS4S: case INTUOS4: + case INTUOS4WL: case INTUOS4L: wacom->led.select[0] = 0; wacom->led.select[1] = 0; wacom->led.llv = 10; wacom->led.hlv = 20; wacom->led.img_lum = 10; - error = sysfs_create_group(&wacom->intf->dev.kobj, + error = sysfs_create_group(&wacom->hdev->dev.kobj, &intuos4_led_attr_group); break; @@ -983,7 +765,7 @@ static int wacom_initialize_leds(struct wacom *wacom) wacom->led.hlv = 0; wacom->led.img_lum = 0; - error = sysfs_create_group(&wacom->intf->dev.kobj, + error = sysfs_create_group(&wacom->hdev->dev.kobj, &cintiq_led_attr_group); break; @@ -1000,7 +782,7 @@ static int wacom_initialize_leds(struct wacom *wacom) wacom->led.hlv = 0; wacom->led.img_lum = 0; - error = sysfs_create_group(&wacom->intf->dev.kobj, + error = sysfs_create_group(&wacom->hdev->dev.kobj, &intuos5_led_attr_group); } else return 
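/*
 * Note on the chunk arithmetic in wacom_led_putimage() above: the icon is
 * always sent in four chunks of len / 4 bytes plus a 3-byte header.  For
 * the 1024-byte USB image that is 256 + 3 = 259 bytes per transfer (the
 * fixed 259-byte buffer the old code used), and for the 256-byte Bluetooth
 * image it is 64 + 3 = 67 bytes, matching the 67-byte transfers in the
 * Bluetooth driver removed earlier in this diff.
 */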
0; @@ -1011,28 +793,35 @@ static int wacom_initialize_leds(struct wacom *wacom) } if (error) { - dev_err(&wacom->intf->dev, + hid_err(wacom->hdev, "cannot create sysfs group err: %d\n", error); return error; } wacom_led_control(wacom); + wacom->led_initialized = true; return 0; } static void wacom_destroy_leds(struct wacom *wacom) { + if (!wacom->led_initialized) + return; + + wacom->led_initialized = false; + switch (wacom->wacom_wac.features.type) { case INTUOS4S: case INTUOS4: + case INTUOS4WL: case INTUOS4L: - sysfs_remove_group(&wacom->intf->dev.kobj, + sysfs_remove_group(&wacom->hdev->dev.kobj, &intuos4_led_attr_group); break; case WACOM_24HD: case WACOM_21UX2: - sysfs_remove_group(&wacom->intf->dev.kobj, + sysfs_remove_group(&wacom->hdev->dev.kobj, &cintiq_led_attr_group); break; @@ -1043,17 +832,24 @@ static void wacom_destroy_leds(struct wacom *wacom) case INTUOSPM: case INTUOSPL: if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) - sysfs_remove_group(&wacom->intf->dev.kobj, + sysfs_remove_group(&wacom->hdev->dev.kobj, &intuos5_led_attr_group); break; } } static enum power_supply_property wacom_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_CAPACITY }; +static enum power_supply_property wacom_ac_props[] = { + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_SCOPE, +}; + static int wacom_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -1067,7 +863,16 @@ static int wacom_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = - wacom->wacom_wac.battery_capacity * 100 / 31; + wacom->wacom_wac.battery_capacity; + break; + case POWER_SUPPLY_PROP_STATUS: + if (wacom->wacom_wac.bat_charging) + val->intval = POWER_SUPPLY_STATUS_CHARGING; + else if (wacom->wacom_wac.battery_capacity == 100 && + wacom->wacom_wac.ps_connected) + val->intval = POWER_SUPPLY_STATUS_FULL; + else + val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; default: ret = -EINVAL; @@ -1077,74 +882,201 @@ static int wacom_battery_get_property(struct power_supply *psy, return ret; } +static int wacom_ac_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wacom *wacom = container_of(psy, struct wacom, ac); + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_PRESENT: + /* fall through */ + case POWER_SUPPLY_PROP_ONLINE: + val->intval = wacom->wacom_wac.ps_connected; + break; + case POWER_SUPPLY_PROP_SCOPE: + val->intval = POWER_SUPPLY_SCOPE_DEVICE; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + static int wacom_initialize_battery(struct wacom *wacom) { - int error = 0; + static atomic_t battery_no = ATOMIC_INIT(0); + int error; + unsigned long n; + + if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) { + n = atomic_inc_return(&battery_no) - 1; - if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR) { wacom->battery.properties = wacom_battery_props; wacom->battery.num_properties = ARRAY_SIZE(wacom_battery_props); wacom->battery.get_property = wacom_battery_get_property; - wacom->battery.name = "wacom_battery"; + sprintf(wacom->wacom_wac.bat_name, "wacom_battery_%ld", n); + wacom->battery.name = wacom->wacom_wac.bat_name; wacom->battery.type = POWER_SUPPLY_TYPE_BATTERY; wacom->battery.use_for_apm = 0; - error = power_supply_register(&wacom->usbdev->dev, + wacom->ac.properties = wacom_ac_props; + wacom->ac.num_properties = 
ARRAY_SIZE(wacom_ac_props); + wacom->ac.get_property = wacom_ac_get_property; + sprintf(wacom->wacom_wac.ac_name, "wacom_ac_%ld", n); + wacom->ac.name = wacom->wacom_wac.ac_name; + wacom->ac.type = POWER_SUPPLY_TYPE_MAINS; + wacom->ac.use_for_apm = 0; + + error = power_supply_register(&wacom->hdev->dev, &wacom->battery); + if (error) + return error; + + power_supply_powers(&wacom->battery, &wacom->hdev->dev); - if (!error) - power_supply_powers(&wacom->battery, - &wacom->usbdev->dev); + error = power_supply_register(&wacom->hdev->dev, &wacom->ac); + if (error) { + power_supply_unregister(&wacom->battery); + return error; + } + + power_supply_powers(&wacom->ac, &wacom->hdev->dev); } - return error; + return 0; } static void wacom_destroy_battery(struct wacom *wacom) { - if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR && - wacom->battery.dev) { + if ((wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) && + wacom->battery.dev) { power_supply_unregister(&wacom->battery); wacom->battery.dev = NULL; + power_supply_unregister(&wacom->ac); + wacom->ac.dev = NULL; } } -static int wacom_register_input(struct wacom *wacom) +static ssize_t wacom_show_speed(struct device *dev, + struct device_attribute + *attr, char *buf) +{ + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct wacom *wacom = hid_get_drvdata(hdev); + + return snprintf(buf, PAGE_SIZE, "%i\n", wacom->wacom_wac.bt_high_speed); +} + +static ssize_t wacom_store_speed(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct wacom *wacom = hid_get_drvdata(hdev); + u8 new_speed; + + if (kstrtou8(buf, 0, &new_speed)) + return -EINVAL; + + if (new_speed != 0 && new_speed != 1) + return -EINVAL; + + wacom_bt_query_tablet_data(hdev, new_speed, &wacom->wacom_wac.features); + + return count; +} + +static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP, + wacom_show_speed, wacom_store_speed); + +static struct input_dev *wacom_allocate_input(struct wacom *wacom) { struct input_dev *input_dev; - struct usb_interface *intf = wacom->intf; - struct usb_device *dev = interface_to_usbdev(intf); + struct hid_device *hdev = wacom->hdev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); - int error; input_dev = input_allocate_device(); - if (!input_dev) { - error = -ENOMEM; - goto fail1; - } + if (!input_dev) + return NULL; input_dev->name = wacom_wac->name; - input_dev->dev.parent = &intf->dev; + input_dev->phys = hdev->phys; + input_dev->dev.parent = &hdev->dev; input_dev->open = wacom_open; input_dev->close = wacom_close; - usb_to_input_id(dev, &input_dev->id); + input_dev->uniq = hdev->uniq; + input_dev->id.bustype = hdev->bus; + input_dev->id.vendor = hdev->vendor; + input_dev->id.product = hdev->product; + input_dev->id.version = hdev->version; input_set_drvdata(input_dev, wacom); + return input_dev; +} + +static void wacom_unregister_inputs(struct wacom *wacom) +{ + if (wacom->wacom_wac.input) + input_unregister_device(wacom->wacom_wac.input); + if (wacom->wacom_wac.pad_input) + input_unregister_device(wacom->wacom_wac.pad_input); + wacom->wacom_wac.input = NULL; + wacom->wacom_wac.pad_input = NULL; +} + +static int wacom_register_inputs(struct wacom *wacom) +{ + struct input_dev *input_dev, *pad_input_dev; + struct wacom_wac *wacom_wac = &(wacom->wacom_wac); + int error; + + input_dev = wacom_allocate_input(wacom); + pad_input_dev = wacom_allocate_input(wacom); + if (!input_dev || !pad_input_dev) { + 
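/*
 * Illustrative sketch of the input-core ownership rule that the
 * wacom_register_inputs()/wacom_unregister_inputs() pair added here relies
 * on: a device that was never registered is released with
 * input_free_device(), but once input_register_device() succeeds it must be
 * torn down with input_unregister_device() only.  foo_* names are
 * hypothetical.
 */
#include <linux/input.h>

static int foo_register_one_input(struct device *parent)
{
        struct input_dev *input = input_allocate_device();
        int error;

        if (!input)
                return -ENOMEM;

        input->name = "foo";
        input->dev.parent = parent;
        input_set_capability(input, EV_KEY, BTN_0);

        error = input_register_device(input);
        if (error) {
                input_free_device(input);       /* never registered: free it */
                return error;
        }

        /* from here on, tear down with input_unregister_device(input) */
        return 0;
}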
error = -ENOMEM; + goto fail1; + } + wacom_wac->input = input_dev; + wacom_wac->pad_input = pad_input_dev; + wacom_wac->pad_input->name = wacom_wac->pad_name; + error = wacom_setup_input_capabilities(input_dev, wacom_wac); if (error) - goto fail1; + goto fail2; error = input_register_device(input_dev); if (error) goto fail2; + error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); + if (error) { + /* no pad in use on this interface */ + input_free_device(pad_input_dev); + wacom_wac->pad_input = NULL; + pad_input_dev = NULL; + } else { + error = input_register_device(pad_input_dev); + if (error) + goto fail3; + } + return 0; +fail3: + input_unregister_device(input_dev); + input_dev = NULL; fail2: - input_free_device(input_dev); wacom_wac->input = NULL; + wacom_wac->pad_input = NULL; fail1: + if (input_dev) + input_free_device(input_dev); + if (pad_input_dev) + input_free_device(pad_input_dev); return error; } @@ -1153,6 +1085,7 @@ static void wacom_wireless_work(struct work_struct *work) struct wacom *wacom = container_of(work, struct wacom, work); struct usb_device *usbdev = wacom->usbdev; struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct hid_device *hdev1, *hdev2; struct wacom *wacom1, *wacom2; struct wacom_wac *wacom_wac1, *wacom_wac2; int error; @@ -1165,50 +1098,49 @@ static void wacom_wireless_work(struct work_struct *work) wacom_destroy_battery(wacom); /* Stylus interface */ - wacom1 = usb_get_intfdata(usbdev->config->interface[1]); + hdev1 = usb_get_intfdata(usbdev->config->interface[1]); + wacom1 = hid_get_drvdata(hdev1); wacom_wac1 = &(wacom1->wacom_wac); - if (wacom_wac1->input) - input_unregister_device(wacom_wac1->input); - wacom_wac1->input = NULL; + wacom_unregister_inputs(wacom1); /* Touch interface */ - wacom2 = usb_get_intfdata(usbdev->config->interface[2]); + hdev2 = usb_get_intfdata(usbdev->config->interface[2]); + wacom2 = hid_get_drvdata(hdev2); wacom_wac2 = &(wacom2->wacom_wac); - if (wacom_wac2->input) - input_unregister_device(wacom_wac2->input); - wacom_wac2->input = NULL; + wacom_unregister_inputs(wacom2); if (wacom_wac->pid == 0) { - dev_info(&wacom->intf->dev, "wireless tablet disconnected\n"); + hid_info(wacom->hdev, "wireless tablet disconnected\n"); + wacom_wac1->shared->type = 0; } else { - const struct usb_device_id *id = wacom_ids; + const struct hid_device_id *id = wacom_ids; - dev_info(&wacom->intf->dev, - "wireless tablet connected with PID %x\n", + hid_info(wacom->hdev, "wireless tablet connected with PID %x\n", wacom_wac->pid); - while (id->match_flags) { - if (id->idVendor == USB_VENDOR_ID_WACOM && - id->idProduct == wacom_wac->pid) + while (id->bus) { + if (id->vendor == USB_VENDOR_ID_WACOM && + id->product == wacom_wac->pid) break; id++; } - if (!id->match_flags) { - dev_info(&wacom->intf->dev, - "ignoring unknown PID.\n"); + if (!id->bus) { + hid_info(wacom->hdev, "ignoring unknown PID.\n"); return; } /* Stylus interface */ wacom_wac1->features = - *((struct wacom_features *)id->driver_info); + *((struct wacom_features *)id->driver_data); wacom_wac1->features.device_type = BTN_TOOL_PEN; snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen", wacom_wac1->features.name); + snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", + wacom_wac1->features.name); wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max; wacom_wac1->shared->type = wacom_wac1->features.type; - error = wacom_register_input(wacom1); + error = wacom_register_inputs(wacom1); if (error) goto fail; @@ -1216,7 +1148,7 @@ static void 
wacom_wireless_work(struct work_struct *work) if (wacom_wac1->features.touch_max || wacom_wac1->features.type == INTUOSHT) { wacom_wac2->features = - *((struct wacom_features *)id->driver_info); + *((struct wacom_features *)id->driver_data); wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; wacom_wac2->features.device_type = BTN_TOOL_FINGER; wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; @@ -1226,7 +1158,9 @@ static void wacom_wireless_work(struct work_struct *work) else snprintf(wacom_wac2->name, WACOM_NAME_MAX, "%s (WL) Pad",wacom_wac2->features.name); - error = wacom_register_input(wacom2); + snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, + "%s (WL) Pad", wacom_wac2->features.name); + error = wacom_register_inputs(wacom2); if (error) goto fail; @@ -1243,15 +1177,8 @@ static void wacom_wireless_work(struct work_struct *work) return; fail: - if (wacom_wac2->input) { - input_unregister_device(wacom_wac2->input); - wacom_wac2->input = NULL; - } - - if (wacom_wac1->input) { - input_unregister_device(wacom_wac1->input); - wacom_wac1->input = NULL; - } + wacom_unregister_inputs(wacom1); + wacom_unregister_inputs(wacom2); return; } @@ -1282,69 +1209,89 @@ static void wacom_calculate_res(struct wacom_features *features) features->unitExpo); } -static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) +static int wacom_hid_report_len(struct hid_report *report) +{ + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); +} + +static size_t wacom_compute_pktlen(struct hid_device *hdev) +{ + struct hid_report_enum *report_enum; + struct hid_report *report; + size_t size = 0; + + report_enum = hdev->report_enum + HID_INPUT_REPORT; + + list_for_each_entry(report, &report_enum->report_list, list) { + size_t report_size = wacom_hid_report_len(report); + if (report_size > size) + size = report_size; + } + + return size; +} + +static int wacom_probe(struct hid_device *hdev, + const struct hid_device_id *id) { + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); - struct usb_endpoint_descriptor *endpoint; struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; int error; - if (!id->driver_info) + if (!id->driver_data) return -EINVAL; wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); if (!wacom) return -ENOMEM; + hid_set_drvdata(hdev, wacom); + wacom->hdev = hdev; + + /* ask for the report descriptor to be loaded by HID */ + error = hid_parse(hdev); + if (error) { + hid_err(hdev, "parse failed\n"); + goto fail1; + } + wacom_wac = &wacom->wacom_wac; - wacom_wac->features = *((struct wacom_features *)id->driver_info); + wacom_wac->features = *((struct wacom_features *)id->driver_data); features = &wacom_wac->features; + features->pktlen = wacom_compute_pktlen(hdev); if (features->pktlen > WACOM_PKGLEN_MAX) { error = -EINVAL; goto fail1; } - wacom_wac->data = usb_alloc_coherent(dev, WACOM_PKGLEN_MAX, - GFP_KERNEL, &wacom->data_dma); - if (!wacom_wac->data) { - error = -ENOMEM; + if (features->check_for_hid_type && features->hid_type != hdev->type) { + error = -ENODEV; goto fail1; } - wacom->irq = usb_alloc_urb(0, GFP_KERNEL); - if (!wacom->irq) { - error = -ENOMEM; - goto fail2; - } - wacom->usbdev = dev; wacom->intf = intf; mutex_init(&wacom->lock); INIT_WORK(&wacom->work, wacom_wireless_work); - usb_make_path(dev, wacom->phys, sizeof(wacom->phys)); - strlcat(wacom->phys, "/input0", 
sizeof(wacom->phys)); - - endpoint = &intf->cur_altsetting->endpoint[0].desc; /* set the default size in case we do not get them from hid */ wacom_set_default_phy(features); /* Retrieve the physical and logical size for touch devices */ - error = wacom_retrieve_hid_descriptor(intf, features); - if (error) - goto fail3; + wacom_retrieve_hid_descriptor(hdev, features); /* * Intuos5 has no useful data about its touch interface in its - * HID descriptor. If this is the touch interface (wMaxPacketSize + * HID descriptor. If this is the touch interface (PacketSize * of WACOM_PKGLEN_BBTOUCH3), override the table values. */ if (features->type >= INTUOS5S && features->type <= INTUOSHT) { - if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) { + if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { features->device_type = BTN_TOOL_FINGER; - features->pktlen = WACOM_PKGLEN_BBTOUCH3; features->x_max = 4096; features->y_max = 4096; @@ -1353,20 +1300,35 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i } } + /* + * Same thing for Bamboo 3rd gen. + */ + if ((features->type == BAMBOO_PT) && + (features->pktlen == WACOM_PKGLEN_BBTOUCH3) && + (features->device_type == BTN_TOOL_PEN)) { + features->device_type = BTN_TOOL_FINGER; + + features->x_max = 4096; + features->y_max = 4096; + } + + if (hdev->bus == BUS_BLUETOOTH) + features->quirks |= WACOM_QUIRK_BATTERY; + wacom_setup_device_quirks(features); /* set unit to "100th of a mm" for devices not reported by HID */ if (!features->unit) { features->unit = 0x11; - features->unitExpo = 16 - 3; + features->unitExpo = -3; } wacom_calculate_res(features); strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name)); + snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name), + "%s Pad", features->name); if (features->quirks & WACOM_QUIRK_MULTI_INPUT) { - struct usb_device *other_dev; - /* Append the device type to the name */ if (features->device_type != BTN_TOOL_FINGER) strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX); @@ -1375,43 +1337,49 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i else strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX); - other_dev = wacom_get_sibling(dev, features->oVid, features->oPid); - if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL) - other_dev = dev; - error = wacom_add_shared_data(wacom_wac, other_dev); + error = wacom_add_shared_data(hdev); if (error) - goto fail3; + goto fail1; } - usb_fill_int_urb(wacom->irq, dev, - usb_rcvintpipe(dev, endpoint->bEndpointAddress), - wacom_wac->data, features->pktlen, - wacom_sys_irq, wacom, endpoint->bInterval); - wacom->irq->transfer_dma = wacom->data_dma; - wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; - error = wacom_initialize_leds(wacom); if (error) - goto fail4; + goto fail2; + + if (!(features->quirks & WACOM_QUIRK_MONITOR) && + (features->quirks & WACOM_QUIRK_BATTERY)) { + error = wacom_initialize_battery(wacom); + if (error) + goto fail3; + } if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) { - error = wacom_register_input(wacom); + error = wacom_register_inputs(wacom); if (error) - goto fail5; + goto fail4; } - /* Note that if query fails it is not a hard failure */ - wacom_query_tablet_data(intf, features); + if (hdev->bus == BUS_BLUETOOTH) { + error = device_create_file(&hdev->dev, &dev_attr_speed); + if (error) + hid_warn(hdev, + "can't create sysfs speed attribute err: %d\n", + error); + } - usb_set_intfdata(intf, wacom); + /* Note that if query fails it is not a hard failure */ + 
wacom_query_tablet_data(hdev, features); - if (features->quirks & WACOM_QUIRK_MONITOR) { - if (usb_submit_urb(wacom->irq, GFP_KERNEL)) { - error = -EIO; - goto fail5; - } + /* Regular HID work starts now */ + error = hid_hw_start(hdev, HID_CONNECT_HIDRAW); + if (error) { + hid_err(hdev, "hw start failed\n"); + goto fail5; } + if (features->quirks & WACOM_QUIRK_MONITOR) + error = hid_hw_open(hdev); + if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) { if (wacom_wac->features.device_type == BTN_TOOL_FINGER) wacom_wac->shared->touch_input = wacom_wac->input; @@ -1419,79 +1387,72 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i return 0; - fail5: wacom_destroy_leds(wacom); - fail4: wacom_remove_shared_data(wacom_wac); - fail3: usb_free_urb(wacom->irq); - fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma); + fail5: if (hdev->bus == BUS_BLUETOOTH) + device_remove_file(&hdev->dev, &dev_attr_speed); + wacom_unregister_inputs(wacom); + fail4: wacom_destroy_battery(wacom); + fail3: wacom_destroy_leds(wacom); + fail2: wacom_remove_shared_data(wacom_wac); fail1: kfree(wacom); + hid_set_drvdata(hdev, NULL); return error; } -static void wacom_disconnect(struct usb_interface *intf) +static void wacom_remove(struct hid_device *hdev) { - struct wacom *wacom = usb_get_intfdata(intf); + struct wacom *wacom = hid_get_drvdata(hdev); - usb_set_intfdata(intf, NULL); + hid_hw_stop(hdev); - usb_kill_urb(wacom->irq); cancel_work_sync(&wacom->work); - if (wacom->wacom_wac.input) - input_unregister_device(wacom->wacom_wac.input); + wacom_unregister_inputs(wacom); + if (hdev->bus == BUS_BLUETOOTH) + device_remove_file(&hdev->dev, &dev_attr_speed); wacom_destroy_battery(wacom); wacom_destroy_leds(wacom); - usb_free_urb(wacom->irq); - usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX, - wacom->wacom_wac.data, wacom->data_dma); wacom_remove_shared_data(&wacom->wacom_wac); - kfree(wacom); -} - -static int wacom_suspend(struct usb_interface *intf, pm_message_t message) -{ - struct wacom *wacom = usb_get_intfdata(intf); - - mutex_lock(&wacom->lock); - usb_kill_urb(wacom->irq); - mutex_unlock(&wacom->lock); - return 0; + hid_set_drvdata(hdev, NULL); + kfree(wacom); } -static int wacom_resume(struct usb_interface *intf) +#ifdef CONFIG_PM +static int wacom_resume(struct hid_device *hdev) { - struct wacom *wacom = usb_get_intfdata(intf); + struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_features *features = &wacom->wacom_wac.features; - int rv = 0; mutex_lock(&wacom->lock); /* switch to wacom mode first */ - wacom_query_tablet_data(intf, features); + wacom_query_tablet_data(hdev, features); wacom_led_control(wacom); - if ((wacom->open || (features->quirks & WACOM_QUIRK_MONITOR)) && - usb_submit_urb(wacom->irq, GFP_NOIO) < 0) - rv = -EIO; - mutex_unlock(&wacom->lock); - return rv; + return 0; } -static int wacom_reset_resume(struct usb_interface *intf) +static int wacom_reset_resume(struct hid_device *hdev) { - return wacom_resume(intf); + return wacom_resume(hdev); } +#endif /* CONFIG_PM */ -static struct usb_driver wacom_driver = { +static struct hid_driver wacom_driver = { .name = "wacom", .id_table = wacom_ids, .probe = wacom_probe, - .disconnect = wacom_disconnect, - .suspend = wacom_suspend, + .remove = wacom_remove, +#ifdef CONFIG_PM .resume = wacom_resume, .reset_resume = wacom_reset_resume, - .supports_autosuspend = 1, +#endif + .raw_event = wacom_raw_event, }; +module_hid_driver(wacom_driver); 
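For reference, the hid_driver registration that replaces the usb_driver here follows the standard HID bus pattern; a minimal, self-contained sketch is below. The my_* names are hypothetical, while hid_parse(), hid_hw_start(), hid_hw_open()/hid_hw_close(), hid_hw_stop() and module_hid_driver() are the HID core entry points the converted probe/remove paths rely on.

/*
 * Minimal HID bus driver skeleton (sketch only; my_* names are hypothetical).
 */
#include <linux/hid.h>
#include <linux/module.h>

static int my_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	/* ask HID core to fetch and parse the report descriptor */
	ret = hid_parse(hdev);
	if (ret)
		return ret;

	/* start the transport; HID_CONNECT_HIDRAW leaves input handling to the driver */
	ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
	if (ret)
		return ret;

	/* open the low-level transport so the device starts delivering reports */
	ret = hid_hw_open(hdev);
	if (ret)
		hid_hw_stop(hdev);

	return ret;
}

static void my_remove(struct hid_device *hdev)
{
	hid_hw_close(hdev);
	hid_hw_stop(hdev);
}

static const struct hid_device_id my_ids[] = {
	{ HID_USB_DEVICE(0x056a, HID_ANY_ID) },	/* 0x056a is Wacom's USB vendor ID */
	{ }
};
MODULE_DEVICE_TABLE(hid, my_ids);

static struct hid_driver my_driver = {
	.name		= "my_hid_driver",
	.id_table	= my_ids,
	.probe		= my_probe,
	.remove		= my_remove,
};
module_hid_driver(my_driver);

MODULE_LICENSE("GPL");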
-module_usb_driver(wacom_driver); +MODULE_VERSION(DRIVER_VERSION); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE(DRIVER_LICENSE); diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/hid/wacom_wac.c index e73cf2c71f35..aa6a08eb7ad6 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -25,11 +25,23 @@ #define WACOM_INTUOS_RES 100 #define WACOM_INTUOS3_RES 200 -/* Scale factor relating reported contact size to logical contact area. +/* + * Scale factor relating reported contact size to logical contact area. * 2^14/pi is a good approximation on Intuos5 and 3rd-gen Bamboo */ #define WACOM_CONTACT_AREA_SCALE 2607 +/* + * Percent of battery capacity for Graphire. + * 8th value means AC online and show 100% capacity. + */ +static unsigned short batcap_gr[8] = { 1, 15, 25, 35, 50, 70, 100, 100 }; + +/* + * Percent of battery capacity for Intuos4 WL, AC has a separate bit. + */ +static unsigned short batcap_i4[8] = { 1, 15, 30, 45, 60, 70, 85, 100 }; + static int wacom_penpartner_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; @@ -217,17 +229,13 @@ static int wacom_dtus_irq(struct wacom_wac *wacom) "%s: received unknown report #%d", __func__, data[0]); return 0; } else if (data[0] == WACOM_REPORT_DTUSPAD) { + input = wacom->pad_input; input_report_key(input, BTN_0, (data[1] & 0x01)); input_report_key(input, BTN_1, (data[1] & 0x02)); input_report_key(input, BTN_2, (data[1] & 0x04)); input_report_key(input, BTN_3, (data[1] & 0x08)); input_report_abs(input, ABS_MISC, data[1] & 0x0f ? PAD_DEVICE_ID : 0); - /* - * Serial number is required when expresskeys are - * reported through pen interface. - */ - input_event(input, EV_MSC, MSC_SERIAL, 0xf0); return 1; } else { prox = data[1] & 0x80; @@ -257,7 +265,6 @@ static int wacom_dtus_irq(struct wacom_wac *wacom) wacom->id[0] = 0; input_report_key(input, wacom->tool[0], prox); input_report_abs(input, ABS_MISC, wacom->id[0]); - input_event(input, EV_MSC, MSC_SERIAL, 1); return 1; } } @@ -267,11 +274,20 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; struct input_dev *input = wacom->input; + struct input_dev *pad_input = wacom->pad_input; + int battery_capacity, ps_connected; int prox; int rw = 0; int retval = 0; - if (data[0] != WACOM_REPORT_PENABLED) { + if (features->type == GRAPHIRE_BT) { + if (data[0] != WACOM_REPORT_PENABLED_BT) { + dev_dbg(input->dev.parent, + "%s: received unknown report #%d\n", __func__, + data[0]); + goto exit; + } + } else if (data[0] != WACOM_REPORT_PENABLED) { dev_dbg(input->dev.parent, "%s: received unknown report #%d\n", __func__, data[0]); goto exit; @@ -305,7 +321,12 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); if (wacom->tool[0] != BTN_TOOL_MOUSE) { - input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8)); + if (features->type == GRAPHIRE_BT) + input_report_abs(input, ABS_PRESSURE, data[6] | + (((__u16) (data[1] & 0x08)) << 5)); + else + input_report_abs(input, ABS_PRESSURE, data[6] | + ((data[7] & 0x03) << 8)); input_report_key(input, BTN_TOUCH, data[1] & 0x01); input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x04); @@ -316,6 +337,20 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) features->type == WACOM_MO) { 
input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); rw = (data[7] & 0x04) - (data[7] & 0x03); + } else if (features->type == GRAPHIRE_BT) { + /* Compute distance between mouse and tablet */ + rw = 44 - (data[6] >> 2); + rw = clamp_val(rw, 0, 31); + input_report_abs(input, ABS_DISTANCE, rw); + if (((data[1] >> 5) & 3) == 2) { + /* Mouse with wheel */ + input_report_key(input, BTN_MIDDLE, + data[1] & 0x04); + rw = (data[6] & 0x01) ? -1 : + (data[6] & 0x02) ? 1 : 0; + } else { + rw = 0; + } } else { input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); rw = -(signed char)data[6]; @@ -327,7 +362,6 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) wacom->id[0] = 0; input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */ input_report_key(input, wacom->tool[0], prox); - input_event(input, EV_MSC, MSC_SERIAL, 1); input_sync(input); /* sync last event */ } @@ -337,14 +371,13 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) prox = data[7] & 0xf8; if (prox || wacom->id[1]) { wacom->id[1] = PAD_DEVICE_ID; - input_report_key(input, BTN_BACK, (data[7] & 0x40)); - input_report_key(input, BTN_FORWARD, (data[7] & 0x80)); + input_report_key(pad_input, BTN_BACK, (data[7] & 0x40)); + input_report_key(pad_input, BTN_FORWARD, (data[7] & 0x80)); rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3); - input_report_rel(input, REL_WHEEL, rw); + input_report_rel(pad_input, REL_WHEEL, rw); if (!prox) wacom->id[1] = 0; - input_report_abs(input, ABS_MISC, wacom->id[1]); - input_event(input, EV_MSC, MSC_SERIAL, 0xf0); + input_report_abs(pad_input, ABS_MISC, wacom->id[1]); retval = 1; } break; @@ -353,19 +386,43 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) prox = (data[7] & 0xf8) || data[8]; if (prox || wacom->id[1]) { wacom->id[1] = PAD_DEVICE_ID; - input_report_key(input, BTN_BACK, (data[7] & 0x08)); - input_report_key(input, BTN_LEFT, (data[7] & 0x20)); - input_report_key(input, BTN_FORWARD, (data[7] & 0x10)); - input_report_key(input, BTN_RIGHT, (data[7] & 0x40)); - input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f)); + input_report_key(pad_input, BTN_BACK, (data[7] & 0x08)); + input_report_key(pad_input, BTN_LEFT, (data[7] & 0x20)); + input_report_key(pad_input, BTN_FORWARD, (data[7] & 0x10)); + input_report_key(pad_input, BTN_RIGHT, (data[7] & 0x40)); + input_report_abs(pad_input, ABS_WHEEL, (data[8] & 0x7f)); + if (!prox) + wacom->id[1] = 0; + input_report_abs(pad_input, ABS_MISC, wacom->id[1]); + retval = 1; + } + break; + case GRAPHIRE_BT: + prox = data[7] & 0x03; + if (prox || wacom->id[1]) { + wacom->id[1] = PAD_DEVICE_ID; + input_report_key(pad_input, BTN_0, (data[7] & 0x02)); + input_report_key(pad_input, BTN_1, (data[7] & 0x01)); if (!prox) wacom->id[1] = 0; - input_report_abs(input, ABS_MISC, wacom->id[1]); - input_event(input, EV_MSC, MSC_SERIAL, 0xf0); + input_report_abs(pad_input, ABS_MISC, wacom->id[1]); retval = 1; } break; } + + /* Store current battery capacity and power supply state */ + if (features->type == GRAPHIRE_BT) { + rw = (data[7] >> 2 & 0x07); + battery_capacity = batcap_gr[rw]; + ps_connected = rw == 7; + if ((wacom->battery_capacity != battery_capacity) || + (wacom->ps_connected != ps_connected)) { + wacom->battery_capacity = battery_capacity; + wacom->ps_connected = ps_connected; + wacom_notify_battery(wacom); + } + } exit: return retval; } @@ -584,6 +641,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) /* pad packets. 
Works as a second tool and is always in prox */ if (data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD) { + input = wacom->pad_input; if (features->type >= INTUOS4S && features->type <= INTUOS4L) { input_report_key(input, BTN_0, (data[2] & 0x01)); input_report_key(input, BTN_1, (data[3] & 0x01)); @@ -773,7 +831,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_abs(input, ABS_MISC, 0); } } - input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff); return 1; } @@ -901,6 +958,58 @@ static int int_dist(int x1, int y1, int x2, int y2) return int_sqrt(x*x + y*y); } +static void wacom_intuos_bt_process_data(struct wacom_wac *wacom, + unsigned char *data) +{ + memcpy(wacom->data, data, 10); + wacom_intuos_irq(wacom); + + input_sync(wacom->input); + if (wacom->pad_input) + input_sync(wacom->pad_input); +} + +static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len) +{ + unsigned char data[WACOM_PKGLEN_MAX]; + int i = 1; + unsigned power_raw, battery_capacity, bat_charging, ps_connected; + + memcpy(data, wacom->data, len); + + switch (data[0]) { + case 0x04: + wacom_intuos_bt_process_data(wacom, data + i); + i += 10; + /* fall through */ + case 0x03: + wacom_intuos_bt_process_data(wacom, data + i); + i += 10; + wacom_intuos_bt_process_data(wacom, data + i); + i += 10; + power_raw = data[i]; + bat_charging = (power_raw & 0x08) ? 1 : 0; + ps_connected = (power_raw & 0x10) ? 1 : 0; + battery_capacity = batcap_i4[power_raw & 0x07]; + if ((wacom->battery_capacity != battery_capacity) || + (wacom->bat_charging != bat_charging) || + (wacom->ps_connected != ps_connected)) { + wacom->battery_capacity = battery_capacity; + wacom->bat_charging = bat_charging; + wacom->ps_connected = ps_connected; + wacom_notify_battery(wacom); + } + + break; + default: + dev_dbg(wacom->input->dev.parent, + "Unknown report: %d,%d size:%zu\n", + data[0], data[1], len); + return 0; + } + return 0; +} + static int wacom_24hdt_irq(struct wacom_wac *wacom) { struct input_dev *input = wacom->input; @@ -1093,7 +1202,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom) input_report_key(input, BTN_STYLUS2, data[1] & 0x10); input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); - input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x03) << 8) | data[6]); + input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x07) << 8) | data[6]); input_report_key(input, BTN_TOUCH, data[1] & 0x05); input_report_key(input, wacom->tool[0], prox); return 1; @@ -1143,6 +1252,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->input; + struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; int i; @@ -1177,14 +1287,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) input_mt_report_pointer_emulation(input, true); - input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0); - input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0); - input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0); - input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0); - - input_sync(input); + input_report_key(pad_input, BTN_LEFT, (data[1] & 0x08) != 0); + input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); + input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); + input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); - return 0; + return 1; } static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, 
unsigned char *data) @@ -1232,7 +1340,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) { - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pad_input; struct wacom_features *features = &wacom->features; if (features->type == INTUOSHT) { @@ -1269,9 +1377,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) } input_mt_report_pointer_emulation(input, true); - input_sync(input); - - return 0; + return 1; } static int wacom_bpt_pen(struct wacom_wac *wacom) @@ -1375,7 +1481,7 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len) connected = data[1] & 0x01; if (connected) { - int pid, battery; + int pid, battery, ps_connected; if ((wacom->shared->type == INTUOSHT) && wacom->shared->touch_max) { @@ -1385,17 +1491,29 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len) } pid = get_unaligned_be16(&data[6]); - battery = data[5] & 0x3f; + battery = (data[5] & 0x3f) * 100 / 31; + ps_connected = !!(data[5] & 0x80); if (wacom->pid != pid) { wacom->pid = pid; wacom_schedule_work(wacom); } - wacom->battery_capacity = battery; + + if (wacom->shared->type && + (battery != wacom->battery_capacity || + ps_connected != wacom->ps_connected)) { + wacom->battery_capacity = battery; + wacom->ps_connected = ps_connected; + wacom->bat_charging = ps_connected && + wacom->battery_capacity < 100; + wacom_notify_battery(wacom); + } } else if (wacom->pid != 0) { /* disconnected while previously connected */ wacom->pid = 0; wacom_schedule_work(wacom); wacom->battery_capacity = 0; + wacom->bat_charging = 0; + wacom->ps_connected = 0; } return 0; @@ -1416,6 +1534,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) case WACOM_G4: case GRAPHIRE: + case GRAPHIRE_BT: case WACOM_MO: sync = wacom_graphire_irq(wacom_wac); break; @@ -1450,6 +1569,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) sync = wacom_intuos_irq(wacom_wac); break; + case INTUOS4WL: + sync = wacom_intuos_bt_irq(wacom_wac, len); + break; + case WACOM_24HDT: sync = wacom_24hdt_irq(wacom_wac); break; @@ -1489,8 +1612,11 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) break; } - if (sync) + if (sync) { input_sync(wacom_wac->input); + if (wacom_wac->pad_input) + input_sync(wacom_wac->pad_input); + } } static void wacom_setup_cintiq(struct wacom_wac *wacom_wac) @@ -1565,8 +1691,10 @@ void wacom_setup_device_quirks(struct wacom_features *features) features->quirks |= WACOM_QUIRK_NO_INPUT; /* must be monitor interface if no device_type set */ - if (!features->device_type) + if (!features->device_type) { features->quirks |= WACOM_QUIRK_MONITOR; + features->quirks |= WACOM_QUIRK_BATTERY; + } } } @@ -1615,7 +1743,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac) { struct wacom_features *features = &wacom_wac->features; - int i; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); @@ -1630,10 +1757,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, /* fall through */ case WACOM_G4: - input_set_capability(input_dev, EV_MSC, MSC_SERIAL); - - __set_bit(BTN_BACK, input_dev->keybit); - __set_bit(BTN_FORWARD, input_dev->keybit); /* fall through */ case GRAPHIRE: @@ -1652,62 +1775,42 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; - case WACOM_24HD: - __set_bit(BTN_A, input_dev->keybit); - __set_bit(BTN_B, 
input_dev->keybit); - __set_bit(BTN_C, input_dev->keybit); - __set_bit(BTN_X, input_dev->keybit); - __set_bit(BTN_Y, input_dev->keybit); - __set_bit(BTN_Z, input_dev->keybit); + case GRAPHIRE_BT: + __clear_bit(ABS_MISC, input_dev->absbit); + input_set_abs_params(input_dev, ABS_DISTANCE, 0, + features->distance_max, + 0, 0); - for (i = 6; i < 10; i++) - __set_bit(BTN_0 + i, input_dev->keybit); + input_set_capability(input_dev, EV_REL, REL_WHEEL); - __set_bit(KEY_PROG1, input_dev->keybit); - __set_bit(KEY_PROG2, input_dev->keybit); - __set_bit(KEY_PROG3, input_dev->keybit); + __set_bit(BTN_LEFT, input_dev->keybit); + __set_bit(BTN_RIGHT, input_dev->keybit); + __set_bit(BTN_MIDDLE, input_dev->keybit); + + __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); + __set_bit(BTN_TOOL_PEN, input_dev->keybit); + __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); + __set_bit(BTN_STYLUS, input_dev->keybit); + __set_bit(BTN_STYLUS2, input_dev->keybit); + + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + break; + case WACOM_24HD: input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); /* fall through */ case DTK: - for (i = 0; i < 6; i++) - __set_bit(BTN_0 + i, input_dev->keybit); - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); wacom_setup_cintiq(wacom_wac); break; case WACOM_22HD: - __set_bit(KEY_PROG1, input_dev->keybit); - __set_bit(KEY_PROG2, input_dev->keybit); - __set_bit(KEY_PROG3, input_dev->keybit); - /* fall through */ - case WACOM_21UX2: - __set_bit(BTN_A, input_dev->keybit); - __set_bit(BTN_B, input_dev->keybit); - __set_bit(BTN_C, input_dev->keybit); - __set_bit(BTN_X, input_dev->keybit); - __set_bit(BTN_Y, input_dev->keybit); - __set_bit(BTN_Z, input_dev->keybit); - __set_bit(BTN_BASE, input_dev->keybit); - __set_bit(BTN_BASE2, input_dev->keybit); - /* fall through */ - case WACOM_BEE: - __set_bit(BTN_8, input_dev->keybit); - __set_bit(BTN_9, input_dev->keybit); - /* fall through */ - case CINTIQ: - for (i = 0; i < 8; i++) - __set_bit(BTN_0 + i, input_dev->keybit); - - input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); - input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); @@ -1716,9 +1819,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, break; case WACOM_13HD: - for (i = 0; i < 9; i++) - __set_bit(BTN_0 + i, input_dev->keybit); - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); wacom_setup_cintiq(wacom_wac); @@ -1726,21 +1826,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case INTUOS3: case INTUOS3L: - __set_bit(BTN_4, input_dev->keybit); - __set_bit(BTN_5, input_dev->keybit); - __set_bit(BTN_6, input_dev->keybit); - __set_bit(BTN_7, input_dev->keybit); - - input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); - /* fall through */ - case INTUOS3S: - __set_bit(BTN_0, input_dev->keybit); - __set_bit(BTN_1, input_dev->keybit); - __set_bit(BTN_2, input_dev->keybit); - __set_bit(BTN_3, input_dev->keybit); - - input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); /* fall through */ @@ -1754,20 +1840,11 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case INTUOS5L: case INTUOSPM: case INTUOSPL: - if (features->device_type == BTN_TOOL_PEN) { - __set_bit(BTN_7, input_dev->keybit); - __set_bit(BTN_8, input_dev->keybit); - } - /* fall through */ - case 
INTUOS5S: case INTUOSPS: __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (features->device_type == BTN_TOOL_PEN) { - for (i = 0; i < 7; i++) - __set_bit(BTN_0 + i, input_dev->keybit); - input_set_abs_params(input_dev, ABS_DISTANCE, 0, features->distance_max, 0, 0); @@ -1787,15 +1864,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, break; case INTUOS4: + case INTUOS4WL: case INTUOS4L: - __set_bit(BTN_7, input_dev->keybit); - __set_bit(BTN_8, input_dev->keybit); - /* fall through */ - case INTUOS4S: - for (i = 0; i < 7; i++) - __set_bit(BTN_0 + i, input_dev->keybit); - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); wacom_setup_intuos(wacom_wac); @@ -1833,11 +1904,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case DTUS: case PL: case DTU: - if (features->type == DTUS) { - input_set_capability(input_dev, EV_MSC, MSC_SERIAL); - for (i = 0; i < 4; i++) - __set_bit(BTN_0 + i, input_dev->keybit); - } __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); @@ -1871,11 +1937,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, if (features->device_type == BTN_TOOL_FINGER) { - __set_bit(BTN_LEFT, input_dev->keybit); - __set_bit(BTN_FORWARD, input_dev->keybit); - __set_bit(BTN_BACK, input_dev->keybit); - __set_bit(BTN_RIGHT, input_dev->keybit); - if (features->touch_max) { if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { input_set_abs_params(input_dev, @@ -1905,449 +1966,629 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, break; case CINTIQ_HYBRID: + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + + wacom_setup_cintiq(wacom_wac); + break; + } + return 0; +} + +int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, + struct wacom_wac *wacom_wac) +{ + struct wacom_features *features = &wacom_wac->features; + int i; + + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + + /* kept for making legacy xf86-input-wacom working with the wheels */ + __set_bit(ABS_MISC, input_dev->absbit); + + /* kept for making legacy xf86-input-wacom accepting the pad */ + input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0); + input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0); + + switch (features->type) { + case GRAPHIRE_BT: + __set_bit(BTN_0, input_dev->keybit); __set_bit(BTN_1, input_dev->keybit); - __set_bit(BTN_2, input_dev->keybit); - __set_bit(BTN_3, input_dev->keybit); - __set_bit(BTN_4, input_dev->keybit); + break; + + case WACOM_MO: + __set_bit(BTN_BACK, input_dev->keybit); + __set_bit(BTN_LEFT, input_dev->keybit); + __set_bit(BTN_FORWARD, input_dev->keybit); + __set_bit(BTN_RIGHT, input_dev->keybit); + input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); + break; + + case WACOM_G4: + __set_bit(BTN_BACK, input_dev->keybit); + __set_bit(BTN_LEFT, input_dev->keybit); + __set_bit(BTN_FORWARD, input_dev->keybit); + __set_bit(BTN_RIGHT, input_dev->keybit); + input_set_capability(input_dev, EV_REL, REL_WHEEL); + break; + + case WACOM_24HD: + __set_bit(BTN_A, input_dev->keybit); + __set_bit(BTN_B, input_dev->keybit); + __set_bit(BTN_C, input_dev->keybit); + __set_bit(BTN_X, input_dev->keybit); + __set_bit(BTN_Y, input_dev->keybit); + __set_bit(BTN_Z, input_dev->keybit); + for (i = 0; i < 10; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + __set_bit(KEY_PROG1, input_dev->keybit); + __set_bit(KEY_PROG2, input_dev->keybit); + __set_bit(KEY_PROG3, input_dev->keybit); 
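As an aside to the pad-capability hunks around this point: the new pad_input devices are ordinary input devices, and the general registration pattern looks like the sketch below. It is illustrative only, assuming a hypothetical my_register_pad() helper and an arbitrary choice of four buttons plus a 0..71 wheel axis (the same range the driver uses for ABS_WHEEL); it is not the driver's actual mapping.

/*
 * Illustrative sketch only: expose a "pad" as its own input device.
 * my_register_pad() and the button/axis choices are hypothetical.
 */
#include <linux/err.h>
#include <linux/input.h>

static struct input_dev *my_register_pad(struct device *parent)
{
	struct input_dev *pad;
	int i, error;

	pad = input_allocate_device();
	if (!pad)
		return ERR_PTR(-ENOMEM);

	pad->name = "Example Pad";
	pad->dev.parent = parent;

	/* expresskey-style buttons are plain EV_KEY events */
	__set_bit(EV_KEY, pad->evbit);
	for (i = 0; i < 4; i++)
		__set_bit(BTN_0 + i, pad->keybit);

	/* a touch ring/wheel is an absolute axis */
	__set_bit(EV_ABS, pad->evbit);
	input_set_abs_params(pad, ABS_WHEEL, 0, 71, 0, 0);

	error = input_register_device(pad);
	if (error) {
		input_free_device(pad);
		return ERR_PTR(error);
	}

	return pad;
}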
+ + input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); + input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); + break; + + case DTK: + for (i = 0; i < 6; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + break; + + case WACOM_22HD: + __set_bit(KEY_PROG1, input_dev->keybit); + __set_bit(KEY_PROG2, input_dev->keybit); + __set_bit(KEY_PROG3, input_dev->keybit); + /* fall through */ + + case WACOM_21UX2: + __set_bit(BTN_A, input_dev->keybit); + __set_bit(BTN_B, input_dev->keybit); + __set_bit(BTN_C, input_dev->keybit); + __set_bit(BTN_X, input_dev->keybit); + __set_bit(BTN_Y, input_dev->keybit); + __set_bit(BTN_Z, input_dev->keybit); + __set_bit(BTN_BASE, input_dev->keybit); + __set_bit(BTN_BASE2, input_dev->keybit); + /* fall through */ + + case WACOM_BEE: + __set_bit(BTN_8, input_dev->keybit); + __set_bit(BTN_9, input_dev->keybit); + /* fall through */ + + case CINTIQ: + for (i = 0; i < 8; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); + input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); + break; + + case WACOM_13HD: + for (i = 0; i < 9; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); + break; + + case INTUOS3: + case INTUOS3L: + __set_bit(BTN_4, input_dev->keybit); __set_bit(BTN_5, input_dev->keybit); __set_bit(BTN_6, input_dev->keybit); __set_bit(BTN_7, input_dev->keybit); - __set_bit(BTN_8, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); + /* fall through */ + + case INTUOS3S: __set_bit(BTN_0, input_dev->keybit); + __set_bit(BTN_1, input_dev->keybit); + __set_bit(BTN_2, input_dev->keybit); + __set_bit(BTN_3, input_dev->keybit); - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); + break; - wacom_setup_cintiq(wacom_wac); + case INTUOS5: + case INTUOS5L: + case INTUOSPM: + case INTUOSPL: + __set_bit(BTN_7, input_dev->keybit); + __set_bit(BTN_8, input_dev->keybit); + /* fall through */ + + case INTUOS5S: + case INTUOSPS: + /* touch interface does not have the pad device */ + if (features->device_type != BTN_TOOL_PEN) + return 1; + + for (i = 0; i < 7; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); break; + + case INTUOS4WL: + /* + * For Bluetooth devices, the udev rule does not work correctly + * for pads unless we add a stylus capability, which forces + * ID_INPUT_TABLET to be set. 
+ */ + __set_bit(BTN_STYLUS, input_dev->keybit); + /* fall through */ + + case INTUOS4: + case INTUOS4L: + __set_bit(BTN_7, input_dev->keybit); + __set_bit(BTN_8, input_dev->keybit); + /* fall through */ + + case INTUOS4S: + for (i = 0; i < 7; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); + break; + + case CINTIQ_HYBRID: + for (i = 0; i < 9; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + break; + + case DTUS: + for (i = 0; i < 4; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + break; + + case INTUOSHT: + case BAMBOO_PT: + /* pad device is on the touch interface */ + if (features->device_type != BTN_TOOL_FINGER) + return 1; + + __clear_bit(ABS_MISC, input_dev->absbit); + + __set_bit(BTN_LEFT, input_dev->keybit); + __set_bit(BTN_FORWARD, input_dev->keybit); + __set_bit(BTN_BACK, input_dev->keybit); + __set_bit(BTN_RIGHT, input_dev->keybit); + + break; + + default: + /* no pad supported */ + return 1; } return 0; } static const struct wacom_features wacom_features_0x00 = - { "Wacom Penpartner", WACOM_PKGLEN_PENPRTN, 5040, 3780, 255, - 0, PENPARTNER, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; + { "Wacom Penpartner", 5040, 3780, 255, 0, + PENPARTNER, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; static const struct wacom_features wacom_features_0x10 = - { "Wacom Graphire", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, - 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Graphire", 10206, 7422, 511, 63, + GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; +static const struct wacom_features wacom_features_0x81 = + { "Wacom Graphire BT", 16704, 12064, 511, 32, + GRAPHIRE_BT, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x11 = - { "Wacom Graphire2 4x5", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, - 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Graphire2 4x5", 10206, 7422, 511, 63, + GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x12 = - { "Wacom Graphire2 5x7", WACOM_PKGLEN_GRAPHIRE, 13918, 10206, 511, - 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Graphire2 5x7", 13918, 10206, 511, 63, + GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x13 = - { "Wacom Graphire3", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, - 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Graphire3", 10208, 7424, 511, 63, + GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x14 = - { "Wacom Graphire3 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, - 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Graphire3 6x8", 16704, 12064, 511, 63, + GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x15 = - { "Wacom Graphire4 4x5", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, - 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Graphire4 4x5", 10208, 7424, 511, 63, + WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x16 = - { "Wacom Graphire4 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, - 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Graphire4 6x8", 16704, 12064, 511, 63, + WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x17 = - { "Wacom BambooFun 4x5", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, - 63, 
WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom BambooFun 4x5", 14760, 9225, 511, 63, + WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x18 = - { "Wacom BambooFun 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 511, - 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom BambooFun 6x8", 21648, 13530, 511, 63, + WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x19 = - { "Wacom Bamboo1 Medium", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, - 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; + { "Wacom Bamboo1 Medium", 16704, 12064, 511, 63, + GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x60 = - { "Wacom Volito", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, - 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; + { "Wacom Volito", 5104, 3712, 511, 63, + GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x61 = - { "Wacom PenStation2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 255, - 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; + { "Wacom PenStation2", 3250, 2320, 255, 63, + GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x62 = - { "Wacom Volito2 4x5", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, - 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; + { "Wacom Volito2 4x5", 5104, 3712, 511, 63, + GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x63 = - { "Wacom Volito2 2x3", WACOM_PKGLEN_GRAPHIRE, 3248, 2320, 511, - 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; + { "Wacom Volito2 2x3", 3248, 2320, 511, 63, + GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x64 = - { "Wacom PenPartner2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 511, - 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; + { "Wacom PenPartner2", 3250, 2320, 511, 63, + GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x65 = - { "Wacom Bamboo", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, - 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo", 14760, 9225, 511, 63, + WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x69 = - { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, - 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; + { "Wacom Bamboo1", 5104, 3712, 511, 63, + GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; static const struct wacom_features wacom_features_0x6A = - { "Wacom Bamboo1 4x6", WACOM_PKGLEN_GRAPHIRE, 14760, 9225, 1023, - 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo1 4x6", 14760, 9225, 1023, 63, + GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x6B = - { "Wacom Bamboo1 5x8", WACOM_PKGLEN_GRAPHIRE, 21648, 13530, 1023, - 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo1 5x8", 21648, 13530, 1023, 63, + GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x20 = - { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos 4x5", 12700, 10600, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x21 = - { "Wacom Intuos 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, - 31, INTUOS, 
WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos 6x8", 20320, 16240, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x22 = - { "Wacom Intuos 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos 9x12", 30480, 24060, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x23 = - { "Wacom Intuos 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos 12x12", 30480, 31680, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x24 = - { "Wacom Intuos 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos 12x18", 45720, 31680, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x30 = - { "Wacom PL400", WACOM_PKGLEN_GRAPHIRE, 5408, 4056, 255, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL400", 5408, 4056, 255, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x31 = - { "Wacom PL500", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 255, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL500", 6144, 4608, 255, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x32 = - { "Wacom PL600", WACOM_PKGLEN_GRAPHIRE, 6126, 4604, 255, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL600", 6126, 4604, 255, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x33 = - { "Wacom PL600SX", WACOM_PKGLEN_GRAPHIRE, 6260, 5016, 255, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL600SX", 6260, 5016, 255, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x34 = - { "Wacom PL550", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL550", 6144, 4608, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x35 = - { "Wacom PL800", WACOM_PKGLEN_GRAPHIRE, 7220, 5780, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL800", 7220, 5780, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x37 = - { "Wacom PL700", WACOM_PKGLEN_GRAPHIRE, 6758, 5406, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL700", 6758, 5406, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x38 = - { "Wacom PL510", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom PL510", 6282, 4762, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x39 = - { "Wacom DTU710", WACOM_PKGLEN_GRAPHIRE, 34080, 27660, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom DTU710", 34080, 27660, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC4 = - { "Wacom DTF521", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom DTF521", 6282, 4762, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC0 = - { "Wacom DTF720", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom DTF720", 6858, 5506, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC2 = 
- { "Wacom DTF720a", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, - 0, PL, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom DTF720a", 6858, 5506, 511, 0, + PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x03 = - { "Wacom Cintiq Partner", WACOM_PKGLEN_GRAPHIRE, 20480, 15360, 511, - 0, PTU, WACOM_PL_RES, WACOM_PL_RES }; + { "Wacom Cintiq Partner", 20480, 15360, 511, 0, + PTU, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x41 = - { "Wacom Intuos2 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos2 4x5", 12700, 10600, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x42 = - { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos2 6x8", 20320, 16240, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x43 = - { "Wacom Intuos2 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos2 9x12", 30480, 24060, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x44 = - { "Wacom Intuos2 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos2 12x12", 30480, 31680, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x45 = - { "Wacom Intuos2 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos2 12x18", 45720, 31680, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xB0 = - { "Wacom Intuos3 4x5", WACOM_PKGLEN_INTUOS, 25400, 20320, 1023, - 63, INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos3 4x5", 25400, 20320, 1023, 63, + INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB1 = - { "Wacom Intuos3 6x8", WACOM_PKGLEN_INTUOS, 40640, 30480, 1023, - 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos3 6x8", 40640, 30480, 1023, 63, + INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB2 = - { "Wacom Intuos3 9x12", WACOM_PKGLEN_INTUOS, 60960, 45720, 1023, - 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos3 9x12", 60960, 45720, 1023, 63, + INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB3 = - { "Wacom Intuos3 12x12", WACOM_PKGLEN_INTUOS, 60960, 60960, 1023, - 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos3 12x12", 60960, 60960, 1023, 63, + INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB4 = - { "Wacom Intuos3 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 1023, - 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos3 12x19", 97536, 60960, 1023, 63, + INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB5 = - { "Wacom Intuos3 6x11", WACOM_PKGLEN_INTUOS, 54204, 31750, 1023, - 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos3 6x11", 54204, 31750, 1023, 63, + INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB7 = - { "Wacom 
Intuos3 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 1023, - 63, INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos3 4x6", 31496, 19685, 1023, 63, + INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB8 = - { "Wacom Intuos4 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, - 63, INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos4 4x6", 31496, 19685, 2047, 63, + INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB9 = - { "Wacom Intuos4 6x9", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, - 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos4 6x9", 44704, 27940, 2047, 63, + INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBA = - { "Wacom Intuos4 8x13", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, - 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos4 8x13", 65024, 40640, 2047, 63, + INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBB = - { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, - 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos4 12x19", 97536, 60960, 2047, 63, + INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBC = - { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047, - 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos4 WL", 40640, 25400, 2047, 63, + INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0xBD = + { "Wacom Intuos4 WL", 40640, 25400, 2047, 63, + INTUOS4WL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x26 = - { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, - 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .touch_max = 16 }; + { "Wacom Intuos5 touch S", 31496, 19685, 2047, 63, + INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 }; static const struct wacom_features wacom_features_0x27 = - { "Wacom Intuos5 touch M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, - 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .touch_max = 16 }; + { "Wacom Intuos5 touch M", 44704, 27940, 2047, 63, + INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 }; static const struct wacom_features wacom_features_0x28 = - { "Wacom Intuos5 touch L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, - 63, INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .touch_max = 16 }; + { "Wacom Intuos5 touch L", 65024, 40640, 2047, 63, + INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 }; static const struct wacom_features wacom_features_0x29 = - { "Wacom Intuos5 S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, - 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos5 S", 31496, 19685, 2047, 63, + INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x2A = - { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, - 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Intuos5 M", 44704, 27940, 2047, 63, + INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x314 = - { "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, - 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .touch_max = 16 }; + { "Wacom Intuos Pro S", 31496, 19685, 2047, 63, + INTUOSPS, 
WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x315 = - { "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, - 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .touch_max = 16 }; + { "Wacom Intuos Pro M", 44704, 27940, 2047, 63, + INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x317 = - { "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, - 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - .touch_max = 16 }; + { "Wacom Intuos Pro L", 65024, 40640, 2047, 63, + INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0xF4 = - { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104280, 65400, 2047, - 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 24HD", 104280, 65400, 2047, 63, + WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; static const struct wacom_features wacom_features_0xF8 = - { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104280, 65400, 2047, /* Pen */ - 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "Wacom Cintiq 24HD touch", 104280, 65400, 2047, 63, /* Pen */ + WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 }; static const struct wacom_features wacom_features_0xF6 = { "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */ - .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 }; + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x3F = - { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, - 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Cintiq 21UX", 87200, 65600, 1023, 63, + CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC5 = - { "Wacom Cintiq 20WSX", WACOM_PKGLEN_INTUOS, 86680, 54180, 1023, - 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Cintiq 20WSX", 86680, 54180, 1023, 63, + WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC6 = - { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, - 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Cintiq 12WX", 53020, 33440, 1023, 63, + WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x304 = - { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59352, 33648, 1023, - 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 13HD", 59352, 33648, 1023, 63, + WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; static const struct wacom_features wacom_features_0xC7 = - { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, - 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom DTU1931", 37832, 30305, 511, 0, + PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xCE = - { "Wacom DTU2231", WACOM_PKGLEN_GRAPHIRE, 47864, 27011, 511, - 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom DTU2231", 47864, 27011, 511, 0, + DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES, + .check_for_hid_type = true, 
.hid_type = HID_TYPE_USBMOUSE }; static const struct wacom_features wacom_features_0xF0 = - { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, - 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom DTU1631", 34623, 19553, 511, 0, + DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xFB = - { "Wacom DTU1031", WACOM_PKGLEN_DTUS, 22096, 13960, 511, - 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom DTU1031", 22096, 13960, 511, 0, + DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x57 = - { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047, - 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom DTK2241", 95640, 54060, 2047, 63, + DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; static const struct wacom_features wacom_features_0x59 = /* Pen */ - { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047, - 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "Wacom DTH2242", 95640, 54060, 2047, 63, + DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D }; static const struct wacom_features wacom_features_0x5D = /* Touch */ { "Wacom DTH2242", .type = WACOM_24HDT, - .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10 }; + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0xCC = - { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87000, 65400, 2047, - 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 21UX2", 87000, 65400, 2047, 63, + WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; static const struct wacom_features wacom_features_0xFA = - { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047, - 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; + { "Wacom Cintiq 22HD", 95640, 54060, 2047, 63, + WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 }; static const struct wacom_features wacom_features_0x5B = - { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047, - 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "Wacom Cintiq 22HDT", 95640, 54060, 2047, 63, + WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e }; static const struct wacom_features wacom_features_0x5E = { "Wacom Cintiq 22HDT", .type = WACOM_24HDT, - .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 }; + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x90 = - { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 90", 26202, 16325, 255, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x93 = - { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 93", 26202, 16325, 255, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x97 = - { "Wacom ISDv4 97", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 511, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 97", 26202, 16325, 511, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features 
wacom_features_0x9A = - { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 9A", 26202, 16325, 255, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x9F = - { "Wacom ISDv4 9F", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 9F", 26202, 16325, 255, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xE2 = - { "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, - 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom ISDv4 E2", 26202, 16325, 255, 0, + TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xE3 = - { "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, - 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom ISDv4 E3", 26202, 16325, 255, 0, + TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xE5 = - { "Wacom ISDv4 E5", WACOM_PKGLEN_MTOUCH, 26202, 16325, 255, - 0, MTSCREEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 E5", 26202, 16325, 255, 0, + MTSCREEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xE6 = - { "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255, - 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom ISDv4 E6", 27760, 15694, 255, 0, + TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xEC = - { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 EC", 25710, 14500, 255, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xED = - { "Wacom ISDv4 ED", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, - 0, TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 ED", 26202, 16325, 255, 0, + TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xEF = - { "Wacom ISDv4 EF", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 EF", 26202, 16325, 255, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x100 = - { "Wacom ISDv4 100", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, - 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 100", 26202, 16325, 255, 0, + MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x101 = - { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, - 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 101", 26202, 16325, 255, 0, + MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x10D = - { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, - 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 10D", 26202, 16325, 255, 0, + MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x10E = - { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255, - 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 10E", 27760, 15694, 255, 0, + MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const 
struct wacom_features wacom_features_0x10F = - { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255, - 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 10F", 27760, 15694, 255, 0, + MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x116 = - { "Wacom ISDv4 116", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, - 0, TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 116", 26202, 16325, 255, 0, + TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x12C = + { "Wacom ISDv4 12C", 27848, 15752, 2047, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x4001 = - { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, - 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 4001", 26202, 16325, 255, 0, + MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x4004 = - { "Wacom ISDv4 4004", WACOM_PKGLEN_MTTPC, 11060, 6220, 255, - 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 4004", 11060, 6220, 255, 0, + MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x5000 = - { "Wacom ISDv4 5000", WACOM_PKGLEN_MTTPC, 27848, 15752, 1023, - 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 5000", 27848, 15752, 1023, 0, + MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x5002 = - { "Wacom ISDv4 5002", WACOM_PKGLEN_MTTPC, 29576, 16724, 1023, - 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom ISDv4 5002", 29576, 16724, 1023, 0, + MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x47 = - { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, - 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos2 6x8", 20320, 16240, 1023, 31, + INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x84 = - { "Wacom Wireless Receiver", WACOM_PKGLEN_WIRELESS, 0, 0, 0, - 0, WIRELESS, 0, 0, .touch_max = 16 }; + { "Wacom Wireless Receiver", 0, 0, 0, 0, + WIRELESS, 0, 0, .touch_max = 16 }; static const struct wacom_features wacom_features_0xD0 = - { "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom Bamboo 2FG", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD1 = - { "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom Bamboo 2FG 4x5", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD2 = - { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom Bamboo Craft", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD3 = - { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom Bamboo 2FG 6x8", 21648, 13700, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features 
wacom_features_0xD4 = - { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo Pen", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD5 = - { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo Pen 6x8", 21648, 13700, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD6 = - { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom BambooPT 2FG 4x5", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD7 = - { "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom BambooPT 2FG Small", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xD8 = - { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom Bamboo Comic 2FG", 21648, 13700, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xDA = - { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom Bamboo 2FG 4x5 SE", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xDB = - { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 2 }; + { "Wacom Bamboo 2FG 6x8 SE", 21648, 13700, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 }; static const struct wacom_features wacom_features_0xDD = - { "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo Connect", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xDE = - { "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 16 }; + { "Wacom Bamboo 16FG 4x5", 14720, 9200, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16 }; static const struct wacom_features wacom_features_0xDF = - { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 16 }; + { "Wacom Bamboo 16FG 6x8", 21648, 13700, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16 }; static const struct wacom_features wacom_features_0x300 = - { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo One S", 14720, 9225, 1023, 31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x301 = - { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023, - 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Bamboo One M", 21648, 13530, 1023, 
31, + BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x302 = - { "Wacom Intuos PT S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023, - 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 16 }; + { "Wacom Intuos PT S", 15200, 9500, 1023, 31, + INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x303 = - { "Wacom Intuos PT M", WACOM_PKGLEN_BBPEN, 21600, 13500, 1023, - 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, - .touch_max = 16 }; + { "Wacom Intuos PT M", 21600, 13500, 1023, 31, + INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x30E = - { "Wacom Intuos S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023, - 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; + { "Wacom Intuos S", 15200, 9500, 1023, 31, + INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x6004 = - { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, - 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; -static const struct wacom_features wacom_features_0x0307 = - { "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS, 59352, 33648, 2047, - 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, + { "ISD-V4", 12800, 8000, 255, 0, + TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x307 = + { "Wacom ISDv5 307", 59352, 33648, 2047, 63, + CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 }; -static const struct wacom_features wacom_features_0x0309 = +static const struct wacom_features wacom_features_0x309 = { "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */ - .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10 }; + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; -#define USB_DEVICE_WACOM(prod) \ - USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ - .driver_info = (kernel_ulong_t)&wacom_features_##prod +#define USB_DEVICE_WACOM(prod) \ + HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ + .driver_data = (kernel_ulong_t)&wacom_features_##prod -#define USB_DEVICE_DETAILED(prod, class, sub, proto) \ - USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_WACOM, prod, class, \ - sub, proto), \ - .driver_info = (kernel_ulong_t)&wacom_features_##prod +#define BT_DEVICE_WACOM(prod) \ + HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ + .driver_data = (kernel_ulong_t)&wacom_features_##prod #define USB_DEVICE_LENOVO(prod) \ - USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ - .driver_info = (kernel_ulong_t)&wacom_features_##prod + HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ + .driver_data = (kernel_ulong_t)&wacom_features_##prod -const struct usb_device_id wacom_ids[] = { +const struct hid_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x00) }, + { USB_DEVICE_WACOM(0x03) }, { USB_DEVICE_WACOM(0x10) }, { USB_DEVICE_WACOM(0x11) }, { USB_DEVICE_WACOM(0x12) }, @@ -2358,20 +2599,16 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x17) }, { USB_DEVICE_WACOM(0x18) }, { USB_DEVICE_WACOM(0x19) }, - { USB_DEVICE_WACOM(0x60) }, - { USB_DEVICE_WACOM(0x61) }, - { USB_DEVICE_WACOM(0x62) }, - { USB_DEVICE_WACOM(0x63) }, - { 
USB_DEVICE_WACOM(0x64) }, - { USB_DEVICE_WACOM(0x65) }, - { USB_DEVICE_WACOM(0x69) }, - { USB_DEVICE_WACOM(0x6A) }, - { USB_DEVICE_WACOM(0x6B) }, { USB_DEVICE_WACOM(0x20) }, { USB_DEVICE_WACOM(0x21) }, { USB_DEVICE_WACOM(0x22) }, { USB_DEVICE_WACOM(0x23) }, { USB_DEVICE_WACOM(0x24) }, + { USB_DEVICE_WACOM(0x26) }, + { USB_DEVICE_WACOM(0x27) }, + { USB_DEVICE_WACOM(0x28) }, + { USB_DEVICE_WACOM(0x29) }, + { USB_DEVICE_WACOM(0x2A) }, { USB_DEVICE_WACOM(0x30) }, { USB_DEVICE_WACOM(0x31) }, { USB_DEVICE_WACOM(0x32) }, @@ -2381,20 +2618,34 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x37) }, { USB_DEVICE_WACOM(0x38) }, { USB_DEVICE_WACOM(0x39) }, - { USB_DEVICE_WACOM(0xC4) }, - { USB_DEVICE_WACOM(0xC0) }, - { USB_DEVICE_WACOM(0xC2) }, - { USB_DEVICE_WACOM(0x03) }, + { USB_DEVICE_WACOM(0x3F) }, { USB_DEVICE_WACOM(0x41) }, { USB_DEVICE_WACOM(0x42) }, { USB_DEVICE_WACOM(0x43) }, { USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, + { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0x57) }, { USB_DEVICE_WACOM(0x59) }, - { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0x5B) }, - { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_WACOM(0x5D) }, + { USB_DEVICE_WACOM(0x5E) }, + { USB_DEVICE_WACOM(0x60) }, + { USB_DEVICE_WACOM(0x61) }, + { USB_DEVICE_WACOM(0x62) }, + { USB_DEVICE_WACOM(0x63) }, + { USB_DEVICE_WACOM(0x64) }, + { USB_DEVICE_WACOM(0x65) }, + { USB_DEVICE_WACOM(0x69) }, + { USB_DEVICE_WACOM(0x6A) }, + { USB_DEVICE_WACOM(0x6B) }, + { BT_DEVICE_WACOM(0x81) }, + { USB_DEVICE_WACOM(0x84) }, + { USB_DEVICE_WACOM(0x90) }, + { USB_DEVICE_WACOM(0x93) }, + { USB_DEVICE_WACOM(0x97) }, + { USB_DEVICE_WACOM(0x9A) }, + { USB_DEVICE_WACOM(0x9F) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, @@ -2407,23 +2658,15 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xBA) }, { USB_DEVICE_WACOM(0xBB) }, { USB_DEVICE_WACOM(0xBC) }, - { USB_DEVICE_WACOM(0x26) }, - { USB_DEVICE_WACOM(0x27) }, - { USB_DEVICE_WACOM(0x28) }, - { USB_DEVICE_WACOM(0x29) }, - { USB_DEVICE_WACOM(0x2A) }, - { USB_DEVICE_WACOM(0x3F) }, + { BT_DEVICE_WACOM(0xBD) }, + { USB_DEVICE_WACOM(0xC0) }, + { USB_DEVICE_WACOM(0xC2) }, + { USB_DEVICE_WACOM(0xC4) }, { USB_DEVICE_WACOM(0xC5) }, { USB_DEVICE_WACOM(0xC6) }, { USB_DEVICE_WACOM(0xC7) }, - /* - * DTU-2231 has two interfaces on the same configuration, - * only one is used. 
- */ - { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID, - USB_INTERFACE_SUBCLASS_BOOT, - USB_INTERFACE_PROTOCOL_MOUSE) }, - { USB_DEVICE_WACOM(0x84) }, + { USB_DEVICE_WACOM(0xCC) }, + { USB_DEVICE_WACOM(0xCE) }, { USB_DEVICE_WACOM(0xD0) }, { USB_DEVICE_WACOM(0xD1) }, { USB_DEVICE_WACOM(0xD2) }, @@ -2438,13 +2681,6 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xDD) }, { USB_DEVICE_WACOM(0xDE) }, { USB_DEVICE_WACOM(0xDF) }, - { USB_DEVICE_WACOM(0xF0) }, - { USB_DEVICE_WACOM(0xCC) }, - { USB_DEVICE_WACOM(0x90) }, - { USB_DEVICE_WACOM(0x93) }, - { USB_DEVICE_WACOM(0x97) }, - { USB_DEVICE_WACOM(0x9A) }, - { USB_DEVICE_WACOM(0x9F) }, { USB_DEVICE_WACOM(0xE2) }, { USB_DEVICE_WACOM(0xE3) }, { USB_DEVICE_WACOM(0xE5) }, @@ -2452,34 +2688,34 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xEC) }, { USB_DEVICE_WACOM(0xED) }, { USB_DEVICE_WACOM(0xEF) }, + { USB_DEVICE_WACOM(0xF0) }, + { USB_DEVICE_WACOM(0xF4) }, + { USB_DEVICE_WACOM(0xF6) }, + { USB_DEVICE_WACOM(0xF8) }, + { USB_DEVICE_WACOM(0xFA) }, + { USB_DEVICE_WACOM(0xFB) }, { USB_DEVICE_WACOM(0x100) }, { USB_DEVICE_WACOM(0x101) }, { USB_DEVICE_WACOM(0x10D) }, { USB_DEVICE_WACOM(0x10E) }, { USB_DEVICE_WACOM(0x10F) }, { USB_DEVICE_WACOM(0x116) }, + { USB_DEVICE_WACOM(0x12C) }, { USB_DEVICE_WACOM(0x300) }, { USB_DEVICE_WACOM(0x301) }, - { USB_DEVICE_DETAILED(0x302, USB_CLASS_HID, 0, 0) }, - { USB_DEVICE_DETAILED(0x303, USB_CLASS_HID, 0, 0) }, - { USB_DEVICE_DETAILED(0x30E, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_WACOM(0x302) }, + { USB_DEVICE_WACOM(0x303) }, { USB_DEVICE_WACOM(0x304) }, - { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) }, - { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) }, - { USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_WACOM(0x307) }, + { USB_DEVICE_WACOM(0x309) }, + { USB_DEVICE_WACOM(0x30E) }, + { USB_DEVICE_WACOM(0x314) }, + { USB_DEVICE_WACOM(0x315) }, + { USB_DEVICE_WACOM(0x317) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x4004) }, { USB_DEVICE_WACOM(0x5000) }, { USB_DEVICE_WACOM(0x5002) }, - { USB_DEVICE_WACOM(0x47) }, - { USB_DEVICE_WACOM(0xF4) }, - { USB_DEVICE_WACOM(0xF8) }, - { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) }, - { USB_DEVICE_WACOM(0xFA) }, - { USB_DEVICE_WACOM(0xFB) }, - { USB_DEVICE_WACOM(0x0307) }, - { USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) }, - { USB_DEVICE_LENOVO(0x6004) }, { } }; -MODULE_DEVICE_TABLE(usb, wacom_ids); +MODULE_DEVICE_TABLE(hid, wacom_ids); diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/hid/wacom_wac.h index b2c9a9c1b551..339ab5d81a2d 100644 --- a/drivers/input/tablet/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -46,6 +46,7 @@ /* wacom data packet report IDs */ #define WACOM_REPORT_PENABLED 2 +#define WACOM_REPORT_PENABLED_BT 3 #define WACOM_REPORT_INTUOSREAD 5 #define WACOM_REPORT_INTUOSWRITE 6 #define WACOM_REPORT_INTUOSPAD 12 @@ -68,10 +69,12 @@ #define WACOM_QUIRK_BBTOUCH_LOWRES 0x0002 #define WACOM_QUIRK_NO_INPUT 0x0004 #define WACOM_QUIRK_MONITOR 0x0008 +#define WACOM_QUIRK_BATTERY 0x0010 enum { PENPARTNER = 0, GRAPHIRE, + GRAPHIRE_BT, WACOM_G4, PTU, PL, @@ -83,6 +86,7 @@ enum { INTUOS3L, INTUOS4S, INTUOS4, + INTUOS4WL, INTUOS4L, INTUOS5S, INTUOS5, @@ -114,7 +118,6 @@ enum { struct wacom_features { const char *name; - int pktlen; int x_max; int y_max; int pressure_max; @@ -127,8 +130,8 @@ struct wacom_features { int device_type; int x_phy; int y_phy; - unsigned char unit; - unsigned char unitExpo; + unsigned unit; + int unitExpo; int x_fuzz; int y_fuzz; int pressure_fuzz; @@ -137,6 +140,9 @@ 
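The hunk above converts the Wacom device table from struct usb_device_id to struct hid_device_id: USB_DEVICE()/.driver_info gives way to HID_DEVICE()/HID_USB_DEVICE()/.driver_data, and MODULE_DEVICE_TABLE() now registers on the hid bus instead of usb. A minimal sketch of the resulting table shape for a hypothetical driver; the example_* names and the 0x1234/0x42 IDs are made up for illustration and are not part of the patch:

#include <linux/hid.h>
#include <linux/module.h>

struct example_features {
	const char *name;
	int x_max;
	int y_max;
};

static const struct example_features example_features_0x42 = {
	.name = "Example Tablet", .x_max = 21648, .y_max = 13700,
};

/* expands to the hid_device_id match fields plus a features pointer */
#define EXAMPLE_DEVICE(prod) \
	HID_USB_DEVICE(0x1234, prod), \
	.driver_data = (kernel_ulong_t)&example_features_##prod

static const struct hid_device_id example_ids[] = {
	{ EXAMPLE_DEVICE(0x42) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(hid, example_ids);

In the driver's probe(struct hid_device *hdev, const struct hid_device_id *id) callback, the per-device features are then recovered from id->driver_data rather than from the USB interface.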
struct wacom_features { unsigned touch_max; int oVid; int oPid; + int pktlen; + bool check_for_hid_type; + int hid_type; }; struct wacom_shared { @@ -150,16 +156,24 @@ struct wacom_shared { struct wacom_wac { char name[WACOM_NAME_MAX]; - unsigned char *data; + char pad_name[WACOM_NAME_MAX]; + char bat_name[WACOM_NAME_MAX]; + char ac_name[WACOM_NAME_MAX]; + unsigned char data[WACOM_PKGLEN_MAX]; int tool[2]; int id[2]; __u32 serial[2]; struct wacom_features features; struct wacom_shared *shared; struct input_dev *input; + struct input_dev *pad_input; int pid; int battery_capacity; int num_contacts_left; + int bat_charging; + int ps_connected; + u8 bt_features; + u8 bt_high_speed; }; #endif diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c index d3d0e8cf27b4..d6c767ace916 100644 --- a/drivers/hwmon/adm1025.c +++ b/drivers/hwmon/adm1025.c @@ -382,6 +382,9 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index ca8430f92564..e67b9a50ac7c 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -1085,6 +1085,9 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr, if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c index 22e0c926989d..126516414c11 100644 --- a/drivers/hwmon/ads1015.c +++ b/drivers/hwmon/ads1015.c @@ -212,6 +212,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client) dev_err(&client->dev, "invalid gain on %s\n", node->full_name); + return -EINVAL; } } @@ -222,6 +223,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client) dev_err(&client->dev, "invalid data_rate on %s\n", node->full_name); + return -EINVAL; } } diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c index f96063680e58..272fcc837ecc 100644 --- a/drivers/hwmon/asb100.c +++ b/drivers/hwmon/asb100.c @@ -510,6 +510,10 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, err = kstrtoul(buf, 10, &val); if (err) return err; + + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index ae208f612198..cccef87963e0 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -688,7 +688,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm, atk_debugfs_gitm_get, NULL, - "0x%08llx\n") + "0x%08llx\n"); static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj) { diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 4ae3fff13f44..bea0a344fab5 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -247,8 +247,8 @@ struct dme1737_data { u8 pwm_acz[3]; u8 pwm_freq[6]; u8 pwm_rr[2]; - u8 zone_low[3]; - u8 zone_abs[3]; + s8 zone_low[3]; + s8 zone_abs[3]; u8 zone_hyst[2]; u32 alarms; }; @@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res) return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2)); } -static inline int IN_TO_REG(int val, int nominal) +static inline int IN_TO_REG(long val, int nominal) { return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255); } @@ -293,7 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res) return (reg * 1000) >> (res - 8); } -static inline int TEMP_TO_REG(int val) +static 
inline int TEMP_TO_REG(long val) { return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127); } @@ -308,7 +308,7 @@ static inline int TEMP_RANGE_FROM_REG(int reg) return TEMP_RANGE[(reg >> 4) & 0x0f]; } -static int TEMP_RANGE_TO_REG(int val, int reg) +static int TEMP_RANGE_TO_REG(long val, int reg) { int i; @@ -331,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix) return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000; } -static inline int TEMP_HYST_TO_REG(int val, int ix, int reg) +static inline int TEMP_HYST_TO_REG(long val, int ix, int reg) { int hyst = clamp_val((val + 500) / 1000, 0, 15); @@ -347,7 +347,7 @@ static inline int FAN_FROM_REG(int reg, int tpc) return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg; } -static inline int FAN_TO_REG(int val, int tpc) +static inline int FAN_TO_REG(long val, int tpc) { if (tpc) { return clamp_val(val / tpc, 0, 0xffff); @@ -379,7 +379,7 @@ static inline int FAN_TYPE_FROM_REG(int reg) return (edge > 0) ? 1 << (edge - 1) : 0; } -static inline int FAN_TYPE_TO_REG(int val, int reg) +static inline int FAN_TYPE_TO_REG(long val, int reg) { int edge = (val == 4) ? 3 : val; @@ -402,7 +402,7 @@ static int FAN_MAX_FROM_REG(int reg) return 1000 + i * 500; } -static int FAN_MAX_TO_REG(int val) +static int FAN_MAX_TO_REG(long val) { int i; @@ -460,7 +460,7 @@ static inline int PWM_ACZ_FROM_REG(int reg) return acz[(reg >> 5) & 0x07]; } -static inline int PWM_ACZ_TO_REG(int val, int reg) +static inline int PWM_ACZ_TO_REG(long val, int reg) { int acz = (val == 4) ? 2 : val - 1; @@ -476,7 +476,7 @@ static inline int PWM_FREQ_FROM_REG(int reg) return PWM_FREQ[reg & 0x0f]; } -static int PWM_FREQ_TO_REG(int val, int reg) +static int PWM_FREQ_TO_REG(long val, int reg) { int i; @@ -510,7 +510,7 @@ static inline int PWM_RR_FROM_REG(int reg, int ix) return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0; } -static int PWM_RR_TO_REG(int val, int ix, int reg) +static int PWM_RR_TO_REG(long val, int ix, int reg) { int i; @@ -528,7 +528,7 @@ static inline int PWM_RR_EN_FROM_REG(int reg, int ix) return PWM_RR_FROM_REG(reg, ix) ? 1 : 0; } -static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg) +static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg) { int en = (ix == 1) ? 0x80 : 0x08; @@ -1481,13 +1481,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); - long val; + unsigned long val; int err; - err = kstrtol(buf, 10, &val); + err = kstrtoul(buf, 10, &val); if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index fc6f5d54e7f7..8890870309e4 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c @@ -309,6 +309,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da, data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT); i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf); data->update_interval = ds1721_convrates[resol]; + data->zbits = 7 - resol; mutex_unlock(&data->update_lock); return count; diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c index e87da902f3ae..ada90716448d 100644 --- a/drivers/hwmon/emc6w201.c +++ b/drivers/hwmon/emc6w201.c @@ -252,12 +252,12 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr, if (err < 0) return err; - val /= 1000; + val = DIV_ROUND_CLOSEST(val, 1000); reg = (sf == min) ? 
EMC6W201_REG_TEMP_LOW(nr) : EMC6W201_REG_TEMP_HIGH(nr); mutex_lock(&data->update_lock); - data->temp[sf][nr] = clamp_val(val, -127, 128); + data->temp[sf][nr] = clamp_val(val, -127, 127); err = emc6w201_write8(client, reg, data->temp[sf][nr]); mutex_unlock(&data->update_lock); diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 4a7cbfad1d74..fcdbde4ec692 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -93,13 +93,29 @@ static ssize_t show_power_crit(struct device *dev, } static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL); +static umode_t fam15h_power_is_visible(struct kobject *kobj, + struct attribute *attr, + int index) +{ + /* power1_input is only reported for Fam15h, Models 00h-0fh */ + if (attr == &dev_attr_power1_input.attr && + (boot_cpu_data.x86 != 0x15 || boot_cpu_data.x86_model > 0xf)) + return 0; + + return attr->mode; +} + static struct attribute *fam15h_power_attrs[] = { &dev_attr_power1_input.attr, &dev_attr_power1_crit.attr, NULL }; -ATTRIBUTE_GROUPS(fam15h_power); +static const struct attribute_group fam15h_power_group = { + .attrs = fam15h_power_attrs, + .is_visible = fam15h_power_is_visible, +}; +__ATTRIBUTE_GROUPS(fam15h_power); static bool fam15h_power_is_internal_node0(struct pci_dev *f4) { @@ -216,7 +232,9 @@ static int fam15h_power_probe(struct pci_dev *pdev, static const struct pci_device_id fam15h_power_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, {} }; MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c index 0e01c4e13e33..7b73d2002d3e 100644 --- a/drivers/hwmon/hih6130.c +++ b/drivers/hwmon/hih6130.c @@ -238,6 +238,9 @@ static int hih6130_probe(struct i2c_client *client, hih6130->client = client; mutex_init(&hih6130->lock); + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK)) + hih6130->write_length = 1; + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, hih6130, hih6130_groups); diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c index ba1d83d48056..a5e295826aea 100644 --- a/drivers/hwmon/lm87.c +++ b/drivers/hwmon/lm87.c @@ -617,6 +617,10 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, err = kstrtoul(buf, 10, &val); if (err) return err; + + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c index d2060e245ff5..cfaf70b9cba7 100644 --- a/drivers/hwmon/lm92.c +++ b/drivers/hwmon/lm92.c @@ -74,12 +74,9 @@ static inline int TEMP_FROM_REG(s16 reg) return reg / 8 * 625 / 10; } -static inline s16 TEMP_TO_REG(int val) +static inline s16 TEMP_TO_REG(long val) { - if (val <= -60000) - return -60000 * 10 / 625 * 8; - if (val >= 160000) - return 160000 * 10 / 625 * 8; + val = clamp_val(val, -60000, 160000); return val * 10 / 625 * 8; } @@ -206,10 +203,12 @@ static ssize_t set_temp_hyst(struct device *dev, if (err) return err; + val = clamp_val(val, -120000, 220000); mutex_lock(&data->update_lock); - data->temp[t_hyst] = TEMP_FROM_REG(data->temp[attr->index]) - val; + data->temp[t_hyst] = + TEMP_TO_REG(TEMP_FROM_REG(data->temp[attr->index]) - val); i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST, - TEMP_TO_REG(data->temp[t_hyst])); + data->temp[t_hyst]); mutex_unlock(&data->update_lock); return count; } diff 
--git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c index 988181e4cfcd..145f674c1d87 100644 --- a/drivers/hwmon/pc87360.c +++ b/drivers/hwmon/pc87360.c @@ -615,6 +615,9 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c index c74d2da389d9..ad571ec795a3 100644 --- a/drivers/hwmon/tmp103.c +++ b/drivers/hwmon/tmp103.c @@ -131,13 +131,6 @@ static int tmp103_probe(struct i2c_client *client, struct regmap *regmap; int ret; - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_err(&client->dev, - "adapter doesn't support SMBus byte transactions\n"); - return -ENODEV; - } - regmap = devm_regmap_init_i2c(client, &tmp103_regmap_config); if (IS_ERR(regmap)) { dev_err(dev, "failed to allocate register map\n"); @@ -152,7 +145,7 @@ static int tmp103_probe(struct i2c_client *client, } i2c_set_clientdata(client, regmap); - hwmon_dev = hwmon_device_register_with_groups(dev, client->name, + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, regmap, tmp103_groups); return PTR_ERR_OR_ZERO(hwmon_dev); } diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c index 344b22ec2553..3ea57c3504e2 100644 --- a/drivers/hwmon/vt1211.c +++ b/drivers/hwmon/vt1211.c @@ -879,6 +879,9 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c index c1726be3654c..2f55973a8c4c 100644 --- a/drivers/hwmon/w83627hf.c +++ b/drivers/hwmon/w83627hf.c @@ -820,6 +820,9 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf err = kstrtoul(buf, 10, &val); if (err) return err; + + if (val > 255) + return -EINVAL; data->vrm = val; return count; diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index cb3765fec98c..001df856913f 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c @@ -1181,6 +1181,9 @@ static ssize_t store_vrm_reg(struct device *dev, if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 9d63d71214ca..816aa6caf5d5 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -353,6 +353,9 @@ store_vrm(struct device *dev, struct device_attribute *attr, if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig index 70637d23b1f9..3612cb5b30b2 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig @@ -10,7 +10,7 @@ menu "Hardware Spinlock drivers" config HWSPINLOCK_OMAP tristate "OMAP Hardware Spinlock device" - depends on ARCH_OMAP4 || SOC_OMAP5 + depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX select HWSPINLOCK help Say y here to support the OMAP Hardware Spinlock device (firstly diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c index 292869cc9034..c1e2cd4d85fe 100644 --- a/drivers/hwspinlock/omap_hwspinlock.c +++ b/drivers/hwspinlock/omap_hwspinlock.c @@ -98,10 +98,29 @@ static int omap_hwspinlock_probe(struct platform_device *pdev) if (!io_base) return -ENOMEM; + /* + * make sure the module is enabled and clocked before reading + * the module SYSSTATUS register + */ + 
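The adm1025, adm1026, asb100, dme1737, lm87, pc87360, vt1211, w83627hf, w83791d and w83793 hunks above all add the same guard before caching a user-supplied VRM value: the input is parsed with kstrtoul() into an unsigned long but stored in a u8, so values above 255 used to be silently truncated instead of rejected. A generic sketch of the resulting sysfs-store pattern; the example_* names are placeholders, not code from any of these drivers:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>

struct example_data {
	u8 vrm;		/* VRM version, cached as a single byte */
};

static ssize_t example_set_vrm(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct example_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	if (val > 255)		/* would not fit in the u8 cache */
		return -EINVAL;

	data->vrm = val;
	return count;
}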
pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + goto iounmap_base; + } + /* Determine number of locks */ i = readl(io_base + SYSSTATUS_OFFSET); i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET; + /* + * runtime PM will make sure the clock of this module is + * enabled again iff at least one lock is requested + */ + ret = pm_runtime_put(&pdev->dev); + if (ret < 0) + goto iounmap_base; + /* one of the four lsb's must be set, and nothing else */ if (hweight_long(i & 0xf) != 1 || i > 8) { ret = -EINVAL; @@ -121,12 +140,6 @@ static int omap_hwspinlock_probe(struct platform_device *pdev) for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++) hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i; - /* - * runtime PM will make sure the clock of this module is - * enabled iff at least one lock is requested - */ - pm_runtime_enable(&pdev->dev); - ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops, pdata->base_id, num_locks); if (ret) @@ -135,9 +148,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev) return 0; reg_fail: - pm_runtime_disable(&pdev->dev); kfree(bank); iounmap_base: + pm_runtime_disable(&pdev->dev); iounmap(io_base); return ret; } diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 7b7ea320a258..b51a402752c4 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -2,7 +2,9 @@ # I2C subsystem configuration # -menuconfig I2C +menu "I2C support" + +config I2C tristate "I2C support" select RT_MUTEXES ---help--- @@ -21,6 +23,15 @@ menuconfig I2C This I2C support can also be built as a module. If so, the module will be called i2c-core. +config ACPI_I2C_OPREGION + bool "ACPI I2C Operation region support" + depends on I2C=y && ACPI + default y + help + Say Y here if you want to enable ACPI I2C operation region support. + Operation Regions allow firmware (BIOS) code to access I2C slave devices, + such as smart batteries through an I2C host controller driver. + if I2C config I2C_BOARDINFO @@ -124,3 +135,5 @@ config I2C_DEBUG_BUS on. endif # I2C + +endmenu diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a49bfd7a1ed0..2e45ae3796f1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -109,6 +109,7 @@ config I2C_I801 Avoton (SOC) Wellsburg (PCH) Coleto Creek (PCH) + Wildcat Point (PCH) Wildcat Point-LP (PCH) BayTrail (SOC) @@ -465,9 +466,9 @@ config I2C_EG20T config I2C_EXYNOS5 tristate "Exynos5 high-speed I2C driver" depends on ARCH_EXYNOS5 && OF + default y help - Say Y here to include support for high-speed I2C controller in the - Exynos5 based Samsung SoCs. + High-speed I2C controller on Exynos5 based Samsung SoCs. config I2C_GPIO tristate "GPIO-based bitbanging I2C" @@ -700,16 +701,6 @@ config I2C_S3C2410 Say Y here to include support for I2C controller in the Samsung SoCs. -config I2C_S6000 - tristate "S6000 I2C support" - depends on XTENSA_VARIANT_S6000 - help - This driver supports the on chip I2C device on the - S6000 xtensa processor family. - - To compile this driver as a module, choose M here. The module - will be called i2c-s6000. - config I2C_SH7760 tristate "Renesas SH7760 I2C Controller" depends on CPU_SUBTYPE_SH7760 @@ -1018,37 +1009,6 @@ config I2C_CROS_EC_TUNNEL connected there. This will work whatever the interface used to talk to the EC (SPI, I2C or LPC). 
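The omap_hwspinlock probe change above wraps the SYSSTATUS read in a runtime-PM get/put pair so the IP block is enabled and clocked while the register is read, and moves pm_runtime_disable() into the shared error path. A condensed sketch of that probe-time pattern for a generic platform device; the example_* name and the register layout are illustrative only:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_read_hw_config(struct platform_device *pdev,
				  void __iomem *io_base, u32 *config)
{
	int ret;

	/* power and clock the block before touching its registers */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	*config = readl(io_base);	/* e.g. a SYSSTATUS/ID register */

	/* drop the reference; runtime PM may gate the clock again */
	ret = pm_runtime_put(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	return 0;
}

The point of the change is that pm_runtime_enable()/pm_runtime_get_sync() now happen before the first register access rather than only before hwspin_lock_register(), so the SYSSTATUS read cannot hit an unclocked module.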
-config SCx200_I2C - tristate "NatSemi SCx200 I2C using GPIO pins (DEPRECATED)" - depends on SCx200_GPIO - select I2C_ALGOBIT - help - Enable the use of two GPIO pins of a SCx200 processor as an I2C bus. - - If you don't know what to do here, say N. - - This support is also available as a module. If so, the module - will be called scx200_i2c. - - This driver is deprecated and will be dropped soon. Use i2c-gpio - (or scx200_acb) instead. - -config SCx200_I2C_SCL - int "GPIO pin used for SCL" - depends on SCx200_I2C - default "12" - help - Enter the GPIO pin number used for the SCL signal. This value can - also be specified with a module parameter. - -config SCx200_I2C_SDA - int "GPIO pin used for SDA" - depends on SCx200_I2C - default "13" - help - Enter the GPIO pin number used for the SSA signal. This value can - also be specified with a module parameter. - config SCx200_ACB tristate "Geode ACCESS.bus support" depends on X86_32 && PCI diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index dd9a7f8e873f..49bf07e5ef4d 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -68,7 +68,6 @@ obj-$(CONFIG_I2C_QUP) += i2c-qup.o obj-$(CONFIG_I2C_RIIC) += i2c-riic.o obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o -obj-$(CONFIG_I2C_S6000) += i2c-s6000.o obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o @@ -101,6 +100,5 @@ obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o obj-$(CONFIG_SCx200_ACB) += scx200_acb.o -obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index e95f9ba96790..917d54588d95 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -101,6 +101,7 @@ struct at91_twi_dev { unsigned twi_cwgr_reg; struct at91_twi_pdata *pdata; bool use_dma; + bool recv_len_abort; struct at91_twi_dma dma; }; @@ -210,7 +211,7 @@ static void at91_twi_write_data_dma_callback(void *data) struct at91_twi_dev *dev = (struct at91_twi_dev *)data; dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), - dev->buf_len, DMA_MEM_TO_DEV); + dev->buf_len, DMA_TO_DEVICE); at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); } @@ -267,12 +268,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff; --dev->buf_len; + /* return if aborting, we only needed to read RHR to clear RXRDY*/ + if (dev->recv_len_abort) + return; + /* handle I2C_SMBUS_BLOCK_DATA */ if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) { - dev->msg->flags &= ~I2C_M_RECV_LEN; - dev->buf_len += *dev->buf; - dev->msg->len = dev->buf_len + 1; - dev_dbg(dev->dev, "received block length %d\n", dev->buf_len); + /* ensure length byte is a valid value */ + if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) { + dev->msg->flags &= ~I2C_M_RECV_LEN; + dev->buf_len += *dev->buf; + dev->msg->len = dev->buf_len + 1; + dev_dbg(dev->dev, "received block length %d\n", + dev->buf_len); + } else { + /* abort and send the stop by reading one more byte */ + dev->recv_len_abort = true; + dev->buf_len = 1; + } } /* send stop if second but last byte has been read */ @@ -289,7 +302,7 @@ static void at91_twi_read_data_dma_callback(void *data) struct at91_twi_dev *dev = (struct at91_twi_dev *)data; dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), - 
dev->buf_len, DMA_DEV_TO_MEM); + dev->buf_len, DMA_FROM_DEVICE); /* The last two bytes have to be read without using dma */ dev->buf += dev->buf_len - 2; @@ -421,8 +434,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) } } - ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, - dev->adapter.timeout); + ret = wait_for_completion_io_timeout(&dev->cmd_complete, + dev->adapter.timeout); if (ret == 0) { dev_err(dev->dev, "controller timed out\n"); at91_init_twi_bus(dev); @@ -444,6 +457,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) ret = -EIO; goto error; } + if (dev->recv_len_abort) { + dev_err(dev->dev, "invalid smbus block length recvd\n"); + ret = -EPROTO; + goto error; + } + dev_dbg(dev->dev, "transfer complete\n"); return 0; @@ -500,6 +519,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) dev->buf_len = m_start->len; dev->buf = m_start->buf; dev->msg = m_start; + dev->recv_len_abort = false; ret = at91_do_twi_transfer(dev); @@ -768,7 +788,7 @@ static int at91_twi_probe(struct platform_device *pdev) snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91"); i2c_set_adapdata(&dev->adapter, dev); dev->adapter.owner = THIS_MODULE; - dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; + dev->adapter.class = I2C_CLASS_DEPRECATED; dev->adapter.algo = &at91_twi_algorithm; dev->adapter.dev.parent = dev->dev; dev->adapter.nr = pdev->id; diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 214ff9700efe..4b8ecd0b3661 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -277,7 +277,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) adap = &i2c_dev->adapter; i2c_set_adapdata(adap, i2c_dev); adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; + adap->class = I2C_CLASS_DEPRECATED; strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name)); adap->algo = &bcm2835_i2c_algo; adap->dev.parent = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index 3e271e7558d3..067c1615e968 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -648,7 +648,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); p_adap->algo = &bfin_twi_algorithm; p_adap->algo_data = iface; - p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; + p_adap->class = I2C_CLASS_DEPRECATED; p_adap->dev.parent = &pdev->dev; p_adap->timeout = 5 * HZ; p_adap->retries = 3; diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index 8e7a71487bb1..05e033c98115 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -183,6 +183,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], u8 *request = NULL; u8 *response = NULL; int result; + struct cros_ec_command msg; request_len = ec_i2c_count_message(i2c_msgs, num); if (request_len < 0) { @@ -218,10 +219,16 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], } ec_i2c_construct_message(request, i2c_msgs, num, bus_num); - result = bus->ec->command_sendrecv(bus->ec, EC_CMD_I2C_PASSTHRU, - request, request_len, - response, response_len); - if (result) + + msg.version = 0; + msg.command = EC_CMD_I2C_PASSTHRU; + msg.outdata = request; + msg.outsize = request_len; + msg.indata = response; + msg.insize = 
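The i2c-at91 hunks above stop trusting the length byte of an SMBus block read: if the slave returns a value outside 1..I2C_SMBUS_BLOCK_MAX, the driver now aborts the transfer (reading one final byte to clear RXRDY and send the stop) and fails with -EPROTO instead of growing msg->len past the caller's buffer. A stand-alone sketch of just the length check; the helper name is made up for illustration:

#include <linux/errno.h>
#include <linux/i2c.h>

/*
 * Fold a freshly received SMBus block-length byte into the message,
 * or reject it if the slave sent a length outside the legal range.
 */
static int example_apply_block_length(struct i2c_msg *msg, u8 len_byte,
				      int *bytes_left)
{
	if (len_byte == 0 || len_byte > I2C_SMBUS_BLOCK_MAX)
		return -EPROTO;	/* caller aborts and sends the stop */

	msg->flags &= ~I2C_M_RECV_LEN;
	*bytes_left += len_byte;	/* data bytes still to read */
	msg->len = *bytes_left + 1;	/* plus the length byte itself */
	return 0;
}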
response_len; + + result = bus->ec->cmd_xfer(bus->ec, &msg); + if (result < 0) goto exit; result = ec_i2c_parse_response(response, i2c_msgs, &num); @@ -258,7 +265,7 @@ static int ec_i2c_probe(struct platform_device *pdev) u32 remote_bus; int err; - if (!ec->command_sendrecv) { + if (!ec->cmd_xfer) { dev_err(dev, "Missing sendrecv\n"); return -EINVAL; } diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 389bc68c55ad..4d9614719128 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -712,7 +712,7 @@ static int davinci_i2c_probe(struct platform_device *pdev) adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; + adap->class = I2C_CLASS_DEPRECATED; strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name)); adap->algo = &i2c_davinci_algo; adap->dev.parent = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 3356f7ab9f79..d31d313ab4f7 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -188,6 +188,7 @@ static struct dw_pci_controller dw_pci_controllers[] = { .scl_sda_cfg = &hsw_config, }, }; + static struct i2c_algorithm i2c_dw_algo = { .master_xfer = i2c_dw_xfer, .functionality = i2c_dw_func, @@ -350,6 +351,14 @@ static const struct pci_device_id i2_designware_pci_ids[] = { /* Haswell */ { PCI_VDEVICE(INTEL, 0x9c61), haswell }, { PCI_VDEVICE(INTEL, 0x9c62), haswell }, + /* Braswell / Cherrytrail */ + { PCI_VDEVICE(INTEL, 0x22C1), baytrail,}, + { PCI_VDEVICE(INTEL, 0x22C2), baytrail }, + { PCI_VDEVICE(INTEL, 0x22C3), baytrail }, + { PCI_VDEVICE(INTEL, 0x22C4), baytrail }, + { PCI_VDEVICE(INTEL, 0x22C5), baytrail }, + { PCI_VDEVICE(INTEL, 0x22C6), baytrail }, + { PCI_VDEVICE(INTEL, 0x22C7), baytrail }, { 0,} }; MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 402ec3970fed..bc8773333155 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -106,6 +106,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT3432", 0 }, { "INT3433", 0 }, { "80860F41", 0 }, + { "808622C1", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); @@ -202,7 +203,7 @@ static int dw_i2c_probe(struct platform_device *pdev) adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; + adap->class = I2C_CLASS_DEPRECATED; strlcpy(adap->name, "Synopsys DesignWare I2C adapter", sizeof(adap->name)); adap->algo = &i2c_dw_algo; diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c index f7eccd682de9..10b8323b08d4 100644 --- a/drivers/i2c/busses/i2c-efm32.c +++ b/drivers/i2c/busses/i2c-efm32.c @@ -370,7 +370,13 @@ static int efm32_i2c_probe(struct platform_device *pdev) return ret; } - ret = of_property_read_u32(np, "efm32,location", &location); + + ret = of_property_read_u32(np, "energymicro,location", &location); + + if (ret) + /* fall back to wrongly namespaced property */ + ret = of_property_read_u32(np, "efm32,location", &location); + if (!ret) { dev_dbg(&pdev->dev, "using location %u\n", location); } else { diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index 63d229202854..28073f1d6d47 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ 
b/drivers/i2c/busses/i2c-exynos5.c @@ -405,7 +405,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id) int_status = readl(i2c->regs + HSI2C_INT_STATUS); writel(int_status, i2c->regs + HSI2C_INT_STATUS); - fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS); /* handle interrupt related to the transfer status */ if (int_status & HSI2C_INT_I2C) { @@ -526,7 +525,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) if (i2c->msg->flags & I2C_M_RD) { i2c_ctl |= HSI2C_RXCHON; - i2c_auto_conf = HSI2C_READ_WRITE; + i2c_auto_conf |= HSI2C_READ_WRITE; trig_lvl = (i2c->msg->len > i2c->variant->fifo_depth) ? (i2c->variant->fifo_depth * 3 / 4) : i2c->msg->len; @@ -549,7 +548,6 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop) writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL); writel(i2c_ctl, i2c->regs + HSI2C_CTL); - /* * Enable interrupts before starting the transfer so that we don't * miss any INT_I2C interrupts. @@ -789,8 +787,16 @@ static int exynos5_i2c_resume_noirq(struct device *dev) } #endif -static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq, - exynos5_i2c_resume_noirq); +static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend_noirq = exynos5_i2c_suspend_noirq, + .resume_noirq = exynos5_i2c_resume_noirq, + .freeze_noirq = exynos5_i2c_suspend_noirq, + .thaw_noirq = exynos5_i2c_resume_noirq, + .poweroff_noirq = exynos5_i2c_suspend_noirq, + .restore_noirq = exynos5_i2c_resume_noirq, +#endif +}; static struct platform_driver exynos5_i2c_driver = { .probe = exynos5_i2c_probe, diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c index 71a45b210a24..933f1e453e41 100644 --- a/drivers/i2c/busses/i2c-gpio.c +++ b/drivers/i2c/busses/i2c-gpio.c @@ -238,12 +238,10 @@ static int i2c_gpio_probe(struct platform_device *pdev) static int i2c_gpio_remove(struct platform_device *pdev) { struct i2c_gpio_private_data *priv; - struct i2c_gpio_platform_data *pdata; struct i2c_adapter *adap; priv = platform_get_drvdata(pdev); adap = &priv->adap; - pdata = &priv->pdata; i2c_del_adapter(adap); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 6777cd6f8776..10467a327749 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -22,57 +22,58 @@ */ /* - Supports the following Intel I/O Controller Hubs (ICH): - - I/O Block I2C - region SMBus Block proc. 
block - Chip name PCI ID size PEC buffer call read - ---------------------------------------------------------------------- - 82801AA (ICH) 0x2413 16 no no no no - 82801AB (ICH0) 0x2423 16 no no no no - 82801BA (ICH2) 0x2443 16 no no no no - 82801CA (ICH3) 0x2483 32 soft no no no - 82801DB (ICH4) 0x24c3 32 hard yes no no - 82801E (ICH5) 0x24d3 32 hard yes yes yes - 6300ESB 0x25a4 32 hard yes yes yes - 82801F (ICH6) 0x266a 32 hard yes yes yes - 6310ESB/6320ESB 0x269b 32 hard yes yes yes - 82801G (ICH7) 0x27da 32 hard yes yes yes - 82801H (ICH8) 0x283e 32 hard yes yes yes - 82801I (ICH9) 0x2930 32 hard yes yes yes - EP80579 (Tolapai) 0x5032 32 hard yes yes yes - ICH10 0x3a30 32 hard yes yes yes - ICH10 0x3a60 32 hard yes yes yes - 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes - 6 Series (PCH) 0x1c22 32 hard yes yes yes - Patsburg (PCH) 0x1d22 32 hard yes yes yes - Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes - Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes - Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes - DH89xxCC (PCH) 0x2330 32 hard yes yes yes - Panther Point (PCH) 0x1e22 32 hard yes yes yes - Lynx Point (PCH) 0x8c22 32 hard yes yes yes - Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes - Avoton (SOC) 0x1f3c 32 hard yes yes yes - Wellsburg (PCH) 0x8d22 32 hard yes yes yes - Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes - Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes - Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes - Coleto Creek (PCH) 0x23b0 32 hard yes yes yes - Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes - BayTrail (SOC) 0x0f12 32 hard yes yes yes - - Features supported by this driver: - Software PEC no - Hardware PEC yes - Block buffer yes - Block process call transaction no - I2C block read transaction yes (doesn't use the block buffer) - Slave mode no - Interrupt processing yes - - See the file Documentation/i2c/busses/i2c-i801 for details. -*/ + * Supports the following Intel I/O Controller Hubs (ICH): + * + * I/O Block I2C + * region SMBus Block proc. 
block + * Chip name PCI ID size PEC buffer call read + * --------------------------------------------------------------------------- + * 82801AA (ICH) 0x2413 16 no no no no + * 82801AB (ICH0) 0x2423 16 no no no no + * 82801BA (ICH2) 0x2443 16 no no no no + * 82801CA (ICH3) 0x2483 32 soft no no no + * 82801DB (ICH4) 0x24c3 32 hard yes no no + * 82801E (ICH5) 0x24d3 32 hard yes yes yes + * 6300ESB 0x25a4 32 hard yes yes yes + * 82801F (ICH6) 0x266a 32 hard yes yes yes + * 6310ESB/6320ESB 0x269b 32 hard yes yes yes + * 82801G (ICH7) 0x27da 32 hard yes yes yes + * 82801H (ICH8) 0x283e 32 hard yes yes yes + * 82801I (ICH9) 0x2930 32 hard yes yes yes + * EP80579 (Tolapai) 0x5032 32 hard yes yes yes + * ICH10 0x3a30 32 hard yes yes yes + * ICH10 0x3a60 32 hard yes yes yes + * 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes + * 6 Series (PCH) 0x1c22 32 hard yes yes yes + * Patsburg (PCH) 0x1d22 32 hard yes yes yes + * Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes + * Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes + * Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes + * DH89xxCC (PCH) 0x2330 32 hard yes yes yes + * Panther Point (PCH) 0x1e22 32 hard yes yes yes + * Lynx Point (PCH) 0x8c22 32 hard yes yes yes + * Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes + * Avoton (SOC) 0x1f3c 32 hard yes yes yes + * Wellsburg (PCH) 0x8d22 32 hard yes yes yes + * Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes + * Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes + * Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes + * Coleto Creek (PCH) 0x23b0 32 hard yes yes yes + * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes + * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes + * BayTrail (SOC) 0x0f12 32 hard yes yes yes + * + * Features supported by this driver: + * Software PEC no + * Hardware PEC yes + * Block buffer yes + * Block process call transaction no + * I2C block read transaction yes (doesn't use the block buffer) + * Slave mode no + * Interrupt processing yes + * + * See the file Documentation/i2c/busses/i2c-i801 for details. 
+ */ #include <linux/interrupt.h> #include <linux/module.h> @@ -162,24 +163,26 @@ STATUS_ERROR_FLAGS) /* Older devices have their ID defined in <linux/pci_ids.h> */ -#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 -#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 -#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 +#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 +#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ -#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 -#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 -#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 -#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22 -#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c -#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 -#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 -#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 -#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22 -#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d -#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e -#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 +#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22 +#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c +#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 +#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 +#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2 +#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22 +#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d +#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e +#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 struct i801_mux_config { @@ -823,8 +826,10 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index aa8bc146718b..613069bc561a 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -735,10 +735,7 @@ static int i2c_imx_probe(struct platform_device *pdev) clk_disable_unprepare(i2c_imx->clk); dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq); - dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n", - res->start, res->end); - dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x\n", - resource_size(res), res->start); + dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", 
i2c_imx->adapter.name); dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 984492553e95..d9ee43c80cde 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -497,7 +497,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, desc->wr_len_cmd = dma_size; desc->control |= ISMT_DESC_BLK; priv->dma_buffer[0] = command; - memcpy(&priv->dma_buffer[1], &data->block[1], dma_size); + memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1); } else { /* Block Read */ dev_dbg(dev, "I2C_SMBUS_BLOCK_DATA: READ\n"); @@ -525,7 +525,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, desc->wr_len_cmd = dma_size; desc->control |= ISMT_DESC_I2C; priv->dma_buffer[0] = command; - memcpy(&priv->dma_buffer[1], &data->block[1], dma_size); + memcpy(&priv->dma_buffer[1], &data->block[1], dma_size - 1); } else { /* i2c Block Read */ dev_dbg(dev, "I2C_SMBUS_I2C_BLOCK_DATA: READ\n"); diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 6a32aa095f83..0edf630b099a 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -341,8 +341,7 @@ static u32 mpc_i2c_get_sec_cfg_8xxx(void) iounmap(reg); } } - if (node) - of_node_put(node); + of_node_put(node); return val; } diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 9f4b775e2e39..2f64273d3f2b 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -746,8 +746,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, } tclk = clk_get_rate(drv_data->clk); - rc = of_property_read_u32(np, "clock-frequency", &bus_freq); - if (rc) + if (of_property_read_u32(np, "clock-frequency", &bus_freq)) bus_freq = 100000; /* 100kHz by default */ if (!mv64xxx_find_baud_factors(bus_freq, tclk, @@ -863,7 +862,7 @@ mv64xxx_i2c_probe(struct platform_device *pd) drv_data->adapter.dev.parent = &pd->dev; drv_data->adapter.algo = &mv64xxx_i2c_algo; drv_data->adapter.owner = THIS_MODULE; - drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; + drv_data->adapter.class = I2C_CLASS_DEPRECATED; drv_data->adapter.nr = pd->id; drv_data->adapter.dev.of_node = pd->dev.of_node; platform_set_drvdata(pd, drv_data); diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 7170fc892829..65a21fed08b5 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -429,7 +429,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, ret = mxs_i2c_pio_wait_xfer_end(i2c); if (ret) { dev_err(i2c->dev, - "PIO: Failed to send SELECT command!\n"); + "PIO: Failed to send READ command!\n"); goto cleanup; } diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 0e55d85fd4ed..9ad038d223c4 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1032,10 +1032,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) adap = &dev->adap; adap->dev.of_node = np; adap->dev.parent = &adev->dev; - adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; - adap->algo = &nmk_i2c_algo; - adap->timeout = msecs_to_jiffies(dev->timeout); + adap->owner = THIS_MODULE; + adap->class = I2C_CLASS_DEPRECATED; + adap->algo = &nmk_i2c_algo; + adap->timeout = msecs_to_jiffies(dev->timeout); snprintf(adap->name, sizeof(adap->name), "Nomadik I2C at %pR", &adev->res); diff --git 
a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 0e10cc6182f0..2a4fe0b7cfb7 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -239,15 +239,15 @@ static u32 ocores_func(struct i2c_adapter *adap) } static const struct i2c_algorithm ocores_algorithm = { - .master_xfer = ocores_xfer, - .functionality = ocores_func, + .master_xfer = ocores_xfer, + .functionality = ocores_func, }; static struct i2c_adapter ocores_adapter = { - .owner = THIS_MODULE, - .name = "i2c-ocores", - .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED, - .algo = &ocores_algorithm, + .owner = THIS_MODULE, + .name = "i2c-ocores", + .class = I2C_CLASS_DEPRECATED, + .algo = &ocores_algorithm, }; static const struct of_device_id ocores_i2c_match[] = { diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b182793a4051..0dffb0e62c3b 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1236,7 +1236,7 @@ omap_i2c_probe(struct platform_device *pdev) adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; - adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; + adap->class = I2C_CLASS_DEPRECATED; strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); adap->algo = &omap_i2c_algo; adap->dev.parent = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 2a5efb5b487c..092d89bd3224 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -633,13 +633,17 @@ static int qup_i2c_probe(struct platform_device *pdev) * associated with each byte written/received */ size = QUP_OUTPUT_BLOCK_SIZE(io_mode); - if (size >= ARRAY_SIZE(blk_sizes)) - return -EIO; + if (size >= ARRAY_SIZE(blk_sizes)) { + ret = -EIO; + goto fail; + } qup->out_blk_sz = blk_sizes[size] / 2; size = QUP_INPUT_BLOCK_SIZE(io_mode); - if (size >= ARRAY_SIZE(blk_sizes)) - return -EIO; + if (size >= ARRAY_SIZE(blk_sizes)) { + ret = -EIO; + goto fail; + } qup->in_blk_sz = blk_sizes[size] / 2; size = QUP_OUTPUT_FIFO_SIZE(io_mode); @@ -670,16 +674,20 @@ static int qup_i2c_probe(struct platform_device *pdev) qup->adap.dev.of_node = pdev->dev.of_node; strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name)); - ret = i2c_add_adapter(&qup->adap); - if (ret) - goto fail; - pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(qup->dev); pm_runtime_set_active(qup->dev); pm_runtime_enable(qup->dev); + + ret = i2c_add_adapter(&qup->adap); + if (ret) + goto fail_runtime; + return 0; +fail_runtime: + pm_runtime_disable(qup->dev); + pm_runtime_set_suspended(qup->dev); fail: qup_i2c_disable_clocks(qup); return ret; diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 899405923678..e506fcd3ca04 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -34,6 +34,7 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> +#include <linux/spinlock.h> /* register offsets */ #define ICSCR 0x00 /* slave ctrl */ @@ -75,8 +76,8 @@ #define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR) #define RCAR_IRQ_STOP (MST) -#define RCAR_IRQ_ACK_SEND (~(MAT | MDE)) -#define RCAR_IRQ_ACK_RECV (~(MAT | MDR)) +#define RCAR_IRQ_ACK_SEND (~(MAT | MDE) & 0xFF) +#define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0xFF) #define ID_LAST_MSG (1 << 0) #define ID_IOERROR (1 << 1) @@ -95,6 +96,7 @@ struct rcar_i2c_priv { struct i2c_msg *msg; struct clk *clk; + spinlock_t lock; wait_queue_head_t wait; 
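
[Editorial note, not part of the patch: the i2c-rcar hunks in this area add a spinlock so the interrupt handler and the transfer-setup path no longer race on the controller registers. The pattern they follow is the standard ISR-versus-process-context split sketched below; all names here are hypothetical and only illustrate why the ISR may use plain spin_lock() while process context needs spin_lock_irqsave().]

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_priv {
	spinlock_t lock;	/* serialises the ISR against xfer setup */
	u32 flags;
};

static irqreturn_t demo_irq(int irq, void *ptr)
{
	struct demo_priv *p = ptr;

	/* hard-IRQ context: interrupts are already disabled, plain spin_lock() */
	spin_lock(&p->lock);
	p->flags |= 1;
	spin_unlock(&p->lock);

	return IRQ_HANDLED;
}

static void demo_prepare_xfer(struct demo_priv *p)
{
	unsigned long flags;

	/* process context: mask IRQs so demo_irq() cannot deadlock on the lock */
	spin_lock_irqsave(&p->lock, flags);
	p->flags = 0;
	spin_unlock_irqrestore(&p->lock, flags);
}

[In the rcar hunks the same split shows up as spin_lock() in rcar_i2c_irq() and spin_lock_irqsave() around rcar_i2c_init() and rcar_i2c_prepare_msg().]
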
int pos; @@ -365,20 +367,20 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr) struct rcar_i2c_priv *priv = ptr; u32 msr; + /*-------------- spin lock -----------------*/ + spin_lock(&priv->lock); + msr = rcar_i2c_read(priv, ICMSR); + /* Only handle interrupts that are currently enabled */ + msr &= rcar_i2c_read(priv, ICMIER); + /* Arbitration lost */ if (msr & MAL) { rcar_i2c_flags_set(priv, (ID_DONE | ID_ARBLOST)); goto out; } - /* Stop */ - if (msr & MST) { - rcar_i2c_flags_set(priv, ID_DONE); - goto out; - } - /* Nack */ if (msr & MNR) { /* go to stop phase */ @@ -388,6 +390,12 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr) goto out; } + /* Stop */ + if (msr & MST) { + rcar_i2c_flags_set(priv, ID_DONE); + goto out; + } + if (rcar_i2c_is_recv(priv)) rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr)); else @@ -400,6 +408,9 @@ out: wake_up(&priv->wait); } + spin_unlock(&priv->lock); + /*-------------- spin unlock -----------------*/ + return IRQ_HANDLED; } @@ -409,14 +420,21 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); struct device *dev = rcar_i2c_priv_to_dev(priv); + unsigned long flags; int i, ret, timeout; pm_runtime_get_sync(dev); + /*-------------- spin lock -----------------*/ + spin_lock_irqsave(&priv->lock, flags); + rcar_i2c_init(priv); /* start clock */ rcar_i2c_write(priv, ICCCR, priv->icccr); + spin_unlock_irqrestore(&priv->lock, flags); + /*-------------- spin unlock -----------------*/ + ret = rcar_i2c_bus_barrier(priv); if (ret < 0) goto out; @@ -428,6 +446,9 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, break; } + /*-------------- spin lock -----------------*/ + spin_lock_irqsave(&priv->lock, flags); + /* init each data */ priv->msg = &msgs[i]; priv->pos = 0; @@ -437,6 +458,9 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, ret = rcar_i2c_prepare_msg(priv); + spin_unlock_irqrestore(&priv->lock, flags); + /*-------------- spin unlock -----------------*/ + if (ret < 0) break; @@ -540,14 +564,15 @@ static int rcar_i2c_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); init_waitqueue_head(&priv->wait); - - adap = &priv->adap; - adap->nr = pdev->id; - adap->algo = &rcar_i2c_algo; - adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; - adap->retries = 3; - adap->dev.parent = dev; - adap->dev.of_node = dev->of_node; + spin_lock_init(&priv->lock); + + adap = &priv->adap; + adap->nr = pdev->id; + adap->algo = &rcar_i2c_algo; + adap->class = I2C_CLASS_DEPRECATED; + adap->retries = 3; + adap->dev.parent = dev; + adap->dev.of_node = dev->of_node; i2c_set_adapdata(adap, priv); strlcpy(adap->name, pdev->name, sizeof(adap->name)); diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index a9791509966a..b38b0529946a 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -238,7 +238,7 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c) for (i = 0; i < 8; ++i) { val = 0; for (j = 0; j < 4; ++j) { - if (i2c->processed == i2c->msg->len) + if ((i2c->processed == i2c->msg->len) && (cnt != 0)) break; if (i2c->processed == 0 && cnt == 0) @@ -323,6 +323,10 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) /* ack interrupt */ i2c_writel(i2c, REG_INT_MBRF, REG_IPD); + /* Can only handle a maximum of 32 bytes at a time */ + if (len > 32) + len = 32; + /* read the data from receive buffer */ for (i = 0; i < len; ++i) { if (i % 4 == 0) @@ -399,7 +403,7 @@ static 
irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id) } /* is there anything left to handle? */ - if (unlikely(ipd == 0)) + if (unlikely((ipd & REG_INT_ALL) == 0)) goto out; switch (i2c->state) { @@ -429,12 +433,11 @@ static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate) unsigned long i2c_rate = clk_get_rate(i2c->clk); unsigned int div; - /* SCL rate = (clk rate) / (8 * DIV) */ - div = DIV_ROUND_UP(i2c_rate, scl_rate * 8); - - /* The lower and upper half of the CLKDIV reg describe the length of - * SCL low & high periods. */ - div = DIV_ROUND_UP(div, 2); + /* set DIV = DIVH = DIVL + * SCL rate = (clk rate) / (8 * (DIVH + 1 + DIVL + 1)) + * = (clk rate) / (16 * (DIV + 1)) + */ + div = DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1; i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV); } diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 24e2370d52a1..e3b0337faeb7 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1128,11 +1128,11 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c); strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name)); - i2c->adap.owner = THIS_MODULE; - i2c->adap.algo = &s3c24xx_i2c_algorithm; + i2c->adap.owner = THIS_MODULE; + i2c->adap.algo = &s3c24xx_i2c_algorithm; i2c->adap.retries = 2; - i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED; - i2c->tx_setup = 50; + i2c->adap.class = I2C_CLASS_DEPRECATED; + i2c->tx_setup = 50; init_waitqueue_head(&i2c->wait); @@ -1267,7 +1267,7 @@ static int s3c24xx_i2c_suspend_noirq(struct device *dev) return 0; } -static int s3c24xx_i2c_resume(struct device *dev) +static int s3c24xx_i2c_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); @@ -1285,7 +1285,11 @@ static int s3c24xx_i2c_resume(struct device *dev) static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = { #ifdef CONFIG_PM_SLEEP .suspend_noirq = s3c24xx_i2c_suspend_noirq, - .resume = s3c24xx_i2c_resume, + .resume_noirq = s3c24xx_i2c_resume_noirq, + .freeze_noirq = s3c24xx_i2c_suspend_noirq, + .thaw_noirq = s3c24xx_i2c_resume_noirq, + .poweroff_noirq = s3c24xx_i2c_suspend_noirq, + .restore_noirq = s3c24xx_i2c_resume_noirq, #endif }; diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c deleted file mode 100644 index dd186a037684..000000000000 --- a/drivers/i2c/busses/i2c-s6000.c +++ /dev/null @@ -1,404 +0,0 @@ -/* - * drivers/i2c/busses/i2c-s6000.c - * - * Description: Driver for S6000 Family I2C Interface - * Copyright (c) 2008 emlix GmbH - * Author: Oskar Schirmer <oskar@scara.com> - * - * Partially based on i2c-bfin-twi.c driver by <sonic.zhang@analog.com> - * Copyright (c) 2005-2007 Analog Devices, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
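
[Editorial note, not part of the patch: the i2c-rk3x divider rework above replaces the old clk/(8*DIV) approximation with the exact relation SCL = clk / (8 * (DIVH + 1 + DIVL + 1)); with DIVH = DIVL = DIV this collapses to clk / (16 * (DIV + 1)). The register value then works out as in this hypothetical helper, rounding up so the bus never runs faster than requested.]

#include <linux/kernel.h>

/* DIVH == DIVL == div, so SCL = i2c_rate / (16 * (div + 1)) */
static inline unsigned long demo_rk3x_clkdiv(unsigned long i2c_rate,
					     unsigned long scl_rate)
{
	return DIV_ROUND_UP(i2c_rate, scl_rate * 16) - 1;
}

/*
 * Example: a 74.25 MHz input clock and a 400 kHz target give
 * DIV_ROUND_UP(74250000, 6400000) - 1 = 11, i.e. an actual SCL of
 * 74250000 / (16 * 12) ~= 387 kHz, safely below the requested rate.
 */
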
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/i2c.h> -#include <linux/i2c/s6000.h> -#include <linux/timer.h> -#include <linux/spinlock.h> -#include <linux/completion.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/io.h> - -#include "i2c-s6000.h" - -#define DRV_NAME "i2c-s6000" - -#define POLL_TIMEOUT (2 * HZ) - -struct s6i2c_if { - u8 __iomem *reg; /* memory mapped registers */ - int irq; - spinlock_t lock; - struct i2c_msg *msgs; /* messages currently handled */ - int msgs_num; /* nb of msgs to do */ - int msgs_push; /* nb of msgs read/written */ - int msgs_done; /* nb of msgs finally handled */ - unsigned push; /* nb of bytes read/written in msg */ - unsigned done; /* nb of bytes finally handled */ - int timeout_count; /* timeout retries left */ - struct timer_list timeout_timer; - struct i2c_adapter adap; - struct completion complete; - struct clk *clk; - struct resource *res; -}; - -static inline u16 i2c_rd16(struct s6i2c_if *iface, unsigned n) -{ - return readw(iface->reg + (n)); -} - -static inline void i2c_wr16(struct s6i2c_if *iface, unsigned n, u16 v) -{ - writew(v, iface->reg + (n)); -} - -static inline u32 i2c_rd32(struct s6i2c_if *iface, unsigned n) -{ - return readl(iface->reg + (n)); -} - -static inline void i2c_wr32(struct s6i2c_if *iface, unsigned n, u32 v) -{ - writel(v, iface->reg + (n)); -} - -static struct s6i2c_if s6i2c_if; - -static void s6i2c_handle_interrupt(struct s6i2c_if *iface) -{ - if (i2c_rd16(iface, S6_I2C_INTRSTAT) & (1 << S6_I2C_INTR_TXABRT)) { - i2c_rd16(iface, S6_I2C_CLRTXABRT); - i2c_wr16(iface, S6_I2C_INTRMASK, 0); - complete(&iface->complete); - return; - } - if (iface->msgs_done >= iface->msgs_num) { - dev_err(&iface->adap.dev, "s6i2c: spurious I2C irq: %04x\n", - i2c_rd16(iface, S6_I2C_INTRSTAT)); - i2c_wr16(iface, S6_I2C_INTRMASK, 0); - return; - } - while ((iface->msgs_push < iface->msgs_num) - && (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_TFNF))) { - struct i2c_msg *m = &iface->msgs[iface->msgs_push]; - if (!(m->flags & I2C_M_RD)) - i2c_wr16(iface, S6_I2C_DATACMD, m->buf[iface->push]); - else - i2c_wr16(iface, S6_I2C_DATACMD, - 1 << S6_I2C_DATACMD_READ); - if (++iface->push >= m->len) { - iface->push = 0; - iface->msgs_push += 1; - } - } - do { - struct i2c_msg *m = &iface->msgs[iface->msgs_done]; - if (!(m->flags & I2C_M_RD)) { - if (iface->msgs_done < iface->msgs_push) - iface->msgs_done += 1; - else - break; - } else if (i2c_rd16(iface, S6_I2C_STATUS) - & (1 << S6_I2C_STATUS_RFNE)) { - m->buf[iface->done] = i2c_rd16(iface, S6_I2C_DATACMD); - if (++iface->done >= m->len) { - iface->done = 0; - iface->msgs_done += 1; - } - } else{ - break; - } - } while (iface->msgs_done < iface->msgs_num); - if (iface->msgs_done >= iface->msgs_num) { - i2c_wr16(iface, S6_I2C_INTRMASK, 1 << S6_I2C_INTR_TXABRT); - complete(&iface->complete); - } else if (iface->msgs_push >= iface->msgs_num) { - i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) | - (1 << S6_I2C_INTR_RXFULL)); - } else { - i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) | - (1 << S6_I2C_INTR_TXEMPTY) | - (1 << S6_I2C_INTR_RXFULL)); - } -} - -static irqreturn_t s6i2c_interrupt_entry(int irq, 
void *dev_id) -{ - struct s6i2c_if *iface = dev_id; - if (!(i2c_rd16(iface, S6_I2C_STATUS) & ((1 << S6_I2C_INTR_RXUNDER) - | (1 << S6_I2C_INTR_RXOVER) - | (1 << S6_I2C_INTR_RXFULL) - | (1 << S6_I2C_INTR_TXOVER) - | (1 << S6_I2C_INTR_TXEMPTY) - | (1 << S6_I2C_INTR_RDREQ) - | (1 << S6_I2C_INTR_TXABRT) - | (1 << S6_I2C_INTR_RXDONE) - | (1 << S6_I2C_INTR_ACTIVITY) - | (1 << S6_I2C_INTR_STOPDET) - | (1 << S6_I2C_INTR_STARTDET) - | (1 << S6_I2C_INTR_GENCALL)))) - return IRQ_NONE; - - spin_lock(&iface->lock); - del_timer(&iface->timeout_timer); - s6i2c_handle_interrupt(iface); - spin_unlock(&iface->lock); - return IRQ_HANDLED; -} - -static void s6i2c_timeout(unsigned long data) -{ - struct s6i2c_if *iface = (struct s6i2c_if *)data; - unsigned long flags; - - spin_lock_irqsave(&iface->lock, flags); - s6i2c_handle_interrupt(iface); - if (--iface->timeout_count > 0) { - iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; - add_timer(&iface->timeout_timer); - } else { - complete(&iface->complete); - i2c_wr16(iface, S6_I2C_INTRMASK, 0); - } - spin_unlock_irqrestore(&iface->lock, flags); -} - -static int s6i2c_master_xfer(struct i2c_adapter *adap, - struct i2c_msg *msgs, int num) -{ - struct s6i2c_if *iface = adap->algo_data; - int i; - if (num == 0) - return 0; - if (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY)) - yield(); - i2c_wr16(iface, S6_I2C_INTRMASK, 0); - i2c_rd16(iface, S6_I2C_CLRINTR); - for (i = 0; i < num; i++) { - if (msgs[i].flags & I2C_M_TEN) { - dev_err(&adap->dev, - "s6i2c: 10 bits addr not supported\n"); - return -EINVAL; - } - if (msgs[i].len == 0) { - dev_err(&adap->dev, - "s6i2c: zero length message not supported\n"); - return -EINVAL; - } - if (msgs[i].addr != msgs[0].addr) { - dev_err(&adap->dev, - "s6i2c: multiple xfer cannot change target\n"); - return -EINVAL; - } - } - - iface->msgs = msgs; - iface->msgs_num = num; - iface->msgs_push = 0; - iface->msgs_done = 0; - iface->push = 0; - iface->done = 0; - iface->timeout_count = 10; - i2c_wr16(iface, S6_I2C_TAR, msgs[0].addr); - i2c_wr16(iface, S6_I2C_ENABLE, 1); - i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXEMPTY) | - (1 << S6_I2C_INTR_TXABRT)); - - iface->timeout_timer.expires = jiffies + POLL_TIMEOUT; - add_timer(&iface->timeout_timer); - wait_for_completion(&iface->complete); - del_timer_sync(&iface->timeout_timer); - while (i2c_rd32(iface, S6_I2C_TXFLR) > 0) - schedule(); - while (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY)) - schedule(); - - i2c_wr16(iface, S6_I2C_INTRMASK, 0); - i2c_wr16(iface, S6_I2C_ENABLE, 0); - return iface->msgs_done; -} - -static u32 s6i2c_functionality(struct i2c_adapter *adap) -{ - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; -} - -static struct i2c_algorithm s6i2c_algorithm = { - .master_xfer = s6i2c_master_xfer, - .functionality = s6i2c_functionality, -}; - -static u16 nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns) -{ - u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000; - if (dividend > 0xffff) - return 0xffff; - return dividend; -} - -static int s6i2c_probe(struct platform_device *dev) -{ - struct s6i2c_if *iface = &s6i2c_if; - struct i2c_adapter *p_adap; - const char *clock; - int bus_num, rc; - spin_lock_init(&iface->lock); - init_completion(&iface->complete); - iface->irq = platform_get_irq(dev, 0); - if (iface->irq < 0) { - rc = iface->irq; - goto err_out; - } - iface->res = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!iface->res) { - rc = -ENXIO; - goto err_out; - } - iface->res = 
request_mem_region(iface->res->start, - resource_size(iface->res), - dev->dev.bus_id); - if (!iface->res) { - rc = -EBUSY; - goto err_out; - } - iface->reg = ioremap_nocache(iface->res->start, - resource_size(iface->res)); - if (!iface->reg) { - rc = -ENOMEM; - goto err_reg; - } - - clock = 0; - bus_num = -1; - if (dev_get_platdata(&dev->dev)) { - struct s6_i2c_platform_data *pdata = - dev_get_platdata(&dev->dev); - bus_num = pdata->bus_num; - clock = pdata->clock; - } - iface->clk = clk_get(&dev->dev, clock); - if (IS_ERR(iface->clk)) { - rc = PTR_ERR(iface->clk); - goto err_map; - } - rc = clk_enable(iface->clk); - if (rc < 0) - goto err_clk_put; - init_timer(&iface->timeout_timer); - iface->timeout_timer.function = s6i2c_timeout; - iface->timeout_timer.data = (unsigned long)iface; - - p_adap = &iface->adap; - strlcpy(p_adap->name, dev->name, sizeof(p_adap->name)); - p_adap->algo = &s6i2c_algorithm; - p_adap->algo_data = iface; - p_adap->nr = bus_num; - p_adap->class = 0; - p_adap->dev.parent = &dev->dev; - i2c_wr16(iface, S6_I2C_INTRMASK, 0); - rc = request_irq(iface->irq, s6i2c_interrupt_entry, - IRQF_SHARED, dev->name, iface); - if (rc) { - dev_err(&p_adap->dev, "s6i2c: can't get IRQ %d\n", iface->irq); - goto err_clk_dis; - } - - i2c_wr16(iface, S6_I2C_ENABLE, 0); - udelay(1); - i2c_wr32(iface, S6_I2C_SRESET, 1 << S6_I2C_SRESET_IC_SRST); - i2c_wr16(iface, S6_I2C_CLRTXABRT, 1); - i2c_wr16(iface, S6_I2C_CON, - (1 << S6_I2C_CON_MASTER) | - (S6_I2C_CON_SPEED_NORMAL << S6_I2C_CON_SPEED) | - (0 << S6_I2C_CON_10BITSLAVE) | - (0 << S6_I2C_CON_10BITMASTER) | - (1 << S6_I2C_CON_RESTARTENA) | - (1 << S6_I2C_CON_SLAVEDISABLE)); - i2c_wr16(iface, S6_I2C_SSHCNT, nanoseconds_on_clk(iface, 4000)); - i2c_wr16(iface, S6_I2C_SSLCNT, nanoseconds_on_clk(iface, 4700)); - i2c_wr16(iface, S6_I2C_FSHCNT, nanoseconds_on_clk(iface, 600)); - i2c_wr16(iface, S6_I2C_FSLCNT, nanoseconds_on_clk(iface, 1300)); - i2c_wr16(iface, S6_I2C_RXTL, 0); - i2c_wr16(iface, S6_I2C_TXTL, 0); - - platform_set_drvdata(dev, iface); - rc = i2c_add_numbered_adapter(p_adap); - if (rc) - goto err_irq_free; - return 0; - -err_irq_free: - free_irq(iface->irq, iface); -err_clk_dis: - clk_disable(iface->clk); -err_clk_put: - clk_put(iface->clk); -err_map: - iounmap(iface->reg); -err_reg: - release_mem_region(iface->res->start, - resource_size(iface->res)); -err_out: - return rc; -} - -static int s6i2c_remove(struct platform_device *pdev) -{ - struct s6i2c_if *iface = platform_get_drvdata(pdev); - i2c_wr16(iface, S6_I2C_ENABLE, 0); - i2c_del_adapter(&iface->adap); - free_irq(iface->irq, iface); - clk_disable(iface->clk); - clk_put(iface->clk); - iounmap(iface->reg); - release_mem_region(iface->res->start, - resource_size(iface->res)); - return 0; -} - -static struct platform_driver s6i2c_driver = { - .probe = s6i2c_probe, - .remove = s6i2c_remove, - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, -}; - -static int __init s6i2c_init(void) -{ - pr_info("I2C: S6000 I2C driver\n"); - return platform_driver_register(&s6i2c_driver); -} - -static void __exit s6i2c_exit(void) -{ - platform_driver_unregister(&s6i2c_driver); -} - -MODULE_DESCRIPTION("I2C-Bus adapter routines for S6000 I2C"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); - -subsys_initcall(s6i2c_init); -module_exit(s6i2c_exit); diff --git a/drivers/i2c/busses/i2c-s6000.h b/drivers/i2c/busses/i2c-s6000.h deleted file mode 100644 index 4936f9f2256f..000000000000 --- a/drivers/i2c/busses/i2c-s6000.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * 
drivers/i2c/busses/i2c-s6000.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2008 Emlix GmbH <info@emlix.com> - * Author: Oskar Schirmer <oskar@scara.com> - */ - -#ifndef __DRIVERS_I2C_BUSSES_I2C_S6000_H -#define __DRIVERS_I2C_BUSSES_I2C_S6000_H - -#define S6_I2C_CON 0x000 -#define S6_I2C_CON_MASTER 0 -#define S6_I2C_CON_SPEED 1 -#define S6_I2C_CON_SPEED_NORMAL 1 -#define S6_I2C_CON_SPEED_FAST 2 -#define S6_I2C_CON_SPEED_MASK 3 -#define S6_I2C_CON_10BITSLAVE 3 -#define S6_I2C_CON_10BITMASTER 4 -#define S6_I2C_CON_RESTARTENA 5 -#define S6_I2C_CON_SLAVEDISABLE 6 -#define S6_I2C_TAR 0x004 -#define S6_I2C_TAR_GCORSTART 10 -#define S6_I2C_TAR_SPECIAL 11 -#define S6_I2C_SAR 0x008 -#define S6_I2C_HSMADDR 0x00C -#define S6_I2C_DATACMD 0x010 -#define S6_I2C_DATACMD_READ 8 -#define S6_I2C_SSHCNT 0x014 -#define S6_I2C_SSLCNT 0x018 -#define S6_I2C_FSHCNT 0x01C -#define S6_I2C_FSLCNT 0x020 -#define S6_I2C_INTRSTAT 0x02C -#define S6_I2C_INTRMASK 0x030 -#define S6_I2C_RAWINTR 0x034 -#define S6_I2C_INTR_RXUNDER 0 -#define S6_I2C_INTR_RXOVER 1 -#define S6_I2C_INTR_RXFULL 2 -#define S6_I2C_INTR_TXOVER 3 -#define S6_I2C_INTR_TXEMPTY 4 -#define S6_I2C_INTR_RDREQ 5 -#define S6_I2C_INTR_TXABRT 6 -#define S6_I2C_INTR_RXDONE 7 -#define S6_I2C_INTR_ACTIVITY 8 -#define S6_I2C_INTR_STOPDET 9 -#define S6_I2C_INTR_STARTDET 10 -#define S6_I2C_INTR_GENCALL 11 -#define S6_I2C_RXTL 0x038 -#define S6_I2C_TXTL 0x03C -#define S6_I2C_CLRINTR 0x040 -#define S6_I2C_CLRRXUNDER 0x044 -#define S6_I2C_CLRRXOVER 0x048 -#define S6_I2C_CLRTXOVER 0x04C -#define S6_I2C_CLRRDREQ 0x050 -#define S6_I2C_CLRTXABRT 0x054 -#define S6_I2C_CLRRXDONE 0x058 -#define S6_I2C_CLRACTIVITY 0x05C -#define S6_I2C_CLRSTOPDET 0x060 -#define S6_I2C_CLRSTARTDET 0x064 -#define S6_I2C_CLRGENCALL 0x068 -#define S6_I2C_ENABLE 0x06C -#define S6_I2C_STATUS 0x070 -#define S6_I2C_STATUS_ACTIVITY 0 -#define S6_I2C_STATUS_TFNF 1 -#define S6_I2C_STATUS_TFE 2 -#define S6_I2C_STATUS_RFNE 3 -#define S6_I2C_STATUS_RFF 4 -#define S6_I2C_TXFLR 0x074 -#define S6_I2C_RXFLR 0x078 -#define S6_I2C_SRESET 0x07C -#define S6_I2C_SRESET_IC_SRST 0 -#define S6_I2C_SRESET_IC_MASTER_SRST 1 -#define S6_I2C_SRESET_IC_SLAVE_SRST 2 -#define S6_I2C_TXABRTSOURCE 0x080 - -#endif diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index a3216defc1d3..b1336d5f0531 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c @@ -311,7 +311,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) goto out; } adap = &siic->adapter; - adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; + adap->class = I2C_CLASS_DEPRECATED; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); siic->base = devm_ioremap_resource(&pdev->dev, mem_res); diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index 95b947670386..2e4eccd6599a 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -206,25 +206,31 @@ static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask) writel_relaxed(readl_relaxed(reg) & ~mask, reg); } -/* From I2C Specifications v0.5 */ +/* + * From I2C Specifications v0.5. + * + * All the values below have +10% margin added to be + * compatible with some out-of-spec devices, + * like HDMI link of the Toshiba 19AV600 TV. 
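
[Editorial note, not part of the patch: the i2c-st timing table that follows simply scales the I2C specification minima by +10%, so 4000 ns becomes 4400 ns, 4700 ns becomes 5170 ns, 1300 ns becomes 1430 ns, and so on. The same margin can be expressed as a one-line helper; the name is hypothetical.]

#include <linux/types.h>

/* +10% margin on a nanosecond timing value, matching the hard-coded table */
static inline u32 demo_i2c_margin_ns(u32 spec_ns)
{
	return spec_ns + spec_ns / 10;
}
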
+ */ static struct st_i2c_timings i2c_timings[] = { [I2C_MODE_STANDARD] = { .rate = 100000, - .rep_start_hold = 4000, - .rep_start_setup = 4700, - .start_hold = 4000, - .data_setup_time = 250, - .stop_setup_time = 4000, - .bus_free_time = 4700, + .rep_start_hold = 4400, + .rep_start_setup = 5170, + .start_hold = 4400, + .data_setup_time = 275, + .stop_setup_time = 4400, + .bus_free_time = 5170, }, [I2C_MODE_FAST] = { .rate = 400000, - .rep_start_hold = 600, - .rep_start_setup = 600, - .start_hold = 600, - .data_setup_time = 100, - .stop_setup_time = 600, - .bus_free_time = 1300, + .rep_start_hold = 660, + .rep_start_setup = 660, + .start_hold = 660, + .data_setup_time = 110, + .stop_setup_time = 660, + .bus_free_time = 1430, }, }; @@ -815,7 +821,7 @@ static int st_i2c_probe(struct platform_device *pdev) adap = &i2c_dev->adap; i2c_set_adapdata(adap, i2c_dev); - snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%x)", res->start); + snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); adap->owner = THIS_MODULE; adap->timeout = 2 * HZ; adap->retries = 0; diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index fefb1c19ec1d..6a44f37798c8 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -909,7 +909,7 @@ static int stu300_probe(struct platform_device *pdev) adap = &dev->adapter; adap->owner = THIS_MODULE; /* DDC class but actually often used for more generic I2C */ - adap->class = I2C_CLASS_DDC | I2C_CLASS_DEPRECATED; + adap->class = I2C_CLASS_DEPRECATED; strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", sizeof(adap->name)); adap->nr = bus_nr; diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c index 057602683553..10855a0b7e7f 100644 --- a/drivers/i2c/busses/i2c-taos-evm.c +++ b/drivers/i2c/busses/i2c-taos-evm.c @@ -311,19 +311,8 @@ static struct serio_driver taos_drv = { .interrupt = taos_interrupt, }; -static int __init taos_init(void) -{ - return serio_register_driver(&taos_drv); -} - -static void __exit taos_exit(void) -{ - serio_unregister_driver(&taos_drv); -} +module_serio_driver(taos_drv); MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); MODULE_DESCRIPTION("TAOS evaluation module driver"); MODULE_LICENSE("GPL"); - -module_init(taos_init); -module_exit(taos_exit); diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index f1bb2fc06791..efba1ebe16ba 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -380,34 +380,33 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev) { int ret; if (!i2c_dev->hw->has_single_clk_source) { - ret = clk_prepare_enable(i2c_dev->fast_clk); + ret = clk_enable(i2c_dev->fast_clk); if (ret < 0) { dev_err(i2c_dev->dev, "Enabling fast clk failed, err %d\n", ret); return ret; } } - ret = clk_prepare_enable(i2c_dev->div_clk); + ret = clk_enable(i2c_dev->div_clk); if (ret < 0) { dev_err(i2c_dev->dev, "Enabling div clk failed, err %d\n", ret); - clk_disable_unprepare(i2c_dev->fast_clk); + clk_disable(i2c_dev->fast_clk); } return ret; } static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev) { - clk_disable_unprepare(i2c_dev->div_clk); + clk_disable(i2c_dev->div_clk); if (!i2c_dev->hw->has_single_clk_source) - clk_disable_unprepare(i2c_dev->fast_clk); + clk_disable(i2c_dev->fast_clk); } static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) { u32 val; int err = 0; - int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; u32 clk_divisor; err = 
tegra_i2c_clock_enable(i2c_dev); @@ -428,9 +427,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) i2c_writel(i2c_dev, val, I2C_CNFG); i2c_writel(i2c_dev, 0, I2C_INT_MASK); - clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1); - clk_set_rate(i2c_dev->div_clk, i2c_dev->bus_clk_rate * clk_multiplier); - /* Make sure clock divisor programmed correctly */ clk_divisor = i2c_dev->hw->clk_divisor_hs_mode; clk_divisor |= i2c_dev->hw->clk_divisor_std_fast_mode << @@ -712,6 +708,7 @@ static int tegra_i2c_probe(struct platform_device *pdev) void __iomem *base; int irq; int ret = 0; + int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); @@ -777,22 +774,44 @@ static int tegra_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, i2c_dev); + if (!i2c_dev->hw->has_single_clk_source) { + ret = clk_prepare(i2c_dev->fast_clk); + if (ret < 0) { + dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); + return ret; + } + } + + clk_multiplier *= (i2c_dev->hw->clk_divisor_std_fast_mode + 1); + ret = clk_set_rate(i2c_dev->div_clk, + i2c_dev->bus_clk_rate * clk_multiplier); + if (ret) { + dev_err(i2c_dev->dev, "Clock rate change failed %d\n", ret); + goto unprepare_fast_clk; + } + + ret = clk_prepare(i2c_dev->div_clk); + if (ret < 0) { + dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); + goto unprepare_fast_clk; + } + ret = tegra_i2c_init(i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize i2c controller"); - return ret; + goto unprepare_div_clk; } ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); - return ret; + goto unprepare_div_clk; } i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); i2c_dev->adapter.owner = THIS_MODULE; - i2c_dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED; + i2c_dev->adapter.class = I2C_CLASS_DEPRECATED; strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter", sizeof(i2c_dev->adapter.name)); i2c_dev->adapter.algo = &tegra_i2c_algo; @@ -803,16 +822,30 @@ static int tegra_i2c_probe(struct platform_device *pdev) ret = i2c_add_numbered_adapter(&i2c_dev->adapter); if (ret) { dev_err(&pdev->dev, "Failed to add I2C adapter\n"); - return ret; + goto unprepare_div_clk; } return 0; + +unprepare_div_clk: + clk_unprepare(i2c_dev->div_clk); + +unprepare_fast_clk: + if (!i2c_dev->hw->has_single_clk_source) + clk_unprepare(i2c_dev->fast_clk); + + return ret; } static int tegra_i2c_remove(struct platform_device *pdev) { struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); i2c_del_adapter(&i2c_dev->adapter); + + clk_unprepare(i2c_dev->div_clk); + if (!i2c_dev->hw->has_single_clk_source) + clk_unprepare(i2c_dev->fast_clk); + return 0; } diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 7731f1795869..ade9223912d3 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -677,15 +677,15 @@ static u32 xiic_func(struct i2c_adapter *adap) } static const struct i2c_algorithm xiic_algorithm = { - .master_xfer = xiic_xfer, - .functionality = xiic_func, + .master_xfer = xiic_xfer, + .functionality = xiic_func, }; static struct i2c_adapter xiic_adapter = { - .owner = THIS_MODULE, - .name = DRIVER_NAME, - .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED, - .algo = &xiic_algorithm, + .owner = THIS_MODULE, + .name = DRIVER_NAME, + .class = I2C_CLASS_DEPRECATED, + 
.algo = &xiic_algorithm, }; diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c deleted file mode 100644 index 8eadf0f47ad7..000000000000 --- a/drivers/i2c/busses/scx200_i2c.c +++ /dev/null @@ -1,129 +0,0 @@ -/* linux/drivers/i2c/busses/scx200_i2c.c - - Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> - - National Semiconductor SCx200 I2C bus on GPIO pins - - Based on i2c-velleman.c Copyright (C) 1995-96, 2000 Simon G. Vogl - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/i2c.h> -#include <linux/i2c-algo-bit.h> -#include <linux/io.h> - -#include <linux/scx200_gpio.h> - -MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); -MODULE_DESCRIPTION("NatSemi SCx200 I2C Driver"); -MODULE_LICENSE("GPL"); - -static int scl = CONFIG_SCx200_I2C_SCL; -static int sda = CONFIG_SCx200_I2C_SDA; - -module_param(scl, int, 0); -MODULE_PARM_DESC(scl, "GPIO line for SCL"); -module_param(sda, int, 0); -MODULE_PARM_DESC(sda, "GPIO line for SDA"); - -static void scx200_i2c_setscl(void *data, int state) -{ - scx200_gpio_set(scl, state); -} - -static void scx200_i2c_setsda(void *data, int state) -{ - scx200_gpio_set(sda, state); -} - -static int scx200_i2c_getscl(void *data) -{ - return scx200_gpio_get(scl); -} - -static int scx200_i2c_getsda(void *data) -{ - return scx200_gpio_get(sda); -} - -/* ------------------------------------------------------------------------ - * Encapsulate the above functions in the correct operations structure. - * This is only done when more than one hardware adapter is supported. 
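
[Editorial note, not part of the patch: the scx200_i2c driver being deleted below is a textbook user of the i2c-algo-bit helper; the driver only supplies four GPIO accessors plus timing, and i2c_bit_add_bus() provides the whole bus protocol. A minimal skeleton of that pattern is sketched here; the demo_* GPIO helpers are assumptions, not real SCx200 calls.]

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/jiffies.h>
#include <linux/module.h>

static void demo_setscl(void *data, int state) { /* drive the SCL GPIO */ }
static void demo_setsda(void *data, int state) { /* drive the SDA GPIO */ }
static int demo_getscl(void *data) { return 1; /* sample SCL */ }
static int demo_getsda(void *data) { return 1; /* sample SDA */ }

static struct i2c_algo_bit_data demo_bit_data = {
	.setsda  = demo_setsda,
	.setscl  = demo_setscl,
	.getsda  = demo_getsda,
	.getscl  = demo_getscl,
	.udelay  = 10,		/* half clock period in us, roughly 50 kHz */
	.timeout = HZ,		/* bus-busy timeout in jiffies */
};

static struct i2c_adapter demo_adapter = {
	.owner     = THIS_MODULE,
	.algo_data = &demo_bit_data,
	.name      = "demo bit-banged bus",
};

/* Registration is then a single call: i2c_bit_add_bus(&demo_adapter); */
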
- */ - -static struct i2c_algo_bit_data scx200_i2c_data = { - .setsda = scx200_i2c_setsda, - .setscl = scx200_i2c_setscl, - .getsda = scx200_i2c_getsda, - .getscl = scx200_i2c_getscl, - .udelay = 10, - .timeout = HZ, -}; - -static struct i2c_adapter scx200_i2c_ops = { - .owner = THIS_MODULE, - .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, - .algo_data = &scx200_i2c_data, - .name = "NatSemi SCx200 I2C", -}; - -static int scx200_i2c_init(void) -{ - pr_debug("NatSemi SCx200 I2C Driver\n"); - - if (!scx200_gpio_present()) { - pr_err("no SCx200 gpio pins available\n"); - return -ENODEV; - } - - pr_debug("SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda); - - if (scl == -1 || sda == -1 || scl == sda) { - pr_err("scl and sda must be specified\n"); - return -EINVAL; - } - - /* Configure GPIOs as open collector outputs */ - scx200_gpio_configure(scl, ~2, 5); - scx200_gpio_configure(sda, ~2, 5); - - if (i2c_bit_add_bus(&scx200_i2c_ops) < 0) { - pr_err("adapter %s registration failed\n", scx200_i2c_ops.name); - return -ENODEV; - } - - return 0; -} - -static void scx200_i2c_cleanup(void) -{ - i2c_del_adapter(&scx200_i2c_ops); -} - -module_init(scx200_i2c_init); -module_exit(scx200_i2c_cleanup); - -/* - Local variables: - compile-command: "make -k -C ../.. SUBDIRS=drivers/i2c modules" - c-basic-offset: 8 - End: -*/ diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 66aa83b99383..ccfbbab82a15 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -27,6 +27,8 @@ OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de> (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and (c) 2013 Wolfram Sang <wsa@the-dreams.de> + I2C ACPI code Copyright (C) 2014 Intel Corp + Author: Lan Tianyu <tianyu.lan@intel.com> */ #include <linux/module.h> @@ -78,6 +80,368 @@ void i2c_transfer_trace_unreg(void) static_key_slow_dec(&i2c_trace_msg); } +#if defined(CONFIG_ACPI) +struct acpi_i2c_handler_data { + struct acpi_connection_info info; + struct i2c_adapter *adapter; +}; + +struct gsb_buffer { + u8 status; + u8 len; + union { + u16 wdata; + u8 bdata; + u8 data[0]; + }; +} __packed; + +static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) +{ + struct i2c_board_info *info = data; + + if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { + struct acpi_resource_i2c_serialbus *sb; + + sb = &ares->data.i2c_serial_bus; + if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { + info->addr = sb->slave_address; + if (sb->access_mode == ACPI_I2C_10BIT_MODE) + info->flags |= I2C_CLIENT_TEN; + } + } else if (info->irq < 0) { + struct resource r; + + if (acpi_dev_resource_interrupt(ares, 0, &r)) + info->irq = r.start; + } + + /* Tell the ACPI core to skip this resource */ + return 1; +} + +static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, + void *data, void **return_value) +{ + struct i2c_adapter *adapter = data; + struct list_head resource_list; + struct i2c_board_info info; + struct acpi_device *adev; + int ret; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + if (acpi_bus_get_status(adev) || !adev->status.present) + return AE_OK; + + memset(&info, 0, sizeof(info)); + info.acpi_node.companion = adev; + info.irq = -1; + + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, + acpi_i2c_add_resource, &info); + acpi_dev_free_resource_list(&resource_list); + + if (ret < 0 || !info.addr) + return AE_OK; + + adev->power.flags.ignore_parent = true; + strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); + if 
(!i2c_new_device(adapter, &info)) { + adev->power.flags.ignore_parent = false; + dev_err(&adapter->dev, + "failed to add I2C device %s from ACPI\n", + dev_name(&adev->dev)); + } + + return AE_OK; +} + +/** + * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter + * @adap: pointer to adapter + * + * Enumerate all I2C slave devices behind this adapter by walking the ACPI + * namespace. When a device is found it will be added to the Linux device + * model and bound to the corresponding ACPI handle. + */ +static void acpi_i2c_register_devices(struct i2c_adapter *adap) +{ + acpi_handle handle; + acpi_status status; + + if (!adap->dev.parent) + return; + + handle = ACPI_HANDLE(adap->dev.parent); + if (!handle) + return; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + acpi_i2c_add_device, NULL, + adap, NULL); + if (ACPI_FAILURE(status)) + dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); +} + +#else /* CONFIG_ACPI */ +static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { } +#endif /* CONFIG_ACPI */ + +#ifdef CONFIG_ACPI_I2C_OPREGION +static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, + u8 cmd, u8 *data, u8 data_len) +{ + + struct i2c_msg msgs[2]; + int ret; + u8 *buffer; + + buffer = kzalloc(data_len, GFP_KERNEL); + if (!buffer) + return AE_NO_MEMORY; + + msgs[0].addr = client->addr; + msgs[0].flags = client->flags; + msgs[0].len = 1; + msgs[0].buf = &cmd; + + msgs[1].addr = client->addr; + msgs[1].flags = client->flags | I2C_M_RD; + msgs[1].len = data_len; + msgs[1].buf = buffer; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + dev_err(&client->adapter->dev, "i2c read failed\n"); + else + memcpy(data, buffer, data_len); + + kfree(buffer); + return ret; +} + +static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, + u8 cmd, u8 *data, u8 data_len) +{ + + struct i2c_msg msgs[1]; + u8 *buffer; + int ret = AE_OK; + + buffer = kzalloc(data_len + 1, GFP_KERNEL); + if (!buffer) + return AE_NO_MEMORY; + + buffer[0] = cmd; + memcpy(buffer + 1, data, data_len); + + msgs[0].addr = client->addr; + msgs[0].flags = client->flags; + msgs[0].len = data_len + 1; + msgs[0].buf = buffer; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + dev_err(&client->adapter->dev, "i2c write failed\n"); + + kfree(buffer); + return ret; +} + +static acpi_status +acpi_i2c_space_handler(u32 function, acpi_physical_address command, + u32 bits, u64 *value64, + void *handler_context, void *region_context) +{ + struct gsb_buffer *gsb = (struct gsb_buffer *)value64; + struct acpi_i2c_handler_data *data = handler_context; + struct acpi_connection_info *info = &data->info; + struct acpi_resource_i2c_serialbus *sb; + struct i2c_adapter *adapter = data->adapter; + struct i2c_client client; + struct acpi_resource *ares; + u32 accessor_type = function >> 16; + u8 action = function & ACPI_IO_MASK; + acpi_status ret = AE_OK; + int status; + + ret = acpi_buffer_to_resource(info->connection, info->length, &ares); + if (ACPI_FAILURE(ret)) + return ret; + + if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { + ret = AE_BAD_PARAMETER; + goto err; + } + + sb = &ares->data.i2c_serial_bus; + if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { + ret = AE_BAD_PARAMETER; + goto err; + } + + memset(&client, 0, sizeof(client)); + client.adapter = adapter; + client.addr = sb->slave_address; + client.flags = 0; + + if (sb->access_mode == ACPI_I2C_10BIT_MODE) + client.flags |= I2C_CLIENT_TEN; + + switch 
(accessor_type) { + case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: + if (action == ACPI_READ) { + status = i2c_smbus_read_byte(&client); + if (status >= 0) { + gsb->bdata = status; + status = 0; + } + } else { + status = i2c_smbus_write_byte(&client, gsb->bdata); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_BYTE: + if (action == ACPI_READ) { + status = i2c_smbus_read_byte_data(&client, command); + if (status >= 0) { + gsb->bdata = status; + status = 0; + } + } else { + status = i2c_smbus_write_byte_data(&client, command, + gsb->bdata); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_WORD: + if (action == ACPI_READ) { + status = i2c_smbus_read_word_data(&client, command); + if (status >= 0) { + gsb->wdata = status; + status = 0; + } + } else { + status = i2c_smbus_write_word_data(&client, command, + gsb->wdata); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_BLOCK: + if (action == ACPI_READ) { + status = i2c_smbus_read_block_data(&client, command, + gsb->data); + if (status >= 0) { + gsb->len = status; + status = 0; + } + } else { + status = i2c_smbus_write_block_data(&client, command, + gsb->len, gsb->data); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: + if (action == ACPI_READ) { + status = acpi_gsb_i2c_read_bytes(&client, command, + gsb->data, info->access_length); + if (status > 0) + status = 0; + } else { + status = acpi_gsb_i2c_write_bytes(&client, command, + gsb->data, info->access_length); + } + break; + + default: + pr_info("protocol(0x%02x) is not supported.\n", accessor_type); + ret = AE_BAD_PARAMETER; + goto err; + } + + gsb->status = status; + + err: + ACPI_FREE(ares); + return ret; +} + + +static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) +{ + acpi_handle handle; + struct acpi_i2c_handler_data *data; + acpi_status status; + + if (!adapter->dev.parent) + return -ENODEV; + + handle = ACPI_HANDLE(adapter->dev.parent); + + if (!handle) + return -ENODEV; + + data = kzalloc(sizeof(struct acpi_i2c_handler_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->adapter = adapter; + status = acpi_bus_attach_private_data(handle, (void *)data); + if (ACPI_FAILURE(status)) { + kfree(data); + return -ENOMEM; + } + + status = acpi_install_address_space_handler(handle, + ACPI_ADR_SPACE_GSBUS, + &acpi_i2c_space_handler, + NULL, + data); + if (ACPI_FAILURE(status)) { + dev_err(&adapter->dev, "Error installing i2c space handler\n"); + acpi_bus_detach_private_data(handle); + kfree(data); + return -ENOMEM; + } + + return 0; +} + +static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) +{ + acpi_handle handle; + struct acpi_i2c_handler_data *data; + acpi_status status; + + if (!adapter->dev.parent) + return; + + handle = ACPI_HANDLE(adapter->dev.parent); + + if (!handle) + return; + + acpi_remove_address_space_handler(handle, + ACPI_ADR_SPACE_GSBUS, + &acpi_i2c_space_handler); + + status = acpi_bus_get_private_data(handle, (void **)&data); + if (ACPI_SUCCESS(status)) + kfree(data); + + acpi_bus_detach_private_data(handle); +} +#else /* CONFIG_ACPI_I2C_OPREGION */ +static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) +{ } + +static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter) +{ return 0; } +#endif /* CONFIG_ACPI_I2C_OPREGION */ + /* ------------------------------------------------------------------------- */ static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, @@ -1097,101 +1461,6 @@ EXPORT_SYMBOL(of_find_i2c_adapter_by_node); static void of_i2c_register_devices(struct i2c_adapter 
*adap) { } #endif /* CONFIG_OF */ -/* ACPI support code */ - -#if IS_ENABLED(CONFIG_ACPI) -static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) -{ - struct i2c_board_info *info = data; - - if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { - struct acpi_resource_i2c_serialbus *sb; - - sb = &ares->data.i2c_serial_bus; - if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { - info->addr = sb->slave_address; - if (sb->access_mode == ACPI_I2C_10BIT_MODE) - info->flags |= I2C_CLIENT_TEN; - } - } else if (info->irq < 0) { - struct resource r; - - if (acpi_dev_resource_interrupt(ares, 0, &r)) - info->irq = r.start; - } - - /* Tell the ACPI core to skip this resource */ - return 1; -} - -static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, - void *data, void **return_value) -{ - struct i2c_adapter *adapter = data; - struct list_head resource_list; - struct i2c_board_info info; - struct acpi_device *adev; - int ret; - - if (acpi_bus_get_device(handle, &adev)) - return AE_OK; - if (acpi_bus_get_status(adev) || !adev->status.present) - return AE_OK; - - memset(&info, 0, sizeof(info)); - info.acpi_node.companion = adev; - info.irq = -1; - - INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, - acpi_i2c_add_resource, &info); - acpi_dev_free_resource_list(&resource_list); - - if (ret < 0 || !info.addr) - return AE_OK; - - adev->power.flags.ignore_parent = true; - strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type)); - if (!i2c_new_device(adapter, &info)) { - adev->power.flags.ignore_parent = false; - dev_err(&adapter->dev, - "failed to add I2C device %s from ACPI\n", - dev_name(&adev->dev)); - } - - return AE_OK; -} - -/** - * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter - * @adap: pointer to adapter - * - * Enumerate all I2C slave devices behind this adapter by walking the ACPI - * namespace. When a device is found it will be added to the Linux device - * model and bound to the corresponding ACPI handle. - */ -static void acpi_i2c_register_devices(struct i2c_adapter *adap) -{ - acpi_handle handle; - acpi_status status; - - if (!adap->dev.parent) - return; - - handle = ACPI_HANDLE(adap->dev.parent); - if (!handle) - return; - - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, - acpi_i2c_add_device, NULL, - adap, NULL); - if (ACPI_FAILURE(status)) - dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); -} -#else -static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) {} -#endif /* CONFIG_ACPI */ - static int i2c_do_add_adapter(struct i2c_driver *driver, struct i2c_adapter *adap) { @@ -1298,6 +1567,7 @@ exit_recovery: /* create pre-declared device nodes */ of_i2c_register_devices(adap); acpi_i2c_register_devices(adap); + acpi_i2c_install_space_handler(adap); if (adap->nr < __i2c_first_dynamic_bus_num) i2c_scan_static_board_info(adap); @@ -1471,6 +1741,7 @@ void i2c_del_adapter(struct i2c_adapter *adap) return; } + acpi_i2c_remove_space_handler(adap); /* Tell drivers about this removal */ mutex_lock(&core_lock); bus_for_each_drv(&i2c_bus_type, NULL, adap, @@ -2013,6 +2284,16 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) if (!driver->detect || !address_list) return 0; + /* Warn that the adapter lost class based instantiation */ + if (adapter->class == I2C_CLASS_DEPRECATED) { + dev_dbg(&adapter->dev, + "This adapter dropped support for I2C classes and " + "won't auto-detect %s devices anymore. 
If you need it, check " + "'Documentation/i2c/instantiating-devices' for alternatives.\n", + driver->driver.name); + return 0; + } + /* Stop here if the classes do not match */ if (!(adapter->class & driver->class)) return 0; diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c index 77e4849d2f2a..d241aa295d96 100644 --- a/drivers/i2c/i2c-stub.c +++ b/drivers/i2c/i2c-stub.c @@ -2,7 +2,7 @@ i2c-stub.c - I2C/SMBus chip emulator Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com> - Copyright (C) 2007, 2012 Jean Delvare <jdelvare@suse.de> + Copyright (C) 2007-2014 Jean Delvare <jdelvare@suse.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -27,28 +27,109 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/i2c.h> +#include <linux/list.h> #define MAX_CHIPS 10 -#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \ - I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \ - I2C_FUNC_SMBUS_I2C_BLOCK) + +/* + * Support for I2C_FUNC_SMBUS_BLOCK_DATA is disabled by default and must + * be enabled explicitly by setting the I2C_FUNC_SMBUS_BLOCK_DATA bits + * in the 'functionality' module parameter. + */ +#define STUB_FUNC_DEFAULT \ + (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \ + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \ + I2C_FUNC_SMBUS_I2C_BLOCK) + +#define STUB_FUNC_ALL \ + (STUB_FUNC_DEFAULT | I2C_FUNC_SMBUS_BLOCK_DATA) static unsigned short chip_addr[MAX_CHIPS]; module_param_array(chip_addr, ushort, NULL, S_IRUGO); MODULE_PARM_DESC(chip_addr, "Chip addresses (up to 10, between 0x03 and 0x77)"); -static unsigned long functionality = STUB_FUNC; +static unsigned long functionality = STUB_FUNC_DEFAULT; module_param(functionality, ulong, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(functionality, "Override functionality bitfield"); +/* Some chips have banked register ranges */ + +static u8 bank_reg[MAX_CHIPS]; +module_param_array(bank_reg, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(bank_reg, "Bank register"); + +static u8 bank_mask[MAX_CHIPS]; +module_param_array(bank_mask, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(bank_mask, "Bank value mask"); + +static u8 bank_start[MAX_CHIPS]; +module_param_array(bank_start, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(bank_start, "First banked register"); + +static u8 bank_end[MAX_CHIPS]; +module_param_array(bank_end, byte, NULL, S_IRUGO); +MODULE_PARM_DESC(bank_end, "Last banked register"); + +struct smbus_block_data { + struct list_head node; + u8 command; + u8 len; + u8 block[I2C_SMBUS_BLOCK_MAX]; +}; + struct stub_chip { u8 pointer; u16 words[256]; /* Byte operations use the LSB as per SMBus specification */ + struct list_head smbus_blocks; + + /* For chips with banks, extra registers are allocated dynamically */ + u8 bank_reg; + u8 bank_shift; + u8 bank_mask; + u8 bank_sel; /* Currently selected bank */ + u8 bank_start; + u8 bank_end; + u16 bank_size; + u16 *bank_words; /* Room for bank_mask * bank_size registers */ }; static struct stub_chip *stub_chips; +static int stub_chips_nr; + +static struct smbus_block_data *stub_find_block(struct device *dev, + struct stub_chip *chip, + u8 command, bool create) +{ + struct smbus_block_data *b, *rb = NULL; + + list_for_each_entry(b, &chip->smbus_blocks, node) { + if (b->command == command) { + rb = b; + break; + } + } + if (rb == NULL && create) { + rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL); + if (rb == NULL) + return rb; + rb->command = command; + list_add(&rb->node, 
&chip->smbus_blocks); + } + return rb; +} + +static u16 *stub_get_wordp(struct stub_chip *chip, u8 offset) +{ + if (chip->bank_sel && + offset >= chip->bank_start && offset <= chip->bank_end) + return chip->bank_words + + (chip->bank_sel - 1) * chip->bank_size + + offset - chip->bank_start; + else + return chip->words + offset; +} /* Return negative errno on error. */ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, @@ -57,9 +138,11 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, s32 ret; int i, len; struct stub_chip *chip = NULL; + struct smbus_block_data *b; + u16 *wordp; /* Search for the right chip */ - for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) { + for (i = 0; i < stub_chips_nr; i++) { if (addr == chip_addr[i]) { chip = stub_chips + i; break; @@ -82,7 +165,8 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, "smbus byte - addr 0x%02x, wrote 0x%02x.\n", addr, command); } else { - data->byte = chip->words[chip->pointer++] & 0xff; + wordp = stub_get_wordp(chip, chip->pointer++); + data->byte = *wordp & 0xff; dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, read 0x%02x.\n", addr, data->byte); @@ -92,14 +176,25 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, break; case I2C_SMBUS_BYTE_DATA: + wordp = stub_get_wordp(chip, command); if (read_write == I2C_SMBUS_WRITE) { - chip->words[command] &= 0xff00; - chip->words[command] |= data->byte; + *wordp &= 0xff00; + *wordp |= data->byte; dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n", addr, data->byte, command); + + /* Set the bank as needed */ + if (chip->bank_words && command == chip->bank_reg) { + chip->bank_sel = + (data->byte >> chip->bank_shift) + & chip->bank_mask; + dev_dbg(&adap->dev, + "switching to bank %u.\n", + chip->bank_sel); + } } else { - data->byte = chip->words[command] & 0xff; + data->byte = *wordp & 0xff; dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n", addr, data->byte, command); @@ -110,13 +205,14 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, break; case I2C_SMBUS_WORD_DATA: + wordp = stub_get_wordp(chip, command); if (read_write == I2C_SMBUS_WRITE) { - chip->words[command] = data->word; + *wordp = data->word; dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n", addr, data->word, command); } else { - data->word = chip->words[command]; + data->word = *wordp; dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n", addr, data->word, command); @@ -126,6 +222,12 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, break; case I2C_SMBUS_I2C_BLOCK_DATA: + /* + * We ignore banks here, because banked chips don't use I2C + * block transfers + */ + if (data->block[0] > 256 - command) /* Avoid overrun */ + data->block[0] = 256 - command; len = data->block[0]; if (read_write == I2C_SMBUS_WRITE) { for (i = 0; i < len; i++) { @@ -148,6 +250,55 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, ret = 0; break; + case I2C_SMBUS_BLOCK_DATA: + /* + * We ignore banks here, because chips typically don't use both + * banks and SMBus block transfers + */ + b = stub_find_block(&adap->dev, chip, command, false); + if (read_write == I2C_SMBUS_WRITE) { + len = data->block[0]; + if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) { + ret = -EINVAL; + break; + } + if (b == NULL) { + b = stub_find_block(&adap->dev, chip, 
command, + true); + if (b == NULL) { + ret = -ENOMEM; + break; + } + } + /* Largest write sets read block length */ + if (len > b->len) + b->len = len; + for (i = 0; i < len; i++) + b->block[i] = data->block[i + 1]; + /* update for byte and word commands */ + chip->words[command] = (b->block[0] << 8) | b->len; + dev_dbg(&adap->dev, + "smbus block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n", + addr, len, command); + } else { + if (b == NULL) { + dev_dbg(&adap->dev, + "SMBus block read command without prior block write not supported\n"); + ret = -EOPNOTSUPP; + break; + } + len = b->len; + data->block[0] = len; + for (i = 0; i < len; i++) + data->block[i + 1] = b->block[i]; + dev_dbg(&adap->dev, + "smbus block data - addr 0x%02x, read %d bytes at 0x%02x.\n", + addr, len, command); + } + + ret = 0; + break; + default: dev_dbg(&adap->dev, "Unsupported I2C/SMBus command\n"); ret = -EOPNOTSUPP; @@ -159,7 +310,7 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, static u32 stub_func(struct i2c_adapter *adapter) { - return STUB_FUNC & functionality; + return STUB_FUNC_ALL & functionality; } static const struct i2c_algorithm smbus_algorithm = { @@ -174,6 +325,43 @@ static struct i2c_adapter stub_adapter = { .name = "SMBus stub driver", }; +static int __init i2c_stub_allocate_banks(int i) +{ + struct stub_chip *chip = stub_chips + i; + + chip->bank_reg = bank_reg[i]; + chip->bank_start = bank_start[i]; + chip->bank_end = bank_end[i]; + chip->bank_size = bank_end[i] - bank_start[i] + 1; + + /* We assume that all bits in the mask are contiguous */ + chip->bank_mask = bank_mask[i]; + while (!(chip->bank_mask & 1)) { + chip->bank_shift++; + chip->bank_mask >>= 1; + } + + chip->bank_words = kzalloc(chip->bank_mask * chip->bank_size * + sizeof(u16), GFP_KERNEL); + if (!chip->bank_words) + return -ENOMEM; + + pr_debug("i2c-stub: Allocated %u banks of %u words each (registers 0x%02x to 0x%02x)\n", + chip->bank_mask, chip->bank_size, chip->bank_start, + chip->bank_end); + + return 0; +} + +static void i2c_stub_free(void) +{ + int i; + + for (i = 0; i < stub_chips_nr; i++) + kfree(stub_chips[i].bank_words); + kfree(stub_chips); +} + static int __init i2c_stub_init(void) { int i, ret; @@ -194,22 +382,39 @@ static int __init i2c_stub_init(void) } /* Allocate memory for all chips at once */ - stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL); + stub_chips_nr = i; + stub_chips = kcalloc(stub_chips_nr, sizeof(struct stub_chip), + GFP_KERNEL); if (!stub_chips) { pr_err("i2c-stub: Out of memory\n"); return -ENOMEM; } + for (i = 0; i < stub_chips_nr; i++) { + INIT_LIST_HEAD(&stub_chips[i].smbus_blocks); + + /* Allocate extra memory for banked register ranges */ + if (bank_mask[i]) { + ret = i2c_stub_allocate_banks(i); + if (ret) + goto fail_free; + } + } ret = i2c_add_adapter(&stub_adapter); if (ret) - kfree(stub_chips); + goto fail_free; + + return 0; + + fail_free: + i2c_stub_free(); return ret; } static void __exit i2c_stub_exit(void) { i2c_del_adapter(&stub_adapter); - kfree(stub_chips); + i2c_stub_free(); } MODULE_AUTHOR("Mark M. 
Hoffman <mhoffman@lightlink.com>"); diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 9bd4212782ab..ec11b404b433 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -41,6 +41,7 @@ #include <linux/i2c-mux.h> #include <linux/i2c/pca954x.h> #include <linux/module.h> +#include <linux/pm.h> #include <linux/slab.h> #define PCA954X_MAX_NCHANS 8 @@ -273,9 +274,23 @@ static int pca954x_remove(struct i2c_client *client) return 0; } +#ifdef CONFIG_PM_SLEEP +static int pca954x_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pca954x *data = i2c_get_clientdata(client); + + data->last_chan = 0; + return i2c_smbus_write_byte(client, 0); +} +#endif + +static SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume); + static struct i2c_driver pca954x_driver = { .driver = { .name = "pca954x", + .pm = &pca954x_pm, .owner = THIS_MODULE, }, .probe = pca954x_probe, diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 4d140bbbe100..9b7ee7e427df 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -89,6 +89,7 @@ struct idle_cpu { * Indicate which enable bits to clear here. */ unsigned long auto_demotion_disable_flags; + bool byt_auto_demotion_disable_flag; bool disable_promotion_to_c1e; }; @@ -442,6 +443,66 @@ static struct cpuidle_state hsw_cstates[] = { { .enter = NULL } }; +static struct cpuidle_state bdw_cstates[] = { + { + .name = "C1-BDW", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 2, + .target_residency = 2, + .enter = &intel_idle }, + { + .name = "C1E-BDW", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 10, + .target_residency = 20, + .enter = &intel_idle }, + { + .name = "C3-BDW", + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 40, + .target_residency = 100, + .enter = &intel_idle }, + { + .name = "C6-BDW", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, + .target_residency = 400, + .enter = &intel_idle }, + { + .name = "C7s-BDW", + .desc = "MWAIT 0x32", + .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 166, + .target_residency = 500, + .enter = &intel_idle }, + { + .name = "C8-BDW", + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 900, + .enter = &intel_idle }, + { + .name = "C9-BDW", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, + .target_residency = 1800, + .enter = &intel_idle }, + { + .name = "C10-BDW", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, + .target_residency = 7700, + .enter = &intel_idle }, + { + .enter = NULL } +}; static struct cpuidle_state atom_cstates[] = { { @@ -613,6 +674,7 @@ static const struct idle_cpu idle_cpu_snb = { static const struct idle_cpu idle_cpu_byt = { .state_table = byt_cstates, .disable_promotion_to_c1e = true, + .byt_auto_demotion_disable_flag = true, }; static const struct idle_cpu idle_cpu_ivb = { @@ -630,6 +692,11 @@ static const struct idle_cpu idle_cpu_hsw = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_bdw = 
{ + .state_table = bdw_cstates, + .disable_promotion_to_c1e = true, +}; + static const struct idle_cpu idle_cpu_avn = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, @@ -658,7 +725,10 @@ static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x3f, idle_cpu_hsw), ICPU(0x45, idle_cpu_hsw), ICPU(0x46, idle_cpu_hsw), - ICPU(0x4D, idle_cpu_avn), + ICPU(0x4d, idle_cpu_avn), + ICPU(0x3d, idle_cpu_bdw), + ICPU(0x4f, idle_cpu_bdw), + ICPU(0x56, idle_cpu_bdw), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); @@ -814,6 +884,11 @@ static int __init intel_idle_cpuidle_driver_init(void) if (icpu->auto_demotion_disable_flags) on_each_cpu(auto_demotion_disable, NULL, 1); + if (icpu->byt_auto_demotion_disable_flag) { + wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); + wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); + } + if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */ on_each_cpu(c1e_promotion_disable, NULL, 1); diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index a077cc86421b..19100fddd2ed 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -571,7 +571,7 @@ static int bma180_probe(struct i2c_client *client, trig->ops = &bma180_trigger_ops; iio_trigger_set_drvdata(trig, indio_dev); data->trig = trig; - indio_dev->trig = trig; + indio_dev->trig = iio_trigger_get(trig); ret = iio_trigger_register(trig); if (ret) diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index c55b81f7f970..d10bd0c97233 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -472,7 +472,7 @@ static int ad_sd_probe_trigger(struct iio_dev *indio_dev) goto error_free_irq; /* select default trigger */ - indio_dev->trig = sigma_delta->trig; + indio_dev->trig = iio_trigger_get(sigma_delta->trig); return 0; diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 772e869c280e..7eadaf16adc1 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -196,6 +196,7 @@ struct at91_adc_state { bool done; int irq; u16 last_value; + int chnb; struct mutex lock; u8 num_channels; void __iomem *reg_base; @@ -274,7 +275,7 @@ void handle_adc_eoc_trigger(int irq, struct iio_dev *idev) disable_irq_nosync(irq); iio_trigger_poll(idev->trig); } else { - st->last_value = at91_adc_readl(st, AT91_ADC_LCDR); + st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb)); st->done = true; wake_up_interruptible(&st->wq_data_avail); } @@ -351,7 +352,7 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private) unsigned int reg; status &= at91_adc_readl(st, AT91_ADC_IMR); - if (status & st->registers->drdy_mask) + if (status & GENMASK(st->num_channels - 1, 0)) handle_adc_eoc_trigger(irq, idev); if (status & AT91RL_ADC_IER_PEN) { @@ -418,7 +419,7 @@ static irqreturn_t at91_adc_9x5_interrupt(int irq, void *private) AT91_ADC_IER_YRDY | AT91_ADC_IER_PRDY; - if (status & st->registers->drdy_mask) + if (status & GENMASK(st->num_channels - 1, 0)) handle_adc_eoc_trigger(irq, idev); if (status & AT91_ADC_IER_PEN) { @@ -689,9 +690,10 @@ static int at91_adc_read_raw(struct iio_dev *idev, case IIO_CHAN_INFO_RAW: mutex_lock(&st->lock); + st->chnb = chan->channel; at91_adc_writel(st, AT91_ADC_CHER, AT91_ADC_CH(chan->channel)); - at91_adc_writel(st, AT91_ADC_IER, st->registers->drdy_mask); + at91_adc_writel(st, AT91_ADC_IER, BIT(chan->channel)); at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_START); ret = wait_event_interruptible_timeout(st->wq_data_avail, @@ -708,7 +710,7 @@ static int 
at91_adc_read_raw(struct iio_dev *idev,
 at91_adc_writel(st, AT91_ADC_CHDR, AT91_ADC_CH(chan->channel));
- at91_adc_writel(st, AT91_ADC_IDR, st->registers->drdy_mask);
+ at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 st->last_value = 0;
 st->done = false;
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index fd2745c62943..626b39749767 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1126,7 +1126,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
 chan->address = XADC_REG_VPVN;
 } else {
 chan->scan_index = 15 + reg;
- chan->scan_index = XADC_REG_VAUX(reg - 1);
+ chan->address = XADC_REG_VAUX(reg - 1);
 }
 num_channels++;
 chan++;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a3109a6f4d86..92068cdbf8c7 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -122,7 +122,8 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
 dev_err(&indio_dev->dev, "Trigger Register Failed\n");
 goto error_free_trig;
 }
- indio_dev->trig = attrb->trigger = trig;
+ attrb->trigger = trig;
+ indio_dev->trig = iio_trigger_get(trig);
 return ret;
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 8fc3a97eb266..8d8ca6f1e16a 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -49,7 +49,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
 goto iio_trigger_register_error;
 }
- indio_dev->trig = sdata->trig;
+ indio_dev->trig = iio_trigger_get(sdata->trig);
 return 0;
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index e3b3c5084070..eef50e91f17c 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -132,7 +132,7 @@ int itg3200_probe_trigger(struct iio_dev *indio_dev)
 goto error_free_irq;
 /* select default trigger */
- indio_dev->trig = st->trig;
+ indio_dev->trig = iio_trigger_get(st->trig);
 return 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 03b9372c1212..926fccea8de0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -135,7 +135,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
 ret = iio_trigger_register(st->trig);
 if (ret)
 goto error_free_irq;
- indio_dev->trig = st->trig;
+ indio_dev->trig = iio_trigger_get(st->trig);
 return 0;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c7497009d60a..f0846108d006 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -178,7 +178,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
 index = of_property_match_string(np, "io-channel-names", name);
 chan = of_iio_channel_get(np, index);
- if (!IS_ERR(chan))
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
 break;
 else if (name && index >= 0) {
 pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index a4b64130ac2f..68cae86dbd29 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -42,7 +42,8 @@
 #define ST_MAGN_FS_AVL_5600MG 5600
#define ST_MAGN_FS_AVL_8000MG 8000 #define ST_MAGN_FS_AVL_8100MG 8100 -#define ST_MAGN_FS_AVL_10000MG 10000 +#define ST_MAGN_FS_AVL_12000MG 12000 +#define ST_MAGN_FS_AVL_16000MG 16000 /* CUSTOM VALUES FOR SENSOR 1 */ #define ST_MAGN_1_WAI_EXP 0x3c @@ -69,20 +70,20 @@ #define ST_MAGN_1_FS_AVL_4700_VAL 0x05 #define ST_MAGN_1_FS_AVL_5600_VAL 0x06 #define ST_MAGN_1_FS_AVL_8100_VAL 0x07 -#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 1100 -#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 855 -#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 670 -#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 450 -#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 400 -#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 330 -#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 230 -#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 980 -#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 760 -#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 600 -#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 400 -#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 355 -#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 295 -#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 205 +#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909 +#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169 +#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492 +#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222 +#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500 +#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030 +#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347 +#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020 +#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315 +#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666 +#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500 +#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816 +#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389 +#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878 #define ST_MAGN_1_MULTIREAD_BIT false /* CUSTOM VALUES FOR SENSOR 2 */ @@ -105,10 +106,12 @@ #define ST_MAGN_2_FS_MASK 0x60 #define ST_MAGN_2_FS_AVL_4000_VAL 0x00 #define ST_MAGN_2_FS_AVL_8000_VAL 0x01 -#define ST_MAGN_2_FS_AVL_10000_VAL 0x02 -#define ST_MAGN_2_FS_AVL_4000_GAIN 430 -#define ST_MAGN_2_FS_AVL_8000_GAIN 230 -#define ST_MAGN_2_FS_AVL_10000_GAIN 230 +#define ST_MAGN_2_FS_AVL_12000_VAL 0x02 +#define ST_MAGN_2_FS_AVL_16000_VAL 0x03 +#define ST_MAGN_2_FS_AVL_4000_GAIN 146 +#define ST_MAGN_2_FS_AVL_8000_GAIN 292 +#define ST_MAGN_2_FS_AVL_12000_GAIN 438 +#define ST_MAGN_2_FS_AVL_16000_GAIN 584 #define ST_MAGN_2_MULTIREAD_BIT false #define ST_MAGN_2_OUT_X_L_ADDR 0x28 #define ST_MAGN_2_OUT_Y_L_ADDR 0x2a @@ -266,9 +269,14 @@ static const struct st_sensors st_magn_sensors[] = { .gain = ST_MAGN_2_FS_AVL_8000_GAIN, }, [2] = { - .num = ST_MAGN_FS_AVL_10000MG, - .value = ST_MAGN_2_FS_AVL_10000_VAL, - .gain = ST_MAGN_2_FS_AVL_10000_GAIN, + .num = ST_MAGN_FS_AVL_12000MG, + .value = ST_MAGN_2_FS_AVL_12000_VAL, + .gain = ST_MAGN_2_FS_AVL_12000_GAIN, + }, + [3] = { + .num = ST_MAGN_FS_AVL_16000MG, + .value = ST_MAGN_2_FS_AVL_16000_VAL, + .gain = ST_MAGN_2_FS_AVL_16000_GAIN, }, }, }, diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index 2bc7f5af64f4..f6d29614cb01 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -94,14 +94,14 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, port_priv = ib_get_agent_port(device, port_num); if (!port_priv) { - printk(KERN_ERR SPFX "Unable to find port agent\n"); + dev_err(&device->dev, "Unable to find port agent\n"); return; } agent = port_priv->agent[qpn]; ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); if (IS_ERR(ah)) { - printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n", + dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n", PTR_ERR(ah)); return; } @@ -110,7 +110,7 @@ void 
agent_send_response(struct ib_mad *mad, struct ib_grh *grh, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_KERNEL); if (IS_ERR(send_buf)) { - printk(KERN_ERR SPFX "ib_create_send_mad error\n"); + dev_err(&device->dev, "ib_create_send_mad error\n"); goto err1; } @@ -125,7 +125,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, } if (ib_post_send_mad(send_buf, NULL)) { - printk(KERN_ERR SPFX "ib_post_send_mad error\n"); + dev_err(&device->dev, "ib_post_send_mad error\n"); goto err2; } return; @@ -151,7 +151,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) { - printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n"); + dev_err(&device->dev, "No memory for ib_agent_port_private\n"); ret = -ENOMEM; goto error1; } @@ -161,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) port_priv->agent[0] = ib_register_mad_agent(device, port_num, IB_QPT_SMI, NULL, 0, &agent_send_handler, - NULL, NULL); + NULL, NULL, 0); if (IS_ERR(port_priv->agent[0])) { ret = PTR_ERR(port_priv->agent[0]); goto error2; @@ -172,7 +172,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) port_priv->agent[1] = ib_register_mad_agent(device, port_num, IB_QPT_GSI, NULL, 0, &agent_send_handler, - NULL, NULL); + NULL, NULL, 0); if (IS_ERR(port_priv->agent[1])) { ret = PTR_ERR(port_priv->agent[1]); goto error3; @@ -202,7 +202,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num) port_priv = __ib_get_agent_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); - printk(KERN_ERR SPFX "Port %d not found\n", port_num); + dev_err(&device->dev, "Port %d not found\n", port_num); return -ENODEV; } list_del(&port_priv->port_list); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c3239170d8b7..e28a494e2a3a 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3753,7 +3753,7 @@ static void cm_add_one(struct ib_device *ib_device) struct cm_port *port; struct ib_mad_reg_req reg_req = { .mgmt_class = IB_MGMT_CLASS_CM, - .mgmt_class_version = IB_CM_CLASS_VERSION + .mgmt_class_version = IB_CM_CLASS_VERSION, }; struct ib_port_modify port_modify = { .set_port_cap_mask = IB_PORT_CM_SUP @@ -3801,7 +3801,8 @@ static void cm_add_one(struct ib_device *ib_device) 0, cm_send_handler, cm_recv_handler, - port); + port, + 0); if (IS_ERR(port->mad_agent)) goto error2; diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 3d2e489ab732..ff9163dc1596 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -46,6 +46,7 @@ #include <linux/completion.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/sysctl.h> #include <rdma/iw_cm.h> #include <rdma/ib_addr.h> @@ -65,6 +66,20 @@ struct iwcm_work { struct list_head free_list; }; +static unsigned int default_backlog = 256; + +static struct ctl_table_header *iwcm_ctl_table_hdr; +static struct ctl_table iwcm_ctl_table[] = { + { + .procname = "default_backlog", + .data = &default_backlog, + .maxlen = sizeof(default_backlog), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { } +}; + /* * The following services provide a mechanism for pre-allocating iwcm_work * elements. 
The design pre-allocates them based on the cm_id type: @@ -425,6 +440,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + if (!backlog) + backlog = default_backlog; + ret = alloc_work_entries(cm_id_priv, backlog); if (ret) return ret; @@ -1030,11 +1048,20 @@ static int __init iw_cm_init(void) if (!iwcm_wq) return -ENOMEM; + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm", + iwcm_ctl_table); + if (!iwcm_ctl_table_hdr) { + pr_err("iw_cm: couldn't register sysctl paths\n"); + destroy_workqueue(iwcm_wq); + return -ENOMEM; + } + return 0; } static void __exit iw_cm_cleanup(void) { + unregister_net_sysctl_table(iwcm_ctl_table_hdr); destroy_workqueue(iwcm_wq); } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index ab31f136d04b..74c30f4c557e 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -33,6 +33,9 @@ * SOFTWARE. * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> @@ -195,7 +198,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, u8 rmpp_version, ib_mad_send_handler send_handler, ib_mad_recv_handler recv_handler, - void *context) + void *context, + u32 registration_flags) { struct ib_mad_port_private *port_priv; struct ib_mad_agent *ret = ERR_PTR(-EINVAL); @@ -211,68 +215,109 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, /* Validate parameters */ qpn = get_spl_qp_index(qp_type); - if (qpn == -1) + if (qpn == -1) { + dev_notice(&device->dev, + "ib_register_mad_agent: invalid QP Type %d\n", + qp_type); goto error1; + } - if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) + if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) { + dev_notice(&device->dev, + "ib_register_mad_agent: invalid RMPP Version %u\n", + rmpp_version); goto error1; + } /* Validate MAD registration request if supplied */ if (mad_reg_req) { - if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) + if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { + dev_notice(&device->dev, + "ib_register_mad_agent: invalid Class Version %u\n", + mad_reg_req->mgmt_class_version); goto error1; - if (!recv_handler) + } + if (!recv_handler) { + dev_notice(&device->dev, + "ib_register_mad_agent: no recv_handler\n"); goto error1; + } if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { /* * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only * one in this range currently allowed */ if (mad_reg_req->mgmt_class != - IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + dev_notice(&device->dev, + "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n", + mad_reg_req->mgmt_class); goto error1; + } } else if (mad_reg_req->mgmt_class == 0) { /* * Class 0 is reserved in IBA and is used for * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE */ + dev_notice(&device->dev, + "ib_register_mad_agent: Invalid Mgmt Class 0\n"); goto error1; } else if (is_vendor_class(mad_reg_req->mgmt_class)) { /* * If class is in "new" vendor range, * ensure supplied OUI is not zero */ - if (!is_vendor_oui(mad_reg_req->oui)) + if (!is_vendor_oui(mad_reg_req->oui)) { + dev_notice(&device->dev, + "ib_register_mad_agent: No OUI specified for class 0x%x\n", + mad_reg_req->mgmt_class); goto error1; + } } /* Make sure class supplied is consistent with RMPP */ if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { - if (rmpp_version) + if (rmpp_version) { + dev_notice(&device->dev, + 
"ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n", + mad_reg_req->mgmt_class); goto error1; + } } + /* Make sure class supplied is consistent with QP type */ if (qp_type == IB_QPT_SMI) { if ((mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) && (mad_reg_req->mgmt_class != - IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) + IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { + dev_notice(&device->dev, + "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n", + mad_reg_req->mgmt_class); goto error1; + } } else { if ((mad_reg_req->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || (mad_reg_req->mgmt_class == - IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) + IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { + dev_notice(&device->dev, + "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n", + mad_reg_req->mgmt_class); goto error1; + } } } else { /* No registration request supplied */ if (!send_handler) goto error1; + if (registration_flags & IB_MAD_USER_RMPP) + goto error1; } /* Validate device and port */ port_priv = ib_get_mad_port(device, port_num); if (!port_priv) { + dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n"); ret = ERR_PTR(-ENODEV); goto error1; } @@ -280,6 +325,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, /* Verify the QP requested is supported. For example, Ethernet devices * will not have QP0 */ if (!port_priv->qp_info[qpn].qp) { + dev_notice(&device->dev, + "ib_register_mad_agent: QP %d not supported\n", qpn); ret = ERR_PTR(-EPROTONOSUPPORT); goto error1; } @@ -316,6 +363,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, mad_agent_priv->agent.context = context; mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; mad_agent_priv->agent.port_num = port_num; + mad_agent_priv->agent.flags = registration_flags; spin_lock_init(&mad_agent_priv->lock); INIT_LIST_HEAD(&mad_agent_priv->send_list); INIT_LIST_HEAD(&mad_agent_priv->wait_list); @@ -706,7 +754,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, smi_handle_dr_smp_send(smp, device->node_type, port_num) == IB_SMI_DISCARD) { ret = -EINVAL; - printk(KERN_ERR PFX "Invalid directed route\n"); + dev_err(&device->dev, "Invalid directed route\n"); goto out; } @@ -718,7 +766,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, local = kmalloc(sizeof *local, GFP_ATOMIC); if (!local) { ret = -ENOMEM; - printk(KERN_ERR PFX "No memory for ib_mad_local_private\n"); + dev_err(&device->dev, "No memory for ib_mad_local_private\n"); goto out; } local->mad_priv = NULL; @@ -726,7 +774,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC); if (!mad_priv) { ret = -ENOMEM; - printk(KERN_ERR PFX "No memory for local response MAD\n"); + dev_err(&device->dev, "No memory for local response MAD\n"); kfree(local); goto out; } @@ -837,9 +885,9 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); if (!seg) { - printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem " - "alloc failed for len %zd, gfp %#x\n", - sizeof (*seg) + seg_size, gfp_mask); + dev_err(&send_buf->mad_agent->device->dev, + "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n", + sizeof (*seg) + seg_size, gfp_mask); free_send_rmpp_list(send_wr); return -ENOMEM; } @@ -862,6 +910,12 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private 
*send_wr, return 0; } +int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent) +{ + return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); +} +EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent); + struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, int rmpp_active, @@ -878,10 +932,12 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, pad = get_pad_size(hdr_len, data_len); message_size = hdr_len + data_len + pad; - if ((!mad_agent->rmpp_version && - (rmpp_active || message_size > sizeof(struct ib_mad))) || - (!rmpp_active && message_size > sizeof(struct ib_mad))) - return ERR_PTR(-EINVAL); + if (ib_mad_kernel_rmpp_agent(mad_agent)) { + if (!rmpp_active && message_size > sizeof(struct ib_mad)) + return ERR_PTR(-EINVAL); + } else + if (rmpp_active || message_size > sizeof(struct ib_mad)) + return ERR_PTR(-EINVAL); size = rmpp_active ? hdr_len : sizeof(struct ib_mad); buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); @@ -1135,7 +1191,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, &mad_agent_priv->send_list); spin_unlock_irqrestore(&mad_agent_priv->lock, flags); - if (mad_agent_priv->agent.rmpp_version) { + if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { ret = ib_send_rmpp_mad(mad_send_wr); if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) ret = ib_send_mad(mad_send_wr); @@ -1199,7 +1255,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp); int ib_process_mad_wc(struct ib_mad_agent *mad_agent, struct ib_wc *wc) { - printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n"); + dev_err(&mad_agent->device->dev, + "ib_process_mad_wc() not implemented yet\n"); return 0; } EXPORT_SYMBOL(ib_process_mad_wc); @@ -1211,7 +1268,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method, for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { if ((*method)->agent[i]) { - printk(KERN_ERR PFX "Method %d already in use\n", i); + pr_err("Method %d already in use\n", i); return -EINVAL; } } @@ -1223,8 +1280,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method) /* Allocate management method table */ *method = kzalloc(sizeof **method, GFP_ATOMIC); if (!*method) { - printk(KERN_ERR PFX "No memory for " - "ib_mad_mgmt_method_table\n"); + pr_err("No memory for ib_mad_mgmt_method_table\n"); return -ENOMEM; } @@ -1319,8 +1375,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, /* Allocate management class table for "new" class version */ *class = kzalloc(sizeof **class, GFP_ATOMIC); if (!*class) { - printk(KERN_ERR PFX "No memory for " - "ib_mad_mgmt_class_table\n"); + dev_err(&agent_priv->agent.device->dev, + "No memory for ib_mad_mgmt_class_table\n"); ret = -ENOMEM; goto error1; } @@ -1386,8 +1442,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, /* Allocate mgmt vendor class table for "new" class version */ vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); if (!vendor) { - printk(KERN_ERR PFX "No memory for " - "ib_mad_mgmt_vendor_class_table\n"); + dev_err(&agent_priv->agent.device->dev, + "No memory for ib_mad_mgmt_vendor_class_table\n"); goto error1; } @@ -1397,8 +1453,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, /* Allocate table for this management vendor class */ vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); if (!vendor_class) { - printk(KERN_ERR PFX "No memory for " - "ib_mad_mgmt_vendor_class\n"); + dev_err(&agent_priv->agent.device->dev, + "No memory for ib_mad_mgmt_vendor_class\n"); goto 
error2; } @@ -1429,7 +1485,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, goto check_in_use; } } - printk(KERN_ERR PFX "All OUI slots in use\n"); + dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); goto error3; check_in_use: @@ -1640,9 +1696,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv, if (mad_agent->agent.recv_handler) atomic_inc(&mad_agent->refcount); else { - printk(KERN_NOTICE PFX "No receive handler for client " - "%p on port %d\n", - &mad_agent->agent, port_priv->port_num); + dev_notice(&port_priv->device->dev, + "No receive handler for client %p on port %d\n", + &mad_agent->agent, port_priv->port_num); mad_agent = NULL; } } @@ -1658,8 +1714,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num) /* Make sure MAD base version is understood */ if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) { - printk(KERN_ERR PFX "MAD received with unsupported base " - "version %d\n", mad->mad_hdr.base_version); + pr_err("MAD received with unsupported base version %d\n", + mad->mad_hdr.base_version); goto out; } @@ -1685,6 +1741,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; return !mad_agent_priv->agent.rmpp_version || + !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) || (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); @@ -1812,7 +1869,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); - if (mad_agent_priv->agent.rmpp_version) { + if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, mad_recv_wc); if (!mad_recv_wc) { @@ -1827,23 +1884,39 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { spin_unlock_irqrestore(&mad_agent_priv->lock, flags); - ib_free_recv_mad(mad_recv_wc); - deref_mad_agent(mad_agent_priv); - return; - } - ib_mark_mad_done(mad_send_wr); - spin_unlock_irqrestore(&mad_agent_priv->lock, flags); + if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) + && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) + && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) + & IB_MGMT_RMPP_FLAG_ACTIVE)) { + /* user rmpp is in effect + * and this is an active RMPP MAD + */ + mad_recv_wc->wc->wr_id = 0; + mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, + mad_recv_wc); + atomic_dec(&mad_agent_priv->refcount); + } else { + /* not user rmpp, revert to normal behavior and + * drop the mad */ + ib_free_recv_mad(mad_recv_wc); + deref_mad_agent(mad_agent_priv); + return; + } + } else { + ib_mark_mad_done(mad_send_wr); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); - /* Defined behavior is to complete response before request */ - mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; - mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, - mad_recv_wc); - atomic_dec(&mad_agent_priv->refcount); + /* Defined behavior is to complete response before request */ + mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; + mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, + mad_recv_wc); + atomic_dec(&mad_agent_priv->refcount); - mad_send_wc.status = IB_WC_SUCCESS; - mad_send_wc.vendor_err = 0; - 
mad_send_wc.send_buf = &mad_send_wr->send_buf; - ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); + mad_send_wc.status = IB_WC_SUCCESS; + mad_send_wc.vendor_err = 0; + mad_send_wc.send_buf = &mad_send_wr->send_buf; + ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); + } } else { mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, mad_recv_wc); @@ -1911,8 +1984,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); if (!response) { - printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory " - "for response buffer\n"); + dev_err(&port_priv->device->dev, + "ib_mad_recv_done_handler no memory for response buffer\n"); goto out; } @@ -2083,7 +2156,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, mad_agent_priv = mad_send_wr->mad_agent_priv; spin_lock_irqsave(&mad_agent_priv->lock, flags); - if (mad_agent_priv->agent.rmpp_version) { + if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); if (ret == IB_RMPP_RESULT_CONSUMED) goto done; @@ -2176,7 +2249,8 @@ retry: ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, &bad_send_wr); if (ret) { - printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); + dev_err(&port_priv->device->dev, + "ib_post_send failed: %d\n", ret); mad_send_wr = queued_send_wr; wc->status = IB_WC_LOC_QP_OP_ERR; goto retry; @@ -2248,8 +2322,9 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv, IB_QP_STATE | IB_QP_CUR_STATE); kfree(attr); if (ret) - printk(KERN_ERR PFX "mad_error_handler - " - "ib_modify_qp to RTS : %d\n", ret); + dev_err(&port_priv->device->dev, + "mad_error_handler - ib_modify_qp to RTS : %d\n", + ret); else mark_sends_for_retry(qp_info); } @@ -2408,7 +2483,8 @@ static void local_completions(struct work_struct *work) if (local->mad_priv) { recv_mad_agent = local->recv_mad_agent; if (!recv_mad_agent) { - printk(KERN_ERR PFX "No receive MAD agent for local completion\n"); + dev_err(&mad_agent_priv->agent.device->dev, + "No receive MAD agent for local completion\n"); free_mad = 1; goto local_send_completion; } @@ -2476,7 +2552,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); - if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { + if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { ret = ib_retry_rmpp(mad_send_wr); switch (ret) { case IB_RMPP_RESULT_UNHANDLED: @@ -2589,7 +2665,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } else { mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); if (!mad_priv) { - printk(KERN_ERR PFX "No memory for receive buffer\n"); + dev_err(&qp_info->port_priv->device->dev, + "No memory for receive buffer\n"); ret = -ENOMEM; break; } @@ -2625,7 +2702,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, sizeof mad_priv->header, DMA_FROM_DEVICE); kmem_cache_free(ib_mad_cache, mad_priv); - printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); + dev_err(&qp_info->port_priv->device->dev, + "ib_post_recv failed: %d\n", ret); break; } } while (post); @@ -2681,7 +2759,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) { - printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); + dev_err(&port_priv->device->dev, + "Couldn't kmalloc ib_qp_attr\n"); return -ENOMEM; } @@ -2705,16 +2784,18 @@ static int 
ib_mad_port_start(struct ib_mad_port_private *port_priv) ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY); if (ret) { - printk(KERN_ERR PFX "Couldn't change QP%d state to " - "INIT: %d\n", i, ret); + dev_err(&port_priv->device->dev, + "Couldn't change QP%d state to INIT: %d\n", + i, ret); goto out; } attr->qp_state = IB_QPS_RTR; ret = ib_modify_qp(qp, attr, IB_QP_STATE); if (ret) { - printk(KERN_ERR PFX "Couldn't change QP%d state to " - "RTR: %d\n", i, ret); + dev_err(&port_priv->device->dev, + "Couldn't change QP%d state to RTR: %d\n", + i, ret); goto out; } @@ -2722,16 +2803,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) attr->sq_psn = IB_MAD_SEND_Q_PSN; ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); if (ret) { - printk(KERN_ERR PFX "Couldn't change QP%d state to " - "RTS: %d\n", i, ret); + dev_err(&port_priv->device->dev, + "Couldn't change QP%d state to RTS: %d\n", + i, ret); goto out; } } ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); if (ret) { - printk(KERN_ERR PFX "Failed to request completion " - "notification: %d\n", ret); + dev_err(&port_priv->device->dev, + "Failed to request completion notification: %d\n", + ret); goto out; } @@ -2741,7 +2824,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); if (ret) { - printk(KERN_ERR PFX "Couldn't post receive WRs\n"); + dev_err(&port_priv->device->dev, + "Couldn't post receive WRs\n"); goto out; } } @@ -2755,7 +2839,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context) struct ib_mad_qp_info *qp_info = qp_context; /* It's worse than that! He's dead, Jim! */ - printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n", + dev_err(&qp_info->port_priv->device->dev, + "Fatal error (%d) on MAD QP (%d)\n", event->event, qp_info->qp->qp_num); } @@ -2801,8 +2886,9 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info, qp_init_attr.event_handler = qp_event_handler; qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); if (IS_ERR(qp_info->qp)) { - printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n", - get_spl_qp_index(qp_type)); + dev_err(&qp_info->port_priv->device->dev, + "Couldn't create ib_mad QP%d\n", + get_spl_qp_index(qp_type)); ret = PTR_ERR(qp_info->qp); goto error; } @@ -2840,7 +2926,7 @@ static int ib_mad_port_open(struct ib_device *device, /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); if (!port_priv) { - printk(KERN_ERR PFX "No memory for ib_mad_port_private\n"); + dev_err(&device->dev, "No memory for ib_mad_port_private\n"); return -ENOMEM; } @@ -2860,21 +2946,21 @@ static int ib_mad_port_open(struct ib_device *device, ib_mad_thread_completion_handler, NULL, port_priv, cq_size, 0); if (IS_ERR(port_priv->cq)) { - printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n"); + dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); goto error3; } port_priv->pd = ib_alloc_pd(device); if (IS_ERR(port_priv->pd)) { - printk(KERN_ERR PFX "Couldn't create ib_mad PD\n"); + dev_err(&device->dev, "Couldn't create ib_mad PD\n"); ret = PTR_ERR(port_priv->pd); goto error4; } port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR(port_priv->mr)) { - printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n"); + dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n"); ret = PTR_ERR(port_priv->mr); goto error5; } @@ -2902,7 +2988,7 @@ static int ib_mad_port_open(struct ib_device *device, ret = 
ib_mad_port_start(port_priv); if (ret) { - printk(KERN_ERR PFX "Couldn't start port\n"); + dev_err(&device->dev, "Couldn't start port\n"); goto error9; } @@ -2946,7 +3032,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) port_priv = __ib_get_mad_port(device, port_num); if (port_priv == NULL) { spin_unlock_irqrestore(&ib_mad_port_list_lock, flags); - printk(KERN_ERR PFX "Port %d not found\n", port_num); + dev_err(&device->dev, "Port %d not found\n", port_num); return -ENODEV; } list_del_init(&port_priv->port_list); @@ -2984,14 +3070,12 @@ static void ib_mad_init_device(struct ib_device *device) for (i = start; i <= end; i++) { if (ib_mad_port_open(device, i)) { - printk(KERN_ERR PFX "Couldn't open %s port %d\n", - device->name, i); + dev_err(&device->dev, "Couldn't open port %d\n", i); goto error; } if (ib_agent_port_open(device, i)) { - printk(KERN_ERR PFX "Couldn't open %s port %d " - "for agents\n", - device->name, i); + dev_err(&device->dev, + "Couldn't open port %d for agents\n", i); goto error_agent; } } @@ -2999,20 +3083,17 @@ static void ib_mad_init_device(struct ib_device *device) error_agent: if (ib_mad_port_close(device, i)) - printk(KERN_ERR PFX "Couldn't close %s port %d\n", - device->name, i); + dev_err(&device->dev, "Couldn't close port %d\n", i); error: i--; while (i >= start) { if (ib_agent_port_close(device, i)) - printk(KERN_ERR PFX "Couldn't close %s port %d " - "for agents\n", - device->name, i); + dev_err(&device->dev, + "Couldn't close port %d for agents\n", i); if (ib_mad_port_close(device, i)) - printk(KERN_ERR PFX "Couldn't close %s port %d\n", - device->name, i); + dev_err(&device->dev, "Couldn't close port %d\n", i); i--; } } @@ -3033,12 +3114,12 @@ static void ib_mad_remove_device(struct ib_device *device) } for (i = 0; i < num_ports; i++, cur_port++) { if (ib_agent_port_close(device, cur_port)) - printk(KERN_ERR PFX "Couldn't close %s port %d " - "for agents\n", - device->name, cur_port); + dev_err(&device->dev, + "Couldn't close port %d for agents\n", + cur_port); if (ib_mad_port_close(device, cur_port)) - printk(KERN_ERR PFX "Couldn't close %s port %d\n", - device->name, cur_port); + dev_err(&device->dev, "Couldn't close port %d\n", + cur_port); } } @@ -3064,7 +3145,7 @@ static int __init ib_mad_init_module(void) SLAB_HWCACHE_ALIGN, NULL); if (!ib_mad_cache) { - printk(KERN_ERR PFX "Couldn't create ib_mad cache\n"); + pr_err("Couldn't create ib_mad cache\n"); ret = -ENOMEM; goto error1; } @@ -3072,7 +3153,7 @@ static int __init ib_mad_init_module(void) INIT_LIST_HEAD(&ib_mad_port_list); if (ib_register_client(&mad_client)) { - printk(KERN_ERR PFX "Couldn't register ib_mad client\n"); + pr_err("Couldn't register ib_mad client\n"); ret = -EINVAL; goto error2; } diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 9430ab4969c5..d1a0b0ee9444 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -42,9 +42,6 @@ #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> - -#define PFX "ib_mad: " - #define IB_MAD_QPS_CORE 2 /* Always QP0 and QP1 as a minimum */ /* QP and CQ parameters */ diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 233eaf541f55..c38f030f0dc9 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1184,7 +1184,7 @@ static void ib_sa_add_one(struct ib_device *device) sa_dev->port[i].agent = ib_register_mad_agent(device, i + s, IB_QPT_GSI, NULL, 0, send_handler, - recv_handler, 
sa_dev); + recv_handler, sa_dev, 0); if (IS_ERR(sa_dev->port[i].agent)) goto err; diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index a3a2e9c1639b..df0c4f605a21 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -105,6 +105,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, umem->length = size; umem->offset = addr & ~PAGE_MASK; umem->page_size = PAGE_SIZE; + umem->pid = get_task_pid(current, PIDTYPE_PID); /* * We ask for writable memory if any access flags other than * "remote read" are set. "Local write" and "remote write" @@ -198,6 +199,7 @@ out: if (ret < 0) { if (need_release) __ib_umem_release(context->device, umem, 0); + put_pid(umem->pid); kfree(umem); } else current->mm->pinned_vm = locked; @@ -230,15 +232,19 @@ void ib_umem_release(struct ib_umem *umem) { struct ib_ucontext *context = umem->context; struct mm_struct *mm; + struct task_struct *task; unsigned long diff; __ib_umem_release(umem->context->device, umem, 1); - mm = get_task_mm(current); - if (!mm) { - kfree(umem); - return; - } + task = get_pid_task(umem->pid, PIDTYPE_PID); + put_pid(umem->pid); + if (!task) + goto out; + mm = get_task_mm(task); + put_task_struct(task); + if (!mm) + goto out; diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; @@ -262,9 +268,10 @@ void ib_umem_release(struct ib_umem *umem) } else down_write(&mm->mmap_sem); - current->mm->pinned_vm -= diff; + mm->pinned_vm -= diff; up_write(&mm->mmap_sem); mmput(mm); +out: kfree(umem); } EXPORT_SYMBOL(ib_umem_release); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 1acb99100556..928cdd20e2d1 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -33,6 +33,8 @@ * SOFTWARE. 
*/ +#define pr_fmt(fmt) "user_mad: " fmt + #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> @@ -504,13 +506,15 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); - if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) { - copy_offset = IB_MGMT_MAD_HDR; - rmpp_active = 0; - } else { + + if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class) + && ib_mad_kernel_rmpp_agent(agent)) { copy_offset = IB_MGMT_RMPP_HDR; rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & - IB_MGMT_RMPP_FLAG_ACTIVE; + IB_MGMT_RMPP_FLAG_ACTIVE; + } else { + copy_offset = IB_MGMT_MAD_HDR; + rmpp_active = 0; } data_len = count - hdr_size(file) - hdr_len; @@ -556,14 +560,22 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, rmpp_mad->mad_hdr.tid = *tid; } - spin_lock_irq(&file->send_lock); - ret = is_duplicate(file, packet); - if (!ret) + if (!ib_mad_kernel_rmpp_agent(agent) + && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class) + && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) { + spin_lock_irq(&file->send_lock); list_add_tail(&packet->list, &file->send_list); - spin_unlock_irq(&file->send_lock); - if (ret) { - ret = -EINVAL; - goto err_msg; + spin_unlock_irq(&file->send_lock); + } else { + spin_lock_irq(&file->send_lock); + ret = is_duplicate(file, packet); + if (!ret) + list_add_tail(&packet->list, &file->send_list); + spin_unlock_irq(&file->send_lock); + if (ret) { + ret = -EINVAL; + goto err_msg; + } } ret = ib_post_send_mad(packet->msg, NULL); @@ -614,6 +626,8 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, mutex_lock(&file->mutex); if (!file->port->ib_dev) { + dev_notice(file->port->dev, + "ib_umad_reg_agent: invalid device\n"); ret = -EPIPE; goto out; } @@ -624,6 +638,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, } if (ureq.qpn != 0 && ureq.qpn != 1) { + dev_notice(file->port->dev, + "ib_umad_reg_agent: invalid QPN %d specified\n", + ureq.qpn); ret = -EINVAL; goto out; } @@ -632,11 +649,15 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, if (!__get_agent(file, agent_id)) goto found; + dev_notice(file->port->dev, + "ib_umad_reg_agent: Max Agents (%u) reached\n", + IB_UMAD_MAX_AGENTS); ret = -ENOMEM; goto out; found: if (ureq.mgmt_class) { + memset(&req, 0, sizeof(req)); req.mgmt_class = ureq.mgmt_class; req.mgmt_class_version = ureq.mgmt_class_version; memcpy(req.oui, ureq.oui, sizeof req.oui); @@ -657,7 +678,7 @@ found: ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, ureq.mgmt_class ? 
&req : NULL, ureq.rmpp_version, - send_handler, recv_handler, file); + send_handler, recv_handler, file, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); agent = NULL; @@ -673,10 +694,11 @@ found: if (!file->already_used) { file->already_used = 1; if (!file->use_pkey_index) { - printk(KERN_WARNING "user_mad: process %s did not enable " - "P_Key index support.\n", current->comm); - printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt " - "has info on the new ABI.\n"); + dev_warn(file->port->dev, + "process %s did not enable P_Key index support.\n", + current->comm); + dev_warn(file->port->dev, + " Documentation/infiniband/user_mad.txt has info on the new ABI.\n"); } } @@ -694,6 +716,119 @@ out: return ret; } +static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) +{ + struct ib_user_mad_reg_req2 ureq; + struct ib_mad_reg_req req; + struct ib_mad_agent *agent = NULL; + int agent_id; + int ret; + + mutex_lock(&file->port->file_mutex); + mutex_lock(&file->mutex); + + if (!file->port->ib_dev) { + dev_notice(file->port->dev, + "ib_umad_reg_agent2: invalid device\n"); + ret = -EPIPE; + goto out; + } + + if (copy_from_user(&ureq, arg, sizeof(ureq))) { + ret = -EFAULT; + goto out; + } + + if (ureq.qpn != 0 && ureq.qpn != 1) { + dev_notice(file->port->dev, + "ib_umad_reg_agent2: invalid QPN %d specified\n", + ureq.qpn); + ret = -EINVAL; + goto out; + } + + if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) { + dev_notice(file->port->dev, + "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n", + ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); + ret = -EINVAL; + + if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP, + (u32 __user *) (arg + offsetof(struct + ib_user_mad_reg_req2, flags)))) + ret = -EFAULT; + + goto out; + } + + for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id) + if (!__get_agent(file, agent_id)) + goto found; + + dev_notice(file->port->dev, + "ib_umad_reg_agent2: Max Agents (%u) reached\n", + IB_UMAD_MAX_AGENTS); + ret = -ENOMEM; + goto out; + +found: + if (ureq.mgmt_class) { + memset(&req, 0, sizeof(req)); + req.mgmt_class = ureq.mgmt_class; + req.mgmt_class_version = ureq.mgmt_class_version; + if (ureq.oui & 0xff000000) { + dev_notice(file->port->dev, + "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n", + ureq.oui); + ret = -EINVAL; + goto out; + } + req.oui[2] = ureq.oui & 0x0000ff; + req.oui[1] = (ureq.oui & 0x00ff00) >> 8; + req.oui[0] = (ureq.oui & 0xff0000) >> 16; + memcpy(req.method_mask, ureq.method_mask, + sizeof(req.method_mask)); + } + + agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num, + ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, + ureq.mgmt_class ? 
&req : NULL, + ureq.rmpp_version, + send_handler, recv_handler, file, + ureq.flags); + if (IS_ERR(agent)) { + ret = PTR_ERR(agent); + agent = NULL; + goto out; + } + + if (put_user(agent_id, + (u32 __user *)(arg + + offsetof(struct ib_user_mad_reg_req2, id)))) { + ret = -EFAULT; + goto out; + } + + if (!file->already_used) { + file->already_used = 1; + file->use_pkey_index = 1; + } + + file->agent[agent_id] = agent; + ret = 0; + +out: + mutex_unlock(&file->mutex); + + if (ret && agent) + ib_unregister_mad_agent(agent); + + mutex_unlock(&file->port->file_mutex); + + return ret; +} + + static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) { struct ib_mad_agent *agent = NULL; @@ -749,6 +884,8 @@ static long ib_umad_ioctl(struct file *filp, unsigned int cmd, return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg); case IB_USER_MAD_ENABLE_PKEY: return ib_umad_enable_pkey(filp->private_data); + case IB_USER_MAD_REGISTER_AGENT2: + return ib_umad_reg_agent2(filp->private_data, (void __user *) arg); default: return -ENOIOCTLCMD; } @@ -765,6 +902,8 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg)); case IB_USER_MAD_ENABLE_PKEY: return ib_umad_enable_pkey(filp->private_data); + case IB_USER_MAD_REGISTER_AGENT2: + return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg)); default: return -ENOIOCTLCMD; } @@ -983,7 +1122,7 @@ static CLASS_ATTR_STRING(abi_version, S_IRUGO, static dev_t overflow_maj; static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); -static int find_overflow_devnum(void) +static int find_overflow_devnum(struct ib_device *device) { int ret; @@ -991,7 +1130,8 @@ static int find_overflow_devnum(void) ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, "infiniband_mad"); if (ret) { - printk(KERN_ERR "user_mad: couldn't register dynamic device number\n"); + dev_err(&device->dev, + "couldn't register dynamic device number\n"); return ret; } } @@ -1014,7 +1154,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); if (devnum >= IB_UMAD_MAX_PORTS) { spin_unlock(&port_lock); - devnum = find_overflow_devnum(); + devnum = find_overflow_devnum(device); if (devnum < 0) return -1; @@ -1200,14 +1340,14 @@ static int __init ib_umad_init(void) ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2, "infiniband_mad"); if (ret) { - printk(KERN_ERR "user_mad: couldn't register device number\n"); + pr_err("couldn't register device number\n"); goto out; } umad_class = class_create(THIS_MODULE, "infiniband_mad"); if (IS_ERR(umad_class)) { ret = PTR_ERR(umad_class); - printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n"); + pr_err("couldn't create class infiniband_mad\n"); goto out_chrdev; } @@ -1215,13 +1355,13 @@ static int __init ib_umad_init(void) ret = class_create_file(umad_class, &class_attr_abi_version.attr); if (ret) { - printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); + pr_err("couldn't create abi_version attribute\n"); goto out_class; } ret = ib_register_client(&umad_client); if (ret) { - printk(KERN_ERR "user_mad: couldn't register ib_umad client\n"); + pr_err("couldn't register ib_umad client\n"); goto out_class; } diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index a283274a5a09..643c08a025a5 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -221,6 +221,7 @@ 
IB_UVERBS_DECLARE_CMD(query_port); IB_UVERBS_DECLARE_CMD(alloc_pd); IB_UVERBS_DECLARE_CMD(dealloc_pd); IB_UVERBS_DECLARE_CMD(reg_mr); +IB_UVERBS_DECLARE_CMD(rereg_mr); IB_UVERBS_DECLARE_CMD(dereg_mr); IB_UVERBS_DECLARE_CMD(alloc_mw); IB_UVERBS_DECLARE_CMD(dealloc_mw); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index ea6203ee7bcc..0600c50e6215 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1002,6 +1002,99 @@ err_free: return ret; } +ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_rereg_mr cmd; + struct ib_uverbs_rereg_mr_resp resp; + struct ib_udata udata; + struct ib_pd *pd = NULL; + struct ib_mr *mr; + struct ib_pd *old_pd; + int ret; + struct ib_uobject *uobj; + + if (out_len < sizeof(resp)) + return -ENOSPC; + + if (copy_from_user(&cmd, buf, sizeof(cmd))) + return -EFAULT; + + INIT_UDATA(&udata, buf + sizeof(cmd), + (unsigned long) cmd.response + sizeof(resp), + in_len - sizeof(cmd), out_len - sizeof(resp)); + + if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags) + return -EINVAL; + + if ((cmd.flags & IB_MR_REREG_TRANS) && + (!cmd.start || !cmd.hca_va || 0 >= cmd.length || + (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))) + return -EINVAL; + + uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, + file->ucontext); + + if (!uobj) + return -EINVAL; + + mr = uobj->object; + + if (cmd.flags & IB_MR_REREG_ACCESS) { + ret = ib_check_mr_access(cmd.access_flags); + if (ret) + goto put_uobjs; + } + + if (cmd.flags & IB_MR_REREG_PD) { + pd = idr_read_pd(cmd.pd_handle, file->ucontext); + if (!pd) { + ret = -EINVAL; + goto put_uobjs; + } + } + + if (atomic_read(&mr->usecnt)) { + ret = -EBUSY; + goto put_uobj_pd; + } + + old_pd = mr->pd; + ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, + cmd.length, cmd.hca_va, + cmd.access_flags, pd, &udata); + if (!ret) { + if (cmd.flags & IB_MR_REREG_PD) { + atomic_inc(&pd->usecnt); + mr->pd = pd; + atomic_dec(&old_pd->usecnt); + } + } else { + goto put_uobj_pd; + } + + memset(&resp, 0, sizeof(resp)); + resp.lkey = mr->lkey; + resp.rkey = mr->rkey; + + if (copy_to_user((void __user *)(unsigned long)cmd.response, + &resp, sizeof(resp))) + ret = -EFAULT; + else + ret = in_len; + +put_uobj_pd: + if (cmd.flags & IB_MR_REREG_PD) + put_pd_read(pd); + +put_uobjs: + + put_uobj_write(mr->uobject); + + return ret; +} + ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 08219fb3338b..c73b22a257fe 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -87,6 +87,7 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, + [IB_USER_VERBS_CMD_REREG_MR] = ib_uverbs_rereg_mr, [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw, [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw, diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index e7bee46868d1..abd97247443e 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -140,5 +140,9 @@ void ib_copy_path_rec_from_user(struct 
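ib_uverbs_rereg_mr() above only accepts a translation change when the new start address and the HCA virtual address share the same offset within a page. A small standalone sketch of that check, assuming a 4 KiB page size instead of the kernel's PAGE_MASK:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ull
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

/* Returns 1 if start and hca_va could describe the same page layout. */
static int same_page_offset(uint64_t start, uint64_t hca_va)
{
	return (start & ~DEMO_PAGE_MASK) == (hca_va & ~DEMO_PAGE_MASK);
}

int main(void)
{
	printf("%d\n", same_page_offset(0x10000123ull, 0x20000123ull)); /* 1: same offset */
	printf("%d\n", same_page_offset(0x10000123ull, 0x20000124ull)); /* 0: offsets differ */
	return 0;
}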
ib_sa_path_rec *dst, dst->packet_life_time = src->packet_life_time; dst->preference = src->preference; dst->packet_life_time_selector = src->packet_life_time_selector; + + memset(dst->smac, 0, sizeof(dst->smac)); + memset(dst->dmac, 0, sizeof(dst->dmac)); + dst->vlan_id = 0xffff; } EXPORT_SYMBOL(ib_copy_path_rec_from_user); diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index 00400c352c1a..766a71ccefed 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c @@ -604,16 +604,14 @@ static int c2_up(struct net_device *netdev) tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc); c2_port->mem_size = tx_size + rx_size; - c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size, - &c2_port->dma); + c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size, + &c2_port->dma); if (c2_port->mem == NULL) { pr_debug("Unable to allocate memory for " "host descriptor rings\n"); return -ENOMEM; } - memset(c2_port->mem, 0, c2_port->mem_size); - /* Create the Rx host descriptor ring */ if ((ret = c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma, diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c index 49e0e8533f74..1b63185b4ad4 100644 --- a/drivers/infiniband/hw/amso1100/c2_cq.c +++ b/drivers/infiniband/hw/amso1100/c2_cq.c @@ -260,11 +260,14 @@ static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) mq->msg_pool.host, dma_unmap_addr(mq, mapping)); } -static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, - int msg_size) +static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, + size_t q_size, size_t msg_size) { u8 *pool_start; + if (q_size > SIZE_MAX / msg_size) + return -EINVAL; + pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size, &mq->host_dma, GFP_KERNEL); if (!pool_start) diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index fbe6051af254..c9df0549f51d 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -227,6 +227,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) chp = get_chp(dev, qid); if (chp) { + t4_clear_cq_armed(&chp->cq); spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index c158fcc02bca..41cd6882b648 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1105,7 +1105,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, struct c4iw_cq *schp) { int count; - int flushed; + int rq_flushed, sq_flushed; unsigned long flag; PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); @@ -1123,27 +1123,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, c4iw_flush_hw_cq(rchp); c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); - flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); + rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&rchp->lock, flag); - if (flushed) { - spin_lock_irqsave(&rchp->comp_handler_lock, flag); - (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); - spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); - } /* locking hierarchy: cq lock first, then qp lock. 
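The c2_alloc_cq_buf() change above guards the q_size * msg_size multiplication against size_t overflow before handing the product to the DMA allocator, and the surrounding hunks fold alloc-plus-memset into a zeroing allocator. A self-contained sketch of the same guard; malloc/calloc stand in for pci_zalloc_consistent purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate q_size messages of msg_size bytes, refusing overflowing requests. */
static void *alloc_msg_pool(size_t q_size, size_t msg_size)
{
	if (msg_size && q_size > SIZE_MAX / msg_size)
		return NULL;                  /* would overflow, as in the driver check */
	return calloc(q_size, msg_size);      /* zeroed, like pci_zalloc_consistent */
}

int main(void)
{
	void *ok  = alloc_msg_pool(1024, 64);
	void *bad = alloc_msg_pool(SIZE_MAX / 2, 16);

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}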
*/ spin_lock_irqsave(&schp->lock, flag); spin_lock(&qhp->lock); if (schp != rchp) c4iw_flush_hw_cq(schp); - flushed = c4iw_flush_sq(qhp); + sq_flushed = c4iw_flush_sq(qhp); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&schp->lock, flag); - if (flushed) { - spin_lock_irqsave(&schp->comp_handler_lock, flag); - (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); - spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + + if (schp == rchp) { + if (t4_clear_cq_armed(&rchp->cq) && + (rq_flushed || sq_flushed)) { + spin_lock_irqsave(&rchp->comp_handler_lock, flag); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, + rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + } + } else { + if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { + spin_lock_irqsave(&rchp->comp_handler_lock, flag); + (*rchp->ibcq.comp_handler)(&rchp->ibcq, + rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); + } + if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { + spin_lock_irqsave(&schp->comp_handler_lock, flag); + (*schp->ibcq.comp_handler)(&schp->ibcq, + schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, flag); + } } } diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index df5edfa31a8f..c04e5134b30c 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -524,6 +524,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq) return !wq->rq.queue[wq->rq.size].status.db_off; } +enum t4_cq_flags { + CQ_ARMED = 1, +}; + struct t4_cq { struct t4_cqe *queue; dma_addr_t dma_addr; @@ -544,12 +548,19 @@ struct t4_cq { u16 cidx_inc; u8 gen; u8 error; + unsigned long flags; }; +static inline int t4_clear_cq_armed(struct t4_cq *cq) +{ + return test_and_clear_bit(CQ_ARMED, &cq->flags); +} + static inline int t4_arm_cq(struct t4_cq *cq, int se) { u32 val; + set_bit(CQ_ARMED, &cq->flags); while (cq->cidx_inc > CIDXINC_MASK) { val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) | INGRESSQID(cq->cqid); diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index 43f2d0424d4f..e890e5ba0e01 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c @@ -726,7 +726,7 @@ bail: * @dd: the infinipath device * @pkeys: the PKEY table */ -static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) +static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port) { struct ipath_portdata *pd; int i; @@ -759,6 +759,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) } if (changed) { u64 pkey; + struct ib_event event; pkey = (u64) dd->ipath_pkeys[0] | ((u64) dd->ipath_pkeys[1] << 16) | @@ -768,12 +769,17 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) (unsigned long long) pkey); ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, pkey); + + event.event = IB_EVENT_PKEY_CHANGE; + event.device = &dd->verbs_dev->ibdev; + event.element.port_num = port; + ib_dispatch_event(&event); } return 0; } static int recv_subn_set_pkeytable(struct ib_smp *smp, - struct ib_device *ibdev) + struct ib_device *ibdev, u8 port) { u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); __be16 *p = (__be16 *) smp->data; @@ -784,7 +790,7 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp, for (i = 0; i < n; i++) q[i] = be16_to_cpu(p[i]); - if (startpx != 0 || set_pkeys(dev->dd, q) != 0) + if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0) smp->status |= IB_SMP_INVALID_FIELD; return 
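The cxgb4 hunks above latch an "armed" bit when the CQ is armed and test-and-clear it before invoking the completion handler, so a flush notifies the consumer at most once per arm even when the RQ and SQ share a CQ. A rough user-space analogue of that latch with C11 atomics (the kernel uses set_bit()/test_and_clear_bit() on cq->flags):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cq_armed;

static void arm_cq(void)
{
	atomic_store(&cq_armed, true);         /* like set_bit(CQ_ARMED, &cq->flags) */
}

static int clear_cq_armed(void)
{
	/* like test_and_clear_bit(): returns the old value, leaves the bit clear */
	return atomic_exchange(&cq_armed, false);
}

int main(void)
{
	arm_cq();
	if (clear_cq_armed())
		printf("notify consumer (first flush)\n");
	if (!clear_cq_armed())
		printf("second flush: CQ no longer armed, no notification\n");
	return 0;
}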
recv_subn_get_pkeytable(smp, ibdev); @@ -1342,7 +1348,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = recv_subn_set_portinfo(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_PKEY_TABLE: - ret = recv_subn_set_pkeytable(smp, ibdev); + ret = recv_subn_set_pkeytable(smp, ibdev, port_num); goto bail; case IB_SMP_ATTR_SM_INFO: if (dev->port_cap_flags & IB_PORT_SM_DISABLED) { diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index dc66c4506916..1da1252dcdb3 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c @@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages, /* call with current->mm->mmap_sem held */ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, - struct page **p, struct vm_area_struct **vma) + struct page **p) { unsigned long lock_limit; size_t got; @@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, ret = get_user_pages(current, current->mm, start_page + got * PAGE_SIZE, num_pages - got, 1, 1, - p + got, vma); + p + got, NULL); if (ret < 0) goto bail_release; } @@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages, down_write(&current->mm->mmap_sem); - ret = __ipath_get_user_pages(start_page, num_pages, p, NULL); + ret = __ipath_get_user_pages(start_page, num_pages, p); up_write(&current->mm->mmap_sem); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 287ad0564acd..82a7dd87089b 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -891,7 +891,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) agent = ib_register_mad_agent(&dev->ib_dev, p + 1, q ?
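The set_pkeys() hunk above writes the four 16-bit partition keys as one packed 64-bit register value before dispatching IB_EVENT_PKEY_CHANGE. A tiny sketch of that packing with sample P_Key values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t pkeys[4] = { 0xffff, 0x8001, 0x0000, 0x0000 };
	uint64_t reg;

	reg = (uint64_t)pkeys[0] |
	      ((uint64_t)pkeys[1] << 16) |
	      ((uint64_t)pkeys[2] << 32) |
	      ((uint64_t)pkeys[3] << 48);

	printf("partition key register: 0x%016llx\n", (unsigned long long)reg);
	return 0;
}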
IB_QPT_GSI : IB_QPT_SMI, NULL, 0, send_handler, - NULL, NULL); + NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 0f7027e7db13..bda5994ceb68 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -59,6 +59,7 @@ #define MLX4_IB_FLOW_MAX_PRIO 0xFFF #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF +#define MLX4_IB_CARD_REV_A0 0xA0 MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); @@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev) return dmfs; } +static int num_ib_ports(struct mlx4_dev *dev) +{ + int ib_ports = 0; + int i; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_ports++; + + return ib_ports; +} + static int mlx4_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { @@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; + int have_ib_ports; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); @@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, memset(props, 0, sizeof *props); + have_ib_ports = num_ib_ports(dev->dev); + props->fw_ver = dev->dev->caps.fw_ver; props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | IB_DEVICE_PORT_ACTIVE_EVENT | @@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; - if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM) + if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT) props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; - if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH) + if (dev->dev->caps.max_gso_sz && + (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && + (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) props->device_cap_flags |= IB_DEVICE_UD_TSO; if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; @@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->state = IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); props->active_mtu = IB_MTU_256; - spin_lock(&iboe->lock); + spin_lock_bh(&iboe->lock); ndev = iboe->netdevs[port - 1]; if (!ndev) goto out_unlock; @@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, IB_PORT_ACTIVE : IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); out_unlock: - spin_unlock(&iboe->lock); + spin_unlock_bh(&iboe->lock); out: mlx4_free_cmd_mailbox(mdev->dev, mailbox); return err; @@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, if (!mqp->port) return 0; - spin_lock(&mdev->iboe.lock); + spin_lock_bh(&mdev->iboe.lock); ndev = mdev->iboe.netdevs[mqp->port - 1]; if (ndev) dev_hold(ndev); - spin_unlock(&mdev->iboe.lock); + spin_unlock_bh(&mdev->iboe.lock); if (ndev) { ret = 1; @@ -910,8 +927,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp, const struct default_rules *pdefault_rules = 
default_table; u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); - for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++, - pdefault_rules++) { + for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) { __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; memset(&field_types, 0, sizeof(field_types)); @@ -965,8 +981,7 @@ static int __mlx4_ib_create_default_rules( int size = 0; int i; - for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/ - sizeof(pdefault_rules->rules_create_list[0]); i++) { + for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { int ret; union ib_flow_spec ib_spec; switch (pdefault_rules->rules_create_list[i]) { @@ -1091,6 +1106,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) return err; } +static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, + u64 *reg_id) +{ + void *ib_flow; + union ib_flow_spec *ib_spec; + struct mlx4_dev *dev = to_mdev(qp->device)->dev; + int err = 0; + + if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return 0; /* do nothing */ + + ib_flow = flow_attr + 1; + ib_spec = (union ib_flow_spec *)ib_flow; + + if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1) + return 0; /* do nothing */ + + err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac, + flow_attr->port, qp->qp_num, + MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff), + reg_id); + return err; +} + static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain) @@ -1138,6 +1177,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, i++; } + if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { + err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); + if (err) + goto err_free; + } + return &mflow->ibflow; err_free: @@ -1264,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) mutex_lock(&mqp->mutex); ge = find_gid_entry(mqp, gid->raw); if (ge) { - spin_lock(&mdev->iboe.lock); + spin_lock_bh(&mdev->iboe.lock); ndev = ge->added ? 
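Several loops above replace open-coded sizeof(x)/sizeof(x[0]) bounds with ARRAY_SIZE(). A minimal sketch of the idiom (the kernel's macro additionally rejects non-array arguments at compile time):

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int default_rules[] = { 3, 7, 42 };

int main(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(default_rules); i++)
		printf("rule[%zu] = %d\n", i, default_rules[i]);
	return 0;
}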
mdev->iboe.netdevs[ge->port - 1] : NULL; if (ndev) dev_hold(ndev); - spin_unlock(&mdev->iboe.lock); + spin_unlock_bh(&mdev->iboe.lock); if (ndev) dev_put(ndev); list_del(&ge->list); @@ -1389,6 +1434,9 @@ static void update_gids_task(struct work_struct *work) int err; struct mlx4_dev *dev = gw->dev->dev; + if (!gw->dev->ib_active) + return; + mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox)); @@ -1419,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work) int err; struct mlx4_dev *dev = gw->dev->dev; + if (!gw->dev->ib_active) + return; + mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { pr_warn("reset gid table failed\n"); @@ -1553,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, return 0; iboe = &ibdev->iboe; - spin_lock(&iboe->lock); + spin_lock_bh(&iboe->lock); for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) if ((netif_is_bond_master(real_dev) && @@ -1563,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, update_gid_table(ibdev, port, gid, event == NETDEV_DOWN, 0); - spin_unlock(&iboe->lock); + spin_unlock_bh(&iboe->lock); return 0; } @@ -1636,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, new_smac = mlx4_mac_to_u64(dev->dev_addr); read_unlock(&dev_base_lock); + atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); + + /* no need for update QP1 and mac registration in non-SRIOV */ + if (!mlx4_is_mfunc(ibdev->dev)) + return; + mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); qp = ibdev->qp1_proxy[port - 1]; if (qp) { int new_smac_index; - u64 old_smac = qp->pri.smac; + u64 old_smac; struct mlx4_update_qp_params update_params; + mutex_lock(&qp->mutex); + old_smac = qp->pri.smac; if (new_smac == old_smac) goto unlock; @@ -1652,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, goto unlock; update_params.smac_index = new_smac_index; - if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, + if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, &update_params)) { release_mac = new_smac; goto unlock; } - + /* if old port was zero, no mac was yet registered for this QP */ + if (qp->pri.smac_port) + release_mac = old_smac; qp->pri.smac = new_smac; + qp->pri.smac_port = port; qp->pri.smac_index = new_smac_index; - - release_mac = old_smac; } unlock: - mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); if (release_mac != MLX4_IB_INVALID_MAC) mlx4_unregister_mac(ibdev->dev, port, release_mac); + if (qp) + mutex_unlock(&qp->mutex); + mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); } static void mlx4_ib_get_dev_addr(struct net_device *dev, @@ -1678,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, struct inet6_dev *in6_dev; union ib_gid *pgid; struct inet6_ifaddr *ifp; + union ib_gid default_gid; #endif union ib_gid gid; @@ -1698,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev, in_dev_put(in_dev); } #if IS_ENABLED(CONFIG_IPV6) + mlx4_make_default_gid(dev, &default_gid); /* IPv6 gids */ in6_dev = in6_dev_get(dev); if (in6_dev) { read_lock_bh(&in6_dev->lock); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { pgid = (union ib_gid *)&ifp->addr; + if (!memcmp(pgid, &default_gid, sizeof(*pgid))) + continue; update_gid_table(ibdev, port, pgid, 0, 0); } read_unlock_bh(&in6_dev->lock); @@ -1725,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) struct net_device *dev; struct mlx4_ib_iboe *iboe = 
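The mlx4 changes above start caching each port's current MAC as a packed 64-bit value in an atomic64_t, so the QP1 and send paths can read it without taking the iboe lock or dereferencing a netdev. A user-space analogue of packing a MAC into 64 bits and publishing it atomically; the names here are illustrative, not the driver's:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t port_mac;   /* analogue of iboe.mac[port - 1] */

static uint64_t mac_to_u64(const uint8_t mac[6])
{
	uint64_t v = 0;
	for (int i = 0; i < 6; i++)
		v = (v << 8) | mac[i];
	return v;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x11, 0x22, 0x33 };

	atomic_store(&port_mac, mac_to_u64(mac));            /* writer: netdev event */
	printf("cached mac = 0x%012llx\n",
	       (unsigned long long)atomic_load(&port_mac));  /* reader: send path */
	return 0;
}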
&ibdev->iboe; int i; + int err = 0; - for (i = 1; i <= ibdev->num_ports; ++i) - if (reset_gid_table(ibdev, i)) - return -1; + for (i = 1; i <= ibdev->num_ports; ++i) { + if (rdma_port_get_link_layer(&ibdev->ib_dev, i) == + IB_LINK_LAYER_ETHERNET) { + err = reset_gid_table(ibdev, i); + if (err) + goto out; + } + } read_lock(&dev_base_lock); - spin_lock(&iboe->lock); + spin_lock_bh(&iboe->lock); for_each_netdev(&init_net, dev) { u8 port = mlx4_ib_get_dev_port(dev, ibdev); - if (port) + /* port will be non-zero only for ETH ports */ + if (port) { + mlx4_ib_set_default_gid(ibdev, dev, port); mlx4_ib_get_dev_addr(dev, ibdev, port); + } } - spin_unlock(&iboe->lock); + spin_unlock_bh(&iboe->lock); read_unlock(&dev_base_lock); - - return 0; +out: + return err; } static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, @@ -1756,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, iboe = &ibdev->iboe; - spin_lock(&iboe->lock); + spin_lock_bh(&iboe->lock); mlx4_foreach_ib_transport_port(port, ibdev->dev) { enum ib_port_state port_state = IB_PORT_NOP; struct net_device *old_master = iboe->masters[port - 1]; @@ -1788,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? IB_PORT_ACTIVE : IB_PORT_DOWN; mlx4_ib_set_default_gid(ibdev, curr_netdev, port); - } else { - reset_gid_table(ibdev, port); - } - /* if using bonding/team and a slave port is down, we don't the bond IP - * based gids in the table since flows that select port by gid may get - * the down port. - */ - if (curr_master && (port_state == IB_PORT_DOWN)) { - reset_gid_table(ibdev, port); - mlx4_ib_set_default_gid(ibdev, curr_netdev, port); - } - /* if bonding is used it is possible that we add it to masters - * only after IP address is assigned to the net bonding - * interface. - */ - if (curr_master && (old_master != curr_master)) { - reset_gid_table(ibdev, port); - mlx4_ib_set_default_gid(ibdev, curr_netdev, port); - mlx4_ib_get_dev_addr(curr_master, ibdev, port); - } + if (curr_master) { + /* if using bonding/team and a slave port is down, we + * don't want the bond IP based gids in the table since + * flows that select port by gid may get the down port. + */ + if (port_state == IB_PORT_DOWN) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, + curr_netdev, + port); + } else { + /* gids from the upper dev (bond/team) + * should appear in port's gid table + */ + mlx4_ib_get_dev_addr(curr_master, + ibdev, port); + } + } + /* if bonding is used it is possible that we add it to + * masters only after IP address is assigned to the + * net bonding interface. 
+ */ + if (curr_master && (old_master != curr_master)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, + curr_netdev, port); + mlx4_ib_get_dev_addr(curr_master, ibdev, port); + } - if (!curr_master && (old_master != curr_master)) { + if (!curr_master && (old_master != curr_master)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, + curr_netdev, port); + mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); + } + } else { reset_gid_table(ibdev, port); - mlx4_ib_set_default_gid(ibdev, curr_netdev, port); - mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); } } - spin_unlock(&iboe->lock); + spin_unlock_bh(&iboe->lock); if (update_qps_port > 0) mlx4_ib_update_qps(ibdev, dev, update_qps_port); @@ -2007,6 +2094,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_REREG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | @@ -2059,6 +2147,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; + ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr; ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr; ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list; @@ -2156,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) goto err_steer_free_bitmap; } + for (j = 1; j <= ibdev->dev->caps.num_ports; j++) + atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); + if (ib_register_device(&ibdev->ib_dev, NULL)) goto err_steer_free_bitmap; @@ -2192,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) } } #endif - for (i = 1 ; i <= ibdev->num_ports ; ++i) - reset_gid_table(ibdev, i); - rtnl_lock(); - mlx4_ib_scan_netdevs(ibdev, NULL, 0); - rtnl_unlock(); - mlx4_ib_init_gid_table(ibdev); + if (mlx4_ib_init_gid_table(ibdev)) + goto err_notif; } for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { @@ -2345,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) struct mlx4_ib_dev *ibdev = ibdev_ptr; int p; + ibdev->ib_active = false; + flush_workqueue(wq); + mlx4_ib_close_sriov(ibdev); mlx4_ib_mad_cleanup(ibdev); ib_unregister_device(&ibdev->ib_dev); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 369da3ca5d64..6eb743f65f6f 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -451,6 +451,7 @@ struct mlx4_ib_iboe { spinlock_t lock; struct net_device *netdevs[MLX4_MAX_PORTS]; struct net_device *masters[MLX4_MAX_PORTS]; + atomic64_t mac[MLX4_MAX_PORTS]; struct notifier_block nb; struct notifier_block nb_inet; struct notifier_block nb_inet6; @@ -788,5 +789,9 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn); void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count); int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, int is_attach); +int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, + u64 start, u64 length, u64 virt_addr, + int mr_access_flags, struct ib_pd *pd, + struct ib_udata *udata); #endif /* MLX4_IB_H */ diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index cb2a8727f3fb..8f9325cfc85d 100644 --- 
a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -144,8 +144,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (!mr) return ERR_PTR(-ENOMEM); + /* Force registering the memory as writable. */ + /* Used for memory re-registeration. HCA protects the access */ mr->umem = ib_umem_get(pd->uobject->context, start, length, - access_flags, 0); + access_flags | IB_ACCESS_LOCAL_WRITE, 0); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err_free; @@ -183,6 +185,93 @@ err_free: return ERR_PTR(err); } +int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, + u64 start, u64 length, u64 virt_addr, + int mr_access_flags, struct ib_pd *pd, + struct ib_udata *udata) +{ + struct mlx4_ib_dev *dev = to_mdev(mr->device); + struct mlx4_ib_mr *mmr = to_mmr(mr); + struct mlx4_mpt_entry *mpt_entry; + struct mlx4_mpt_entry **pmpt_entry = &mpt_entry; + int err; + + /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs, + * we assume that the calls can't run concurrently. Otherwise, a + * race exists. + */ + err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry); + + if (err) + return err; + + if (flags & IB_MR_REREG_PD) { + err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry, + to_mpd(pd)->pdn); + + if (err) + goto release_mpt_entry; + } + + if (flags & IB_MR_REREG_ACCESS) { + err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, + convert_access(mr_access_flags)); + + if (err) + goto release_mpt_entry; + } + + if (flags & IB_MR_REREG_TRANS) { + int shift; + int err; + int n; + + mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); + ib_umem_release(mmr->umem); + mmr->umem = ib_umem_get(mr->uobject->context, start, length, + mr_access_flags | + IB_ACCESS_LOCAL_WRITE, + 0); + if (IS_ERR(mmr->umem)) { + err = PTR_ERR(mmr->umem); + /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ + mmr->umem = NULL; + goto release_mpt_entry; + } + n = ib_umem_page_count(mmr->umem); + shift = ilog2(mmr->umem->page_size); + + err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr, + virt_addr, length, n, shift, + *pmpt_entry); + if (err) { + ib_umem_release(mmr->umem); + goto release_mpt_entry; + } + mmr->mmr.iova = virt_addr; + mmr->mmr.size = length; + + err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); + if (err) { + mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); + ib_umem_release(mmr->umem); + goto release_mpt_entry; + } + } + + /* If we couldn't transfer the MR to the HCA, just remember to + * return a failure. But dereg_mr will free the resources. 
+ */ + err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry); + if (!err && flags & IB_MR_REREG_ACCESS) + mmr->mmr.access = mr_access_flags; + +release_mpt_entry: + mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry); + + return err; +} + int mlx4_ib_dereg_mr(struct ib_mr *ibmr) { struct mlx4_ib_mr *mr = to_mmr(ibmr); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 67780452f0cf..9c5150c3cb31 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) pr_warn("modify QP %06x to RESET failed.\n", qp->mqp.qpn); - if (qp->pri.smac) { + if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); qp->pri.smac = 0; + qp->pri.smac_port = 0; } if (qp->alt.smac) { mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); @@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, * If one was already assigned, but the new mac differs, * unregister the old one and register the new one. */ - if (!smac_info->smac || smac_info->smac != smac) { + if ((!smac_info->smac && !smac_info->smac_port) || + smac_info->smac != smac) { /* register candidate now, unreg if needed, after success */ smac_index = mlx4_register_mac(dev->dev, port, smac); if (smac_index >= 0) { @@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, struct mlx4_qp_context *context) { - struct net_device *ndev; u64 u64_mac; int smac_index; - - ndev = dev->iboe.netdevs[qp->port - 1]; - if (ndev) { - smac = ndev->dev_addr; - u64_mac = mlx4_mac_to_u64(smac); - } else { - u64_mac = dev->dev->caps.def_mac[qp->port]; - } + u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); - if (!qp->pri.smac) { + if (!qp->pri.smac && !qp->pri.smac_port) { smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); if (smac_index >= 0) { qp->pri.candidate_smac_index = smac_index; @@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, int steer_qp = 0; int err = -EINVAL; + /* APM is not supported under RoCE */ + if (attr_mask & IB_QP_ALT_PATH && + rdma_port_get_link_layer(&dev->ib_dev, qp->port) == + IB_LINK_LAYER_ETHERNET) + return -ENOTSUPP; + context = kzalloc(sizeof *context, GFP_KERNEL); if (!context) return -ENOMEM; @@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, } } - if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) + if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | MLX4_IB_LINK_TYPE_ETH; + if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + /* set QP to receive both tunneled & non-tunneled packets */ + if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET))) + context->srqn = cpu_to_be32(7 << 28); + } + } if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) { int is_eth = rdma_port_get_link_layer( @@ -1780,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_steer_qp_reg(dev, qp, 0); } - if (qp->pri.smac) { + if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); 
qp->pri.smac = 0; + qp->pri.smac_port = 0; } if (qp->alt.smac) { mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); @@ -1806,11 +1813,12 @@ out: if (err && steer_qp) mlx4_ib_steer_qp_reg(dev, qp, 0); kfree(context); - if (qp->pri.candidate_smac) { + if (qp->pri.candidate_smac || + (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { if (err) { mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); } else { - if (qp->pri.smac) + if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); qp->pri.smac = qp->pri.candidate_smac; qp->pri.smac_index = qp->pri.candidate_smac_index; @@ -2083,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, return 0; } +static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac) +{ + int i; + + for (i = ETH_ALEN; i; i--) { + dst_mac[i - 1] = src_mac & 0xff; + src_mac >>= 8; + } +} + static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) { @@ -2197,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, } if (is_eth) { - u8 *smac; struct in6_addr in6; u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; @@ -2210,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); memcpy(&in6, sgid.raw, sizeof(in6)); - if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) - smac = to_mdev(sqp->qp.ibqp.device)-> - iboe.netdevs[sqp->qp.port - 1]->dev_addr; - else /* use the src mac of the tunnel */ - smac = ah->av.eth.s_mac; - memcpy(sqp->ud_header.eth.smac_h, smac, 6); + if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { + u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); + u8 smac[ETH_ALEN]; + + mlx4_u64_to_smac(smac, mac); + memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN); + } else { + /* use the src mac of the tunnel */ + memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN); + } + if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); if (!is_vlan) { diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7efe6e3f3542..8c574b63d77b 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -2501,7 +2501,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, spin_lock_irqsave(&qp->sq.lock, flags); for (nreq = 0; wr; nreq++, wr = wr->next) { - if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) { + if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { mlx5_ib_warn(dev, "\n"); err = -EINVAL; *bad_wr = wr; diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index b6f7f457fc55..8881fa376e06 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -294,7 +294,7 @@ int mthca_create_agents(struct mthca_dev *dev) agent = ib_register_mad_agent(&dev->ib_dev, p + 1, q ? 
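mlx4_u64_to_smac() above is the inverse of that packing: it peels the six MAC octets back out of the cached 64-bit value, filling the array from the last octet backwards. A standalone sketch of the same loop:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void u64_to_smac(uint8_t *dst_mac, uint64_t src_mac)
{
	for (int i = ETH_ALEN; i; i--) {
		dst_mac[i - 1] = src_mac & 0xff;   /* lowest byte becomes the last octet */
		src_mac >>= 8;
	}
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	u64_to_smac(mac, 0x0002c9112233ull);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}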
IB_QPT_GSI : IB_QPT_SMI, NULL, 0, send_handler, - NULL, NULL); + NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 90200245c5eb..02120d340d50 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -1003,13 +1003,13 @@ int nes_init_cqp(struct nes_device *nesdev) (sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) + sizeof(struct nes_hw_cqp_qp_context); - nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size, - &nesdev->cqp_pbase); + nesdev->cqp_vbase = pci_zalloc_consistent(nesdev->pcidev, + nesdev->cqp_mem_size, + &nesdev->cqp_pbase); if (!nesdev->cqp_vbase) { nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n"); return -ENOMEM; } - memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size); /* Allocate a twice the number of CQP requests as the SQ size */ nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) * @@ -1691,13 +1691,13 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) (NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) + sizeof(struct nes_hw_nic_qp_context); - nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size, - &nesvnic->nic_pbase); + nesvnic->nic_vbase = pci_zalloc_consistent(nesdev->pcidev, + nesvnic->nic_mem_size, + &nesvnic->nic_pbase); if (!nesvnic->nic_vbase) { nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n"); return -ENOMEM; } - memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size); nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n", nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size); diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 218dd3574285..fef067c959fc 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1616,8 +1616,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, entries, nescq->cq_mem_size, nescq->hw_cq.cq_number); /* allocate the physical buffer space */ - mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size, - &nescq->hw_cq.cq_pbase); + mem = pci_zalloc_consistent(nesdev->pcidev, nescq->cq_mem_size, + &nescq->hw_cq.cq_pbase); if (!mem) { printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n"); nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); @@ -1625,7 +1625,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, return ERR_PTR(-ENOMEM); } - memset(mem, 0, nescq->cq_mem_size); nescq->hw_cq.cq_vbase = mem; nescq->hw_cq.cq_head = 0; nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n", diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 19011dbb930f..b43456ae124b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -40,7 +40,7 @@ #include <be_roce.h> #include "ocrdma_sli.h" -#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u" +#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u" #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" @@ -137,6 +137,7 @@ struct mqe_ctx { u16 cqe_status; u16 ext_status; bool cmd_done; + bool fw_error_state; }; struct ocrdma_hw_mr { @@ -235,7 +236,10 @@ struct ocrdma_dev { struct list_head entry; struct rcu_head rcu; int id; - u64 
stag_arr[OCRDMA_MAX_STAG]; + u64 *stag_arr; + u8 sl; /* service level */ + bool pfc_state; + atomic_t update_sl; u16 pvid; u32 asic_id; @@ -518,4 +522,22 @@ static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; } +static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio) +{ + return *(pfc + prio); +} + +static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio) +{ + return *(app_prio + prio); +} + +static inline u8 ocrdma_is_enabled_and_synced(u32 state) +{ /* May also be used to interpret TC-state, QCN-state + * Appl-state and Logical-link-state in future. + */ + return (state & OCRDMA_STATE_FLAG_ENABLED) && + (state & OCRDMA_STATE_FLAG_SYNC); +} + #endif diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index d4cc01f10c01..ac02ce4e8040 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -35,8 +35,10 @@ #include "ocrdma_ah.h" #include "ocrdma_hw.h" +#define OCRDMA_VID_PCP_SHIFT 0xD + static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, - struct ib_ah_attr *attr, int pdid) + struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) { int status = 0; u16 vlan_tag; bool vlan_enabled = false; @@ -47,15 +49,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, memset(ð, 0, sizeof(eth)); memset(&grh, 0, sizeof(grh)); - ah->sgid_index = attr->grh.sgid_index; - + /* VLAN */ vlan_tag = attr->vlan_id; if (!vlan_tag || (vlan_tag > 0xFFF)) vlan_tag = dev->pvid; if (vlan_tag && (vlan_tag < 0x1000)) { eth.eth_type = cpu_to_be16(0x8100); eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); - vlan_tag |= (attr->sl & 7) << 13; + vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; eth.vlan_tag = cpu_to_be16(vlan_tag); eth_sz = sizeof(struct ocrdma_eth_vlan); vlan_enabled = true; @@ -63,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); eth_sz = sizeof(struct ocrdma_eth_basic); } + /* MAC */ memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); - memcpy(ð.dmac[0], attr->dmac, ETH_ALEN); status = ocrdma_resolve_dmac(dev, attr, ð.dmac[0]); if (status) return status; - status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, - (union ib_gid *)&grh.sgid[0]); - if (status) - return status; + ah->sgid_index = attr->grh.sgid_index; + memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid)); + memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); grh.tclass_flow = cpu_to_be32((6 << 28) | (attr->grh.traffic_class << 24) | @@ -79,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, /* 0x1b is next header value in GRH */ grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | (0x1b << 8) | attr->grh.hop_limit); - - memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); + /* Eth HDR */ memcpy(&ah->av->eth_hdr, ð, eth_sz); memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); if (vlan_enabled) @@ -96,10 +95,14 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) struct ocrdma_ah *ah; struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device); + union ib_gid sgid; + u8 zmac[ETH_ALEN]; if (!(attr->ah_flags & IB_AH_GRH)) return ERR_PTR(-EINVAL); + if (atomic_cmpxchg(&dev->update_sl, 1, 0)) + ocrdma_init_service_level(dev); ah = kzalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) return ERR_PTR(-ENOMEM); @@ -107,7 +110,27 @@ 
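The ocrdma set_av_attr() change above folds the device service level into the 802.1Q tag as the 3-bit priority field (bits 15:13), leaving the 12-bit VLAN ID in the low bits. A small sketch of that tag composition with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define VID_PCP_SHIFT 13   /* same bit position as OCRDMA_VID_PCP_SHIFT (0xD) */

int main(void)
{
	uint16_t vlan_id = 100;          /* 12-bit VID */
	uint8_t  sl      = 5;            /* service level used as priority */
	uint16_t tci;

	tci  = vlan_id & 0x0fff;
	tci |= (uint16_t)((sl & 0x07) << VID_PCP_SHIFT);

	printf("TCI = 0x%04x (pcp=%d vid=%d)\n",
	       tci, tci >> VID_PCP_SHIFT, tci & 0x0fff);
	return 0;
}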
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) status = ocrdma_alloc_av(dev, ah); if (status) goto av_err; - status = set_av_attr(dev, ah, attr, pd->id); + + status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid); + if (status) { + pr_err("%s(): Failed to query sgid, status = %d\n", + __func__, status); + goto av_conf_err; + } + + memset(&zmac, 0, ETH_ALEN); + if (pd->uctx && + memcmp(attr->dmac, &zmac, ETH_ALEN)) { + status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid, + attr->dmac, &attr->vlan_id); + if (status) { + pr_err("%s(): Failed to resolve dmac from gid." + "status = %d\n", __func__, status); + goto av_conf_err; + } + } + + status = set_av_attr(dev, ah, attr, &sgid, pd->id); if (status) goto av_conf_err; @@ -141,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) struct ocrdma_av *av = ah->av; struct ocrdma_grh *grh; attr->ah_flags |= IB_AH_GRH; - if (ah->av->valid & Bit(1)) { + if (ah->av->valid & OCRDMA_AV_VALID) { grh = (struct ocrdma_grh *)((u8 *)ah->av + sizeof(struct ocrdma_eth_vlan)); attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 3bbf2010a821..dd35ae558ae1 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -525,7 +525,7 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev, cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS; cmd->eqn = eq->id; - cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe); + cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe); ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE, cq->dma, PAGE_SIZE_4K); @@ -661,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, { struct ocrdma_qp *qp = NULL; struct ocrdma_cq *cq = NULL; - struct ib_event ib_evt = { 0 }; + struct ib_event ib_evt; int cq_event = 0; int qp_event = 1; int srq_event = 0; @@ -674,6 +674,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK]; + memset(&ib_evt, 0, sizeof(ib_evt)); + ib_evt.device = &dev->ibdev; switch (type) { @@ -771,6 +773,10 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev, OCRDMA_AE_PVID_MCQE_TAG_MASK) >> OCRDMA_AE_PVID_MCQE_TAG_SHIFT); break; + + case OCRDMA_ASYNC_EVENT_COS_VALUE: + atomic_set(&dev->update_sl, 1); + break; default: /* Not interested evts. 
*/ break; @@ -962,8 +968,12 @@ static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev) msecs_to_jiffies(30000)); if (status) return 0; - else + else { + dev->mqe_ctx.fw_error_state = true; + pr_err("%s(%d) mailbox timeout: fw not responding\n", + __func__, dev->id); return -1; + } } /* issue a mailbox command on the MQ */ @@ -975,6 +985,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) struct ocrdma_mbx_rsp *rsp = NULL; mutex_lock(&dev->mqe_ctx.lock); + if (dev->mqe_ctx.fw_error_state) + goto mbx_err; ocrdma_post_mqe(dev, mqe); status = ocrdma_wait_mqe_cmpl(dev); if (status) @@ -1078,7 +1090,8 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; attr->max_mw = rsp->max_mw; attr->max_mr = rsp->max_mr; - attr->max_mr_size = ~0ull; + attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) | + rsp->max_mr_size_lo; attr->max_fmr = 0; attr->max_pages_per_frmr = rsp->max_pages_per_frmr; attr->max_num_mr_pbl = rsp->max_num_mr_pbl; @@ -1252,7 +1265,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev) ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va; hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs; - dev->hba_port_num = hba_attribs->phy_port; + dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv & + OCRDMA_HBA_ATTRB_PTNUM_MASK) + >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT; strncpy(dev->model_number, hba_attribs->controller_model_number, 31); } @@ -1302,7 +1317,8 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed) goto mbx_err; rsp = (struct ocrdma_get_link_speed_rsp *)cmd; - *lnk_speed = rsp->phys_port_speed; + *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK) + >> OCRDMA_PHY_PS_SHIFT; mbx_err: kfree(cmd); @@ -1328,11 +1344,16 @@ static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev) goto mbx_err; rsp = (struct ocrdma_get_phy_info_rsp *)cmd; - dev->phy.phy_type = le16_to_cpu(rsp->phy_type); + dev->phy.phy_type = + (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK); + dev->phy.interface_type = + (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK) + >> OCRDMA_IF_TYPE_SHIFT; dev->phy.auto_speeds_supported = - le16_to_cpu(rsp->auto_speeds_supported); + (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK); dev->phy.fixed_speeds_supported = - le16_to_cpu(rsp->fixed_speeds_supported); + (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK) + >> OCRDMA_FSPEED_SUPP_SHIFT; mbx_err: kfree(cmd); return status; @@ -1457,8 +1478,8 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va; for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) { - pbes[i].pa_lo = (u32) (pa & 0xffffffff); - pbes[i].pa_hi = (u32) upper_32_bits(pa); + pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff); + pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa)); pa += PAGE_SIZE; } cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF); @@ -1501,6 +1522,7 @@ static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev) ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va, dev->av_tbl.pa); + dev->av_tbl.va = NULL; dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va, dev->av_tbl.pbl.pa); kfree(cmd); @@ -1624,14 +1646,16 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << OCRDMA_CREATE_CQ_TYPE_SHIFT; cq->phase_change = false; - cmd->cmd.cqe_count = (cq->len / cqe_size); + cmd->cmd.pdid_cqecnt = (cq->len / cqe_size); } else { - cmd->cmd.cqe_count = (cq->len 
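Two patterns recur in the ocrdma_hw.c hunks above: a 64-bit attribute is rebuilt from separate hi/lo 32-bit response words, and several fields are now extracted from packed words with a mask and shift. A short sketch of both; the mask, shift, and sample values are stand-ins for the OCRDMA_* definitions:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PHY_PS_MASK  0x0000ff00u   /* stand-in for OCRDMA_PHY_PS_MASK */
#define DEMO_PHY_PS_SHIFT 8             /* stand-in for OCRDMA_PHY_PS_SHIFT */

int main(void)
{
	uint32_t max_mr_size_hi = 0x000000ffu;
	uint32_t max_mr_size_lo = 0xffffffffu;
	uint64_t max_mr_size = ((uint64_t)max_mr_size_hi << 32) | max_mr_size_lo;

	uint32_t pflt_pps_ld_pnum = 0x00012a03u;
	uint8_t  lnk_speed = (pflt_pps_ld_pnum & DEMO_PHY_PS_MASK) >> DEMO_PHY_PS_SHIFT;

	printf("max_mr_size = 0x%llx, link speed field = 0x%x\n",
	       (unsigned long long)max_mr_size, (unsigned)lnk_speed);
	return 0;
}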
/ cqe_size) - 1; + cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1; cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID; cq->phase_change = true; } - cmd->cmd.pd_id = pd_id; /* valid only for v3 */ + /* pd_id valid only for v3 */ + cmd->cmd.pdid_cqecnt |= (pd_id << + OCRDMA_CREATE_CQ_CMD_PDID_SHIFT); ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size); status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); if (status) @@ -2206,7 +2230,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; qp->rq_cq = cq; - if (pd->dpp_enabled && pd->num_dpp_qp) { + if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp && + (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) { ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, dpp_cq_id); } @@ -2264,6 +2289,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, if ((ah_attr->ah_flags & IB_AH_GRH) == 0) return -EINVAL; + if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0)) + ocrdma_init_service_level(qp->dev); cmd->params.tclass_sq_psn |= (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); cmd->params.rnt_rc_sl_fl |= @@ -2297,6 +2324,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, cmd->params.vlan_dmac_b4_to_b5 |= vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; + cmd->params.rnt_rc_sl_fl |= + (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; } return 0; } @@ -2604,6 +2633,168 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq) return status; } +static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype, + struct ocrdma_dcbx_cfg *dcbxcfg) +{ + int status = 0; + dma_addr_t pa; + struct ocrdma_mqe cmd; + + struct ocrdma_get_dcbx_cfg_req *req = NULL; + struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL; + struct pci_dev *pdev = dev->nic_info.pdev; + struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge; + + memset(&cmd, 0, sizeof(struct ocrdma_mqe)); + cmd.hdr.pyld_len = max_t (u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp), + sizeof(struct ocrdma_get_dcbx_cfg_req)); + req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL); + if (!req) { + status = -ENOMEM; + goto mem_err; + } + + cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & + OCRDMA_MQE_HDR_SGE_CNT_MASK; + mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL); + mqe_sge->pa_hi = (u32) upper_32_bits(pa); + mqe_sge->len = cmd.hdr.pyld_len; + + memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req)); + ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG, + OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len); + req->param_type = ptype; + + status = ocrdma_mbx_cmd(dev, &cmd); + if (status) + goto mbx_err; + + rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req; + ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp)); + memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg)); + +mbx_err: + dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa); +mem_err: + return status; +} + +#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08 +#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05 + +static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype, + struct ocrdma_dcbx_cfg *dcbxcfg, + u8 *srvc_lvl) +{ + int status = -EINVAL, indx, slindx; + int ventry_cnt; + struct ocrdma_app_parameter *app_param; + u8 valid, proto_sel; + u8 app_prio, pfc_prio; + u16 proto; + + if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) { + pr_info("%s ocrdma%d DCBX is disabled\n", + dev_name(&dev->nic_info.pdev->dev), dev->id); + goto 
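The DCBX mailbox setup above hands the firmware a scatter entry whose DMA address is split into 32-bit low and high halves. A tiny sketch of that split, with a local upper_32_bits() helper defined here for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32_bits(uint64_t v)
{
	return (uint32_t)(v >> 32);
}

int main(void)
{
	uint64_t pa = 0x0000000123456000ull;   /* example DMA address */
	uint32_t pa_lo = (uint32_t)(pa & 0xffffffffu);
	uint32_t pa_hi = upper_32_bits(pa);

	printf("pa_lo=0x%08x pa_hi=0x%08x\n", pa_lo, pa_hi);
	return 0;
}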
out; + } + + if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) { + pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n", + dev_name(&dev->nic_info.pdev->dev), dev->id, + (ptype > 0 ? "operational" : "admin"), + (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ? + "enabled" : "disabled", + (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ? + "" : ", not sync'ed"); + goto out; + } else { + pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n", + dev_name(&dev->nic_info.pdev->dev), dev->id); + } + + ventry_cnt = (dcbxcfg->tcv_aev_opv_st >> + OCRDMA_DCBX_APP_ENTRY_SHIFT) + & OCRDMA_DCBX_STATE_MASK; + + for (indx = 0; indx < ventry_cnt; indx++) { + app_param = &dcbxcfg->app_param[indx]; + valid = (app_param->valid_proto_app >> + OCRDMA_APP_PARAM_VALID_SHIFT) + & OCRDMA_APP_PARAM_VALID_MASK; + proto_sel = (app_param->valid_proto_app + >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT) + & OCRDMA_APP_PARAM_PROTO_SEL_MASK; + proto = app_param->valid_proto_app & + OCRDMA_APP_PARAM_APP_PROTO_MASK; + + if ( + valid && proto == OCRDMA_APP_PROTO_ROCE && + proto_sel == OCRDMA_PROTO_SELECT_L2) { + for (slindx = 0; slindx < + OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) { + app_prio = ocrdma_get_app_prio( + (u8 *)app_param->app_prio, + slindx); + pfc_prio = ocrdma_get_pfc_prio( + (u8 *)dcbxcfg->pfc_prio, + slindx); + + if (app_prio && pfc_prio) { + *srvc_lvl = slindx; + status = 0; + goto out; + } + } + if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) { + pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n", + dev_name(&dev->nic_info.pdev->dev), + dev->id, proto); + } + } + } + +out: + return status; +} + +void ocrdma_init_service_level(struct ocrdma_dev *dev) +{ + int status = 0, indx; + struct ocrdma_dcbx_cfg dcbxcfg; + u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL; + int ptype = OCRDMA_PARAMETER_TYPE_OPER; + + for (indx = 0; indx < 2; indx++) { + status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg); + if (status) { + pr_err("%s(): status=%d\n", __func__, status); + ptype = OCRDMA_PARAMETER_TYPE_ADMIN; + continue; + } + + status = ocrdma_parse_dcbxcfg_rsp(dev, ptype, + &dcbxcfg, &srvc_lvl); + if (status) { + ptype = OCRDMA_PARAMETER_TYPE_ADMIN; + continue; + } + + break; + } + + if (status) + pr_info("%s ocrdma%d service level default\n", + dev_name(&dev->nic_info.pdev->dev), dev->id); + else + pr_info("%s ocrdma%d service level %d\n", + dev_name(&dev->nic_info.pdev->dev), dev->id, + srvc_lvl); + + dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state); + dev->sl = srvc_lvl; +} + int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah) { int i; @@ -2709,13 +2900,15 @@ int ocrdma_init_hw(struct ocrdma_dev *dev) goto conf_err; status = ocrdma_mbx_get_phy_info(dev); if (status) - goto conf_err; + goto info_attrb_err; status = ocrdma_mbx_get_ctrl_attribs(dev); if (status) - goto conf_err; + goto info_attrb_err; return 0; +info_attrb_err: + ocrdma_mbx_delete_ah_tbl(dev); conf_err: ocrdma_destroy_mq(dev); mq_err: diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index e513f7293142..6eed8f191322 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -135,4 +135,6 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); char *port_speed_string(struct ocrdma_dev *dev); +void ocrdma_init_service_level(struct ocrdma_dev *); + #endif /* __OCRDMA_HW_H__ */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c 
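ocrdma_init_service_level() above queries the operational DCBX configuration first, falls back to the admin configuration, and keeps a default service level if neither yields a usable priority. A compact sketch of that fallback order; the query stub and its failure pattern are invented purely for illustration:

#include <stdio.h>

enum { PTYPE_OPER = 1, PTYPE_ADMIN = 2 };
#define DEFAULT_SL 5

/* Stub: pretend only the admin parameter set yields a service level. */
static int query_service_level(int ptype, int *sl)
{
	if (ptype == PTYPE_ADMIN) {
		*sl = 3;
		return 0;
	}
	return -1;
}

int main(void)
{
	int ptypes[2] = { PTYPE_OPER, PTYPE_ADMIN };
	int sl = DEFAULT_SL, status = -1;

	for (int i = 0; i < 2 && status; i++)
		status = query_service_level(ptypes[i], &sl);

	printf("service level %s: %d\n", status ? "default" : "resolved", sl);
	return 0;
}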
b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7c504e079744..256a06bc0b68 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -324,6 +324,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) if (!dev->qp_tbl) goto alloc_err; } + + dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL); + if (dev->stag_arr == NULL) + goto alloc_err; + spin_lock_init(&dev->av_tbl.lock); spin_lock_init(&dev->flush_q_lock); return 0; @@ -334,6 +339,7 @@ alloc_err: static void ocrdma_free_resources(struct ocrdma_dev *dev) { + kfree(dev->stag_arr); kfree(dev->qp_tbl); kfree(dev->cq_tbl); kfree(dev->sgid_tbl); @@ -353,15 +359,25 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, { struct ocrdma_dev *dev = dev_get_drvdata(device); - return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]); + return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]); +} + +static ssize_t show_hca_type(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct ocrdma_dev *dev = dev_get_drvdata(device); + + return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]); } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL); static struct device_attribute *ocrdma_attributes[] = { &dev_attr_hw_rev, - &dev_attr_fw_ver + &dev_attr_fw_ver, + &dev_attr_hca_type }; static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) @@ -372,6 +388,58 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); } +static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev, + struct net_device *net) +{ + struct in_device *in_dev; + union ib_gid gid; + in_dev = in_dev_get(net); + if (in_dev) { + for_ifa(in_dev) { + ipv6_addr_set_v4mapped(ifa->ifa_address, + (struct in6_addr *)&gid); + ocrdma_add_sgid(dev, &gid); + } + endfor_ifa(in_dev); + in_dev_put(in_dev); + } +} + +static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev, + struct net_device *net) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_dev *in6_dev; + union ib_gid *pgid; + struct inet6_ifaddr *ifp; + in6_dev = in6_dev_get(net); + if (in6_dev) { + read_lock_bh(&in6_dev->lock); + list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { + pgid = (union ib_gid *)&ifp->addr; + ocrdma_add_sgid(dev, pgid); + } + read_unlock_bh(&in6_dev->lock); + in6_dev_put(in6_dev); + } +#endif +} + +static void ocrdma_init_gid_table(struct ocrdma_dev *dev) +{ + struct net_device *net_dev; + + for_each_netdev(&init_net, net_dev) { + struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ? 
+ rdma_vlan_dev_real_dev(net_dev) : net_dev; + + if (real_dev == dev->nic_info.netdev) { + ocrdma_init_ipv4_gids(dev, net_dev); + ocrdma_init_ipv6_gids(dev, net_dev); + } + } +} + static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) { int status = 0, i; @@ -399,6 +467,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) if (status) goto alloc_err; + ocrdma_init_service_level(dev); + ocrdma_init_gid_table(dev); status = ocrdma_register_device(dev); if (status) goto alloc_err; @@ -508,6 +578,12 @@ static int ocrdma_close(struct ocrdma_dev *dev) return 0; } +static void ocrdma_shutdown(struct ocrdma_dev *dev) +{ + ocrdma_close(dev); + ocrdma_remove(dev); +} + /* event handling via NIC driver ensures that all the NIC specific * initialization done before RoCE driver notifies * event to stack. @@ -521,6 +597,9 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event) case BE_DEV_DOWN: ocrdma_close(dev); break; + case BE_DEV_SHUTDOWN: + ocrdma_shutdown(dev); + break; } } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 96c9ee602ba4..904989ec5eaa 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -44,35 +44,39 @@ enum { #define OCRDMA_SUBSYS_ROCE 10 enum { OCRDMA_CMD_QUERY_CONFIG = 1, - OCRDMA_CMD_ALLOC_PD, - OCRDMA_CMD_DEALLOC_PD, - - OCRDMA_CMD_CREATE_AH_TBL, - OCRDMA_CMD_DELETE_AH_TBL, - - OCRDMA_CMD_CREATE_QP, - OCRDMA_CMD_QUERY_QP, - OCRDMA_CMD_MODIFY_QP, - OCRDMA_CMD_DELETE_QP, - - OCRDMA_CMD_RSVD1, - OCRDMA_CMD_ALLOC_LKEY, - OCRDMA_CMD_DEALLOC_LKEY, - OCRDMA_CMD_REGISTER_NSMR, - OCRDMA_CMD_REREGISTER_NSMR, - OCRDMA_CMD_REGISTER_NSMR_CONT, - OCRDMA_CMD_QUERY_NSMR, - OCRDMA_CMD_ALLOC_MW, - OCRDMA_CMD_QUERY_MW, - - OCRDMA_CMD_CREATE_SRQ, - OCRDMA_CMD_QUERY_SRQ, - OCRDMA_CMD_MODIFY_SRQ, - OCRDMA_CMD_DELETE_SRQ, - - OCRDMA_CMD_ATTACH_MCAST, - OCRDMA_CMD_DETACH_MCAST, - OCRDMA_CMD_GET_RDMA_STATS, + OCRDMA_CMD_ALLOC_PD = 2, + OCRDMA_CMD_DEALLOC_PD = 3, + + OCRDMA_CMD_CREATE_AH_TBL = 4, + OCRDMA_CMD_DELETE_AH_TBL = 5, + + OCRDMA_CMD_CREATE_QP = 6, + OCRDMA_CMD_QUERY_QP = 7, + OCRDMA_CMD_MODIFY_QP = 8 , + OCRDMA_CMD_DELETE_QP = 9, + + OCRDMA_CMD_RSVD1 = 10, + OCRDMA_CMD_ALLOC_LKEY = 11, + OCRDMA_CMD_DEALLOC_LKEY = 12, + OCRDMA_CMD_REGISTER_NSMR = 13, + OCRDMA_CMD_REREGISTER_NSMR = 14, + OCRDMA_CMD_REGISTER_NSMR_CONT = 15, + OCRDMA_CMD_QUERY_NSMR = 16, + OCRDMA_CMD_ALLOC_MW = 17, + OCRDMA_CMD_QUERY_MW = 18, + + OCRDMA_CMD_CREATE_SRQ = 19, + OCRDMA_CMD_QUERY_SRQ = 20, + OCRDMA_CMD_MODIFY_SRQ = 21, + OCRDMA_CMD_DELETE_SRQ = 22, + + OCRDMA_CMD_ATTACH_MCAST = 23, + OCRDMA_CMD_DETACH_MCAST = 24, + + OCRDMA_CMD_CREATE_RBQ = 25, + OCRDMA_CMD_DESTROY_RBQ = 26, + + OCRDMA_CMD_GET_RDMA_STATS = 27, OCRDMA_CMD_MAX }; @@ -103,7 +107,7 @@ enum { #define OCRDMA_MAX_QP 2048 #define OCRDMA_MAX_CQ 2048 -#define OCRDMA_MAX_STAG 8192 +#define OCRDMA_MAX_STAG 16384 enum { OCRDMA_DB_RQ_OFFSET = 0xE0, @@ -422,7 +426,12 @@ struct ocrdma_ae_qp_mcqe { #define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14 #define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5 -#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3 + +enum ocrdma_async_grp5_events { + OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01, + OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02, + OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03 +}; enum OCRDMA_ASYNC_EVENT_TYPE { OCRDMA_CQ_ERROR = 0x00, @@ -525,8 +534,8 @@ struct ocrdma_mbx_query_config { u32 max_ird_ord_per_qp; u32 max_shared_ird_ord; u32 max_mr; - u32 max_mr_size_lo; u32 max_mr_size_hi; + u32 max_mr_size_lo; u32 max_num_mr_pbl; 
u32 max_mw; u32 max_fmr; @@ -580,17 +589,26 @@ enum { OCRDMA_FN_MODE_RDMA = 0x4 }; +enum { + OCRDMA_IF_TYPE_MASK = 0xFFFF0000, + OCRDMA_IF_TYPE_SHIFT = 0x10, + OCRDMA_PHY_TYPE_MASK = 0x0000FFFF, + OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000, + OCRDMA_FUTURE_DETAILS_SHIFT = 0x10, + OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF, + OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000, + OCRDMA_FSPEED_SUPP_SHIFT = 0x10, + OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF +}; + struct ocrdma_get_phy_info_rsp { struct ocrdma_mqe_hdr hdr; struct ocrdma_mbx_rsp rsp; - u16 phy_type; - u16 interface_type; + u32 ityp_ptyp; u32 misc_params; - u16 ext_phy_details; - u16 rsvd; - u16 auto_speeds_supported; - u16 fixed_speeds_supported; + u32 ftrdtl_exphydtl; + u32 fspeed_aspeed; u32 future_use[2]; }; @@ -603,19 +621,34 @@ enum { OCRDMA_PHY_SPEED_40GBPS = 0x20 }; +enum { + OCRDMA_PORT_NUM_MASK = 0x3F, + OCRDMA_PT_MASK = 0xC0, + OCRDMA_PT_SHIFT = 0x6, + OCRDMA_LINK_DUP_MASK = 0x0000FF00, + OCRDMA_LINK_DUP_SHIFT = 0x8, + OCRDMA_PHY_PS_MASK = 0x00FF0000, + OCRDMA_PHY_PS_SHIFT = 0x10, + OCRDMA_PHY_PFLT_MASK = 0xFF000000, + OCRDMA_PHY_PFLT_SHIFT = 0x18, + OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000, + OCRDMA_QOS_LNKSP_SHIFT = 0x10, + OCRDMA_LLST_MASK = 0xFF, + OCRDMA_PLFC_MASK = 0x00000400, + OCRDMA_PLFC_SHIFT = 0x8, + OCRDMA_PLRFC_MASK = 0x00000200, + OCRDMA_PLRFC_SHIFT = 0x8, + OCRDMA_PLTFC_MASK = 0x00000100, + OCRDMA_PLTFC_SHIFT = 0x8 +}; struct ocrdma_get_link_speed_rsp { struct ocrdma_mqe_hdr hdr; struct ocrdma_mbx_rsp rsp; - u8 pt_port_num; - u8 link_duplex; - u8 phys_port_speed; - u8 phys_port_fault; - u16 rsvd1; - u16 qos_lnk_speed; - u8 logical_lnk_status; - u8 rsvd2[3]; + u32 pflt_pps_ld_pnum; + u32 qos_lsp; + u32 res_lls; }; enum { @@ -666,8 +699,7 @@ struct ocrdma_create_cq_cmd { u32 pgsz_pgcnt; u32 ev_cnt_flags; u32 eqn; - u16 cqe_count; - u16 pd_id; + u32 pdid_cqecnt; u32 rsvd6; struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES]; }; @@ -678,6 +710,10 @@ struct ocrdma_create_cq { }; enum { + OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10 +}; + +enum { OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF }; @@ -1231,7 +1267,6 @@ struct ocrdma_destroy_srq { enum { OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16), - OCRDMA_PD_MAX_DPP_ENABLED_QP = 8, OCRDMA_DPP_PAGE_SIZE = 4096 }; @@ -1896,12 +1931,62 @@ struct ocrdma_rdma_stats_resp { struct ocrdma_rx_dbg_stats rx_dbg_stats; } __packed; +enum { + OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF, + OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF, + OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_CV_MASK = 0xFF, + OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000, + OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000, + OCRDMA_HBA_ATTRB_PT_SHIFT = 
0x1E, + OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF, + OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000, + OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF, + OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000, + OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF, + OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00, + OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08, + OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000, + OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10, + OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000, + OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18, + OCRDMA_HBA_ATTRB_NETFIL_MASK =0xFF +}; struct mgmt_hba_attribs { u8 flashrom_version_string[32]; u8 manufacturer_name[32]; u32 supported_modes; - u32 rsvd0[3]; + u32 rsvd_eprom_verhi_verlo; + u32 mbx_ds_ver; + u32 epfw_ds_ver; u8 ncsi_ver_string[12]; u32 default_extended_timeout; u8 controller_model_number[32]; @@ -1914,34 +1999,26 @@ struct mgmt_hba_attribs { u8 driver_version_string[32]; u8 fw_on_flash_version_string[32]; u32 functionalities_supported; - u16 max_cdblength; - u8 asic_revision; - u8 generational_guid[16]; - u8 hba_port_count; - u16 default_link_down_timeout; - u8 iscsi_ver_min_max; - u8 multifunction_device; - u8 cache_valid; - u8 hba_status; - u8 max_domains_supported; - u8 phy_port; + u32 guid0_asicrev_cdblen; + u8 generational_guid[12]; + u32 portcnt_guid15; + u32 mfuncdev_iscsi_ldtout; + u32 ptpnum_maxdoms_hbast_cv; u32 firmware_post_status; u32 hba_mtu[8]; - u32 rsvd1[4]; + u32 res_asicgen_iscsi_feaures; + u32 rsvd1[3]; }; struct mgmt_controller_attrib { struct mgmt_hba_attribs hba_attribs; - u16 pci_vendor_id; - u16 pci_device_id; - u16 pci_sub_vendor_id; - u16 pci_sub_system_id; - u8 pci_bus_number; - u8 pci_device_number; - u8 pci_function_number; - u8 interface_type; - u64 unique_identifier; - u32 rsvd0[5]; + u32 pci_did_vid; + u32 pci_ssid_svid; + u32 ityp_fnum_devnum_bnum; + u32 uid_hi; + u32 uid_lo; + u32 res_nnetfil; + u32 rsvd0[4]; }; struct ocrdma_get_ctrl_attribs_rsp { @@ -1949,5 +2026,79 @@ struct ocrdma_get_ctrl_attribs_rsp { struct mgmt_controller_attrib ctrl_attribs; }; +#define OCRDMA_SUBSYS_DCBX 0x10 + +enum OCRDMA_DCBX_OPCODE { + OCRDMA_CMD_GET_DCBX_CONFIG = 0x01 +}; + +enum OCRDMA_DCBX_PARAM_TYPE { + OCRDMA_PARAMETER_TYPE_ADMIN = 0x00, + OCRDMA_PARAMETER_TYPE_OPER = 0x01, + OCRDMA_PARAMETER_TYPE_PEER = 0x02 +}; + +enum OCRDMA_DCBX_APP_PROTO { + OCRDMA_APP_PROTO_ROCE = 0x8915 +}; + +enum OCRDMA_DCBX_PROTO { + OCRDMA_PROTO_SELECT_L2 = 0x00, + OCRDMA_PROTO_SELECT_L4 = 0x01 +}; + +enum OCRDMA_DCBX_APP_PARAM { + OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF, + OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF, + OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10, + OCRDMA_APP_PARAM_VALID_MASK = 0xFF, + OCRDMA_APP_PARAM_VALID_SHIFT = 0x18 +}; + +enum OCRDMA_DCBX_STATE_FLAGS { + OCRDMA_STATE_FLAG_ENABLED = 0x01, + OCRDMA_STATE_FLAG_ADDVERTISED = 0x02, + OCRDMA_STATE_FLAG_WILLING = 0x04, + OCRDMA_STATE_FLAG_SYNC = 0x08, + OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000, + OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000 +}; + +enum OCRDMA_TCV_AEV_OPV_ST { + OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF, + OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18, + OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10, + OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08, + OCRDMA_DCBX_STATE_MASK = 0xFF +}; + +struct ocrdma_app_parameter { + u32 valid_proto_app; + u32 oui; + u32 app_prio[2]; +}; + +struct ocrdma_dcbx_cfg { + u32 tcv_aev_opv_st; + u32 tc_state; + u32 
pfc_state; + u32 qcn_state; + u32 appl_state; + u32 ll_state; + u32 tc_bw[2]; + u32 tc_prio[8]; + u32 pfc_prio[2]; + struct ocrdma_app_parameter app_param[15]; +}; + +struct ocrdma_get_dcbx_cfg_req { + struct ocrdma_mbx_hdr hdr; + u32 param_type; +} __packed; + +struct ocrdma_get_dcbx_cfg_rsp { + struct ocrdma_mbx_rsp hdr; + struct ocrdma_dcbx_cfg cfg; +} __packed; #endif /* __OCRDMA_SLI_H__ */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 8bae8718fa53..e8b8569788c0 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -69,11 +69,11 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); - attr->max_mr_size = ~0ull; + attr->max_mr_size = dev->attr.max_mr_size; attr->page_size_cap = 0xffff000; attr->vendor_id = dev->nic_info.pdev->vendor; attr->vendor_part_id = dev->nic_info.pdev->device; - attr->hw_ver = 0; + attr->hw_ver = dev->asic_id; attr->max_qp = dev->attr.max_qp; attr->max_ah = OCRDMA_MAX_AH; attr->max_qp_wr = dev->attr.max_wqe; @@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) attr->max_srq_sge = dev->attr.max_srq_sge; attr->max_srq_wr = dev->attr.max_rqe; attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; - attr->max_fast_reg_page_list_len = 0; + attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr; attr->max_pkeys = 1; return 0; } @@ -268,7 +268,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, pd->dpp_enabled = ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; pd->num_dpp_qp = - pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; + pd->dpp_enabled ? 
(dev->nic_info.db_page_size / + dev->attr.wqe_size) : 0; } retry: @@ -328,7 +329,10 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) struct ocrdma_pd *pd = uctx->cntxt_pd; struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); - BUG_ON(uctx->pd_in_use); + if (uctx->pd_in_use) { + pr_err("%s(%d) Freeing in use pdid=0x%x.\n", + __func__, dev->id, pd->id); + } uctx->cntxt_pd = NULL; status = _ocrdma_dealloc_pd(dev, pd); return status; @@ -843,6 +847,13 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) if (mr->umem) ib_umem_release(mr->umem); kfree(mr); + + /* Don't stop cleanup, in case FW is unresponsive */ + if (dev->mqe_ctx.fw_error_state) { + status = 0; + pr_err("%s(%d) fw not responding.\n", + __func__, dev->id); + } return status; } @@ -2054,6 +2065,13 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } while (wr) { + if (qp->qp_type == IB_QPT_UD && + (wr->opcode != IB_WR_SEND && + wr->opcode != IB_WR_SEND_WITH_IMM)) { + *bad_wr = wr; + status = -EINVAL; + break; + } if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || wr->num_sge > qp->sq.max_sges) { *bad_wr = wr; @@ -2488,6 +2506,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, *stop = true; expand = false; } + } else if (is_hw_sq_empty(qp)) { + /* Do nothing */ + expand = false; + *polled = false; + *stop = false; } else { *polled = true; expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); @@ -2593,6 +2616,11 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, *stop = true; expand = false; } + } else if (is_hw_rq_empty(qp)) { + /* Do nothing */ + expand = false; + *polled = false; + *stop = false; } else { *polled = true; expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); @@ -2818,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) if (cq->first_arm) { ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); cq->first_arm = false; - goto skip_defer; } - cq->deferred_arm = true; -skip_defer: + cq->deferred_arm = true; cq->deferred_sol = sol_needed; spin_unlock_irqrestore(&cq->cq_lock, flags); diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c index 799a0c3bffc4..6abd3ed3cd51 100644 --- a/drivers/infiniband/hw/qib/qib_debugfs.c +++ b/drivers/infiniband/hw/qib/qib_debugfs.c @@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos) struct qib_qp_iter *iter; loff_t n = *pos; + rcu_read_lock(); iter = qib_qp_iter_init(s->private); if (!iter) return NULL; @@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) { - /* nothing for now */ + rcu_read_unlock(); } static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 8d3c78ddc906..729da39c49ed 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -1222,7 +1222,7 @@ static int qib_init_one(struct pci_dev *, const struct pci_device_id *); #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: " #define PFX QIB_DRV_NAME ": " -static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { +static const struct pci_device_id qib_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) }, diff --git a/drivers/infiniband/hw/qib/qib_mad.c 
b/drivers/infiniband/hw/qib/qib_mad.c index 22c720e5740d..636be117b578 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -2476,7 +2476,7 @@ int qib_create_agents(struct qib_ibdev *dev) ibp = &dd->pport[p].ibport_data; agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, NULL, 0, send_handler, - NULL, NULL); + NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; } diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 7fcc150d603c..6ddc0264aad2 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) struct qib_qp *pqp = iter->qp; struct qib_qp *qp; - rcu_read_lock(); for (; n < dev->qp_table_size; n++) { if (pqp) qp = rcu_dereference(pqp->next); @@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) qp = rcu_dereference(dev->qp_table[n]); pqp = qp; if (qp) { - if (iter->qp) - atomic_dec(&iter->qp->refcount); - atomic_inc(&qp->refcount); - rcu_read_unlock(); iter->qp = qp; iter->n = n; return 0; } } - rcu_read_unlock(); - if (iter->qp) - atomic_dec(&iter->qp->refcount); return ret; } diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 2bc1d2b96298..74f90b2619f6 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages, * Call with current->mm->mmap_sem held. */ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, - struct page **p, struct vm_area_struct **vma) + struct page **p) { unsigned long lock_limit; size_t got; @@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, ret = get_user_pages(current, current->mm, start_page + got * PAGE_SIZE, num_pages - got, 1, 1, - p + got, vma); + p + got, NULL); if (ret < 0) goto bail_release; } @@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, down_write(&current->mm->mmap_sem); - ret = __qib_get_user_pages(start_page, num_pages, p, NULL); + ret = __qib_get_user_pages(start_page, num_pages, p); up_write(&current->mm->mmap_sem); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index fb6d026f92cd..0d0f98695d53 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -490,7 +490,7 @@ out: /* Start of PCI section */ -static DEFINE_PCI_DEVICE_TABLE(usnic_ib_pci_ids) = { +static const struct pci_device_id usnic_ib_pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)}, {0,} }; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index c639f90cfda4..d7562beb5423 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -86,7 +86,6 @@ enum { IPOIB_FLAG_INITIALIZED = 1, IPOIB_FLAG_ADMIN_UP = 2, IPOIB_PKEY_ASSIGNED = 3, - IPOIB_PKEY_STOP = 4, IPOIB_FLAG_SUBINTERFACE = 5, IPOIB_MCAST_RUN = 6, IPOIB_STOP_REAPER = 7, @@ -132,6 +131,12 @@ struct ipoib_cb { u8 hwaddr[INFINIBAND_ALEN]; }; +static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb)); + return (struct ipoib_cb *)skb->cb; +} + /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ struct ipoib_mcast { struct
ib_sa_mcmember_rec mcmember; @@ -312,7 +317,6 @@ struct ipoib_dev_priv { struct list_head multicast_list; struct rb_root multicast_tree; - struct delayed_work pkey_poll_task; struct delayed_work mcast_task; struct work_struct carrier_on_task; struct work_struct flush_light; @@ -473,10 +477,11 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work); void ipoib_pkey_event(struct work_struct *work); void ipoib_ib_dev_cleanup(struct net_device *dev); -int ipoib_ib_dev_open(struct net_device *dev); +int ipoib_ib_dev_open(struct net_device *dev, int flush); int ipoib_ib_dev_up(struct net_device *dev); int ipoib_ib_dev_down(struct net_device *dev, int flush); int ipoib_ib_dev_stop(struct net_device *dev, int flush); +void ipoib_pkey_dev_check_presence(struct net_device *dev); int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); void ipoib_dev_cleanup(struct net_device *dev); @@ -532,8 +537,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf); void ipoib_setup(struct net_device *dev); -void ipoib_pkey_poll(struct work_struct *work); -int ipoib_pkey_dev_delay_open(struct net_device *dev); +void ipoib_pkey_open(struct ipoib_dev_priv *priv); void ipoib_drain_cq(struct net_device *dev); void ipoib_set_ethtool_ops(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index 50061854616e..6bd5740e2691 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c @@ -281,10 +281,8 @@ void ipoib_delete_debug_files(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); - if (priv->mcg_dentry) - debugfs_remove(priv->mcg_dentry); - if (priv->path_dentry) - debugfs_remove(priv->path_dentry); + debugfs_remove(priv->mcg_dentry); + debugfs_remove(priv->path_dentry); } int ipoib_register_debugfs(void) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 6a7003ddb0be..72626c348174 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -664,17 +664,18 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx) drain_tx_cq((struct net_device *)ctx); } -int ipoib_ib_dev_open(struct net_device *dev) +int ipoib_ib_dev_open(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; - if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) { - ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey); - clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); + ipoib_pkey_dev_check_presence(dev); + + if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { + ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey, + (!(priv->pkey & 0x7fff) ? 
"Invalid" : "not found")); return -1; } - set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); ret = ipoib_init_qp(dev); if (ret) { @@ -705,16 +706,17 @@ int ipoib_ib_dev_open(struct net_device *dev) dev_stop: if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); - ipoib_ib_dev_stop(dev, 1); + ipoib_ib_dev_stop(dev, flush); return -1; } -static void ipoib_pkey_dev_check_presence(struct net_device *dev) +void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); - u16 pkey_index = 0; - if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) + if (!(priv->pkey & 0x7fff) || + ib_find_pkey(priv->ca, priv->port, priv->pkey, + &priv->pkey_index)) clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); else set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); @@ -745,14 +747,6 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush) clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); netif_carrier_off(dev); - /* Shutdown the P_Key thread if still active */ - if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { - mutex_lock(&pkey_mutex); - set_bit(IPOIB_PKEY_STOP, &priv->flags); - cancel_delayed_work_sync(&priv->pkey_poll_task); - mutex_unlock(&pkey_mutex); - } - ipoib_mcast_stop_thread(dev, flush); ipoib_mcast_dev_flush(dev); @@ -924,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) (unsigned long) dev); if (dev->flags & IFF_UP) { - if (ipoib_ib_dev_open(dev)) { + if (ipoib_ib_dev_open(dev, 1)) { ipoib_transport_dev_cleanup(dev); return -ENODEV; } @@ -966,13 +960,27 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) return 1; } +/* + * returns 0 if pkey value was found in a different slot. + */ +static inline int update_child_pkey(struct ipoib_dev_priv *priv) +{ + u16 old_index = priv->pkey_index; + + priv->pkey_index = 0; + ipoib_pkey_dev_check_presence(priv->dev); + + if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && + (old_index == priv->pkey_index)) + return 1; + return 0; +} static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, enum ipoib_flush_level level) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; - u16 new_index; int result; down_read(&priv->vlan_rwsem); @@ -986,16 +994,20 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, up_read(&priv->vlan_rwsem); - if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { - /* for non-child devices must check/update the pkey value here */ - if (level == IPOIB_FLUSH_HEAVY && - !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) - update_parent_pkey(priv); + if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) && + level != IPOIB_FLUSH_HEAVY) { ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); return; } if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { + /* interface is down. update pkey and leave. 
*/ + if (level == IPOIB_FLUSH_HEAVY) { + if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) + update_parent_pkey(priv); + else + update_child_pkey(priv); + } ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); return; } @@ -1005,20 +1017,13 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, * (parent) devices should always takes what present in pkey index 0 */ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { - if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { - clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); - ipoib_ib_dev_down(dev, 0); - ipoib_ib_dev_stop(dev, 0); - if (ipoib_pkey_dev_delay_open(dev)) - return; - } - /* restart QP only if P_Key index is changed */ - if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && - new_index == priv->pkey_index) { + result = update_child_pkey(priv); + if (result) { + /* restart QP only if P_Key index is changed */ ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); return; } - priv->pkey_index = new_index; + } else { result = update_parent_pkey(priv); /* restart QP only if P_Key value changed */ @@ -1038,8 +1043,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_ib_dev_down(dev, 0); if (level == IPOIB_FLUSH_HEAVY) { - ipoib_ib_dev_stop(dev, 0); - ipoib_ib_dev_open(dev); + if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) + ipoib_ib_dev_stop(dev, 0); + if (ipoib_ib_dev_open(dev, 0) != 0) + return; + if (netif_queue_stopped(dev)) + netif_start_queue(dev); } /* @@ -1094,54 +1103,4 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) ipoib_transport_dev_cleanup(dev); } -/* - * Delayed P_Key Assigment Interim Support - * - * The following is initial implementation of delayed P_Key assigment - * mechanism. It is using the same approach implemented for the multicast - * group join. The single goal of this implementation is to quickly address - * Bug #2507. This implementation will probably be removed when the P_Key - * change async notification is available. 
- */ - -void ipoib_pkey_poll(struct work_struct *work) -{ - struct ipoib_dev_priv *priv = - container_of(work, struct ipoib_dev_priv, pkey_poll_task.work); - struct net_device *dev = priv->dev; - - ipoib_pkey_dev_check_presence(dev); - - if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) - ipoib_open(dev); - else { - mutex_lock(&pkey_mutex); - if (!test_bit(IPOIB_PKEY_STOP, &priv->flags)) - queue_delayed_work(ipoib_workqueue, - &priv->pkey_poll_task, - HZ); - mutex_unlock(&pkey_mutex); - } -} - -int ipoib_pkey_dev_delay_open(struct net_device *dev) -{ - struct ipoib_dev_priv *priv = netdev_priv(dev); - - /* Look for the interface pkey value in the IB Port P_Key table and */ - /* set the interface pkey assigment flag */ - ipoib_pkey_dev_check_presence(dev); - /* P_Key value not assigned yet - start polling */ - if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { - mutex_lock(&pkey_mutex); - clear_bit(IPOIB_PKEY_STOP, &priv->flags); - queue_delayed_work(ipoib_workqueue, - &priv->pkey_poll_task, - HZ); - mutex_unlock(&pkey_mutex); - return 1; - } - - return 0; -} diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4e675f4fecc9..13e6e0431592 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -108,11 +108,11 @@ int ipoib_open(struct net_device *dev) set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); - if (ipoib_pkey_dev_delay_open(dev)) - return 0; - - if (ipoib_ib_dev_open(dev)) + if (ipoib_ib_dev_open(dev, 1)) { + if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) + return 0; goto err_disable; + } if (ipoib_ib_dev_up(dev)) goto err_stop; @@ -716,7 +716,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh; - struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; + struct ipoib_cb *cb = ipoib_skb_cb(skb); struct ipoib_header *header; unsigned long flags; @@ -813,7 +813,7 @@ static int ipoib_hard_header(struct sk_buff *skb, const void *daddr, const void *saddr, unsigned len) { struct ipoib_header *header; - struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; + struct ipoib_cb *cb = ipoib_skb_cb(skb); header = (struct ipoib_header *) skb_push(skb, sizeof *header); @@ -1379,7 +1379,6 @@ void ipoib_setup(struct net_device *dev) INIT_LIST_HEAD(&priv->dead_ahs); INIT_LIST_HEAD(&priv->multicast_list); - INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll); INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d4e005720d01..ffb83b5f7e80 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -529,21 +529,13 @@ void ipoib_mcast_join_task(struct work_struct *work) port_attr.state); return; } + priv->local_lid = port_attr.lid; if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid)) ipoib_warn(priv, "ib_query_gid() failed\n"); else memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); - { - struct ib_port_attr attr; - - if (!ib_query_port(priv->ca, priv->port, &attr)) - priv->local_lid = attr.lid; - else - ipoib_warn(priv, "ib_query_port failed\n"); - } - if (!priv->broadcast) { struct ipoib_mcast *broadcast; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c 
b/drivers/infiniband/ulp/iser/iscsi_iser.c index eb7973957a6e..93ce62fe1594 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -344,7 +344,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_session *session; struct iser_conn *ib_conn; struct iscsi_endpoint *ep; int error; @@ -363,9 +362,17 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, } ib_conn = ep->dd_data; - session = conn->session; - if (iser_alloc_rx_descriptors(ib_conn, session)) - return -ENOMEM; + mutex_lock(&ib_conn->state_mutex); + if (ib_conn->state != ISER_CONN_UP) { + error = -EINVAL; + iser_err("iser_conn %p state is %d, teardown started\n", + ib_conn, ib_conn->state); + goto out; + } + + error = iser_alloc_rx_descriptors(ib_conn, conn->session); + if (error) + goto out; /* binds the iSER connection retrieved from the previously * connected ep_handle to the iSCSI layer connection. exchanges @@ -375,7 +382,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, conn->dd_data = ib_conn; ib_conn->iscsi_conn = conn; - return 0; +out: + mutex_unlock(&ib_conn->state_mutex); + return error; } static int @@ -596,20 +605,28 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, struct iser_conn *ib_conn; struct iscsi_endpoint *ep; - ep = iscsi_create_endpoint(sizeof(*ib_conn)); + ep = iscsi_create_endpoint(0); if (!ep) return ERR_PTR(-ENOMEM); - ib_conn = ep->dd_data; + ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL); + if (!ib_conn) { + err = -ENOMEM; + goto failure; + } + + ep->dd_data = ib_conn; ib_conn->ep = ep; iser_conn_init(ib_conn); - err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, - non_blocking); + err = iser_connect(ib_conn, NULL, dst_addr, non_blocking); if (err) - return ERR_PTR(err); + goto failure; return ep; +failure: + iscsi_destroy_endpoint(ep); + return ERR_PTR(err); } static int @@ -619,15 +636,16 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) int rc; ib_conn = ep->dd_data; - rc = wait_event_interruptible_timeout(ib_conn->wait, - ib_conn->state == ISER_CONN_UP, - msecs_to_jiffies(timeout_ms)); - + rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion, + msecs_to_jiffies(timeout_ms)); /* if conn establishment failed, return error code to iscsi */ - if (!rc && - (ib_conn->state == ISER_CONN_TERMINATING || - ib_conn->state == ISER_CONN_DOWN)) - rc = -1; + if (rc == 0) { + mutex_lock(&ib_conn->state_mutex); + if (ib_conn->state == ISER_CONN_TERMINATING || + ib_conn->state == ISER_CONN_DOWN) + rc = -1; + mutex_unlock(&ib_conn->state_mutex); + } iser_info("ib conn %p rc = %d\n", ib_conn, rc); @@ -646,19 +664,25 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) ib_conn = ep->dd_data; iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state); + mutex_lock(&ib_conn->state_mutex); iser_conn_terminate(ib_conn); /* - * if iser_conn and iscsi_conn are bound, we must wait iscsi_conn_stop - * call and ISER_CONN_DOWN state before freeing the iser resources. - * otherwise we are safe to free resources immediately. + * if iser_conn and iscsi_conn are bound, we must wait for + * iscsi_conn_stop and flush errors completion before freeing + * the iser resources. Otherwise we are safe to free resources + * immediately. 
*/ if (ib_conn->iscsi_conn) { INIT_WORK(&ib_conn->release_work, iser_release_work); queue_work(release_wq, &ib_conn->release_work); + mutex_unlock(&ib_conn->state_mutex); } else { + ib_conn->state = ISER_CONN_DOWN; + mutex_unlock(&ib_conn->state_mutex); iser_conn_release(ib_conn); } + iscsi_destroy_endpoint(ep); } static umode_t iser_attr_is_visible(int param_type, int param) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 97cd385bf7f7..9f0e0e34d6ca 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -69,7 +69,7 @@ #define DRV_NAME "iser" #define PFX DRV_NAME ": " -#define DRV_VER "1.4" +#define DRV_VER "1.4.1" #define iser_dbg(fmt, arg...) \ do { \ @@ -326,7 +326,6 @@ struct iser_conn { struct iser_device *device; /* device context */ struct rdma_cm_id *cma_id; /* CMA ID */ struct ib_qp *qp; /* QP */ - wait_queue_head_t wait; /* waitq for conn/disconn */ unsigned qp_max_recv_dtos; /* num of rx buffers */ unsigned qp_max_recv_dtos_mask; /* above minus 1 */ unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */ @@ -335,6 +334,9 @@ struct iser_conn { char name[ISER_OBJECT_NAME_SIZE]; struct work_struct release_work; struct completion stop_completion; + struct mutex state_mutex; + struct completion flush_completion; + struct completion up_completion; struct list_head conn_list; /* entry in ig conn list */ char *login_buf; @@ -448,8 +450,8 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task, enum iser_data_dir cmd_dir); int iser_connect(struct iser_conn *ib_conn, - struct sockaddr_in *src_addr, - struct sockaddr_in *dst_addr, + struct sockaddr *src_addr, + struct sockaddr *dst_addr, int non_blocking); int iser_reg_page_vec(struct iser_conn *ib_conn, diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ea01075f9f9b..3bfec4bbda52 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -73,7 +73,7 @@ static int iser_create_device_ib_res(struct iser_device *device) { struct iser_cq_desc *cq_desc; struct ib_device_attr *dev_attr = &device->dev_attr; - int ret, i, j; + int ret, i; ret = ib_query_device(device->ib_device, dev_attr); if (ret) { @@ -125,16 +125,20 @@ static int iser_create_device_ib_res(struct iser_device *device) iser_cq_event_callback, (void *)&cq_desc[i], ISER_MAX_RX_CQ_LEN, i); - if (IS_ERR(device->rx_cq[i])) + if (IS_ERR(device->rx_cq[i])) { + device->rx_cq[i] = NULL; goto cq_err; + } device->tx_cq[i] = ib_create_cq(device->ib_device, NULL, iser_cq_event_callback, (void *)&cq_desc[i], ISER_MAX_TX_CQ_LEN, i); - if (IS_ERR(device->tx_cq[i])) + if (IS_ERR(device->tx_cq[i])) { + device->tx_cq[i] = NULL; goto cq_err; + } if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP)) goto cq_err; @@ -160,14 +164,14 @@ static int iser_create_device_ib_res(struct iser_device *device) handler_err: ib_dereg_mr(device->mr); dma_mr_err: - for (j = 0; j < device->cqs_used; j++) - tasklet_kill(&device->cq_tasklet[j]); + for (i = 0; i < device->cqs_used; i++) + tasklet_kill(&device->cq_tasklet[i]); cq_err: - for (j = 0; j < i; j++) { - if (device->tx_cq[j]) - ib_destroy_cq(device->tx_cq[j]); - if (device->rx_cq[j]) - ib_destroy_cq(device->rx_cq[j]); + for (i = 0; i < device->cqs_used; i++) { + if (device->tx_cq[i]) + ib_destroy_cq(device->tx_cq[i]); + if (device->rx_cq[i]) + ib_destroy_cq(device->rx_cq[i]); } ib_dealloc_pd(device->pd); pd_err: @@ -491,10 +495,9 @@ out_err: } /** - * releases 
the QP objects, returns 0 on success, - * -1 on failure + * releases the QP object */ -static int iser_free_ib_conn_res(struct iser_conn *ib_conn) +static void iser_free_ib_conn_res(struct iser_conn *ib_conn) { int cq_index; BUG_ON(ib_conn == NULL); @@ -513,8 +516,6 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn) } ib_conn->qp = NULL; - - return 0; } /** @@ -568,31 +569,40 @@ static void iser_device_try_release(struct iser_device *device) mutex_unlock(&ig.device_list_mutex); } +/** + * Called with state mutex held + **/ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, enum iser_ib_conn_state comp, enum iser_ib_conn_state exch) { int ret; - spin_lock_bh(&ib_conn->lock); if ((ret = (ib_conn->state == comp))) ib_conn->state = exch; - spin_unlock_bh(&ib_conn->lock); return ret; } void iser_release_work(struct work_struct *work) { struct iser_conn *ib_conn; + int rc; ib_conn = container_of(work, struct iser_conn, release_work); /* wait for .conn_stop callback */ - wait_for_completion(&ib_conn->stop_completion); + rc = wait_for_completion_timeout(&ib_conn->stop_completion, 30 * HZ); + WARN_ON(rc == 0); /* wait for the qp`s post send and post receive buffers to empty */ - wait_event_interruptible(ib_conn->wait, - ib_conn->state == ISER_CONN_DOWN); + rc = wait_for_completion_timeout(&ib_conn->flush_completion, 30 * HZ); + WARN_ON(rc == 0); + + ib_conn->state = ISER_CONN_DOWN; + + mutex_lock(&ib_conn->state_mutex); + ib_conn->state = ISER_CONN_DOWN; + mutex_unlock(&ib_conn->state_mutex); iser_conn_release(ib_conn); } @@ -604,23 +614,27 @@ void iser_conn_release(struct iser_conn *ib_conn) { struct iser_device *device = ib_conn->device; - BUG_ON(ib_conn->state == ISER_CONN_UP); - mutex_lock(&ig.connlist_mutex); list_del(&ib_conn->conn_list); mutex_unlock(&ig.connlist_mutex); + + mutex_lock(&ib_conn->state_mutex); + BUG_ON(ib_conn->state != ISER_CONN_DOWN); + iser_free_rx_descriptors(ib_conn); iser_free_ib_conn_res(ib_conn); ib_conn->device = NULL; /* on EVENT_ADDR_ERROR there's no device yet for this conn */ if (device != NULL) iser_device_try_release(device); + mutex_unlock(&ib_conn->state_mutex); + /* if cma handler context, the caller actually destroy the id */ if (ib_conn->cma_id != NULL) { rdma_destroy_id(ib_conn->cma_id); ib_conn->cma_id = NULL; } - iscsi_destroy_endpoint(ib_conn->ep); + kfree(ib_conn); } /** @@ -642,22 +656,31 @@ void iser_conn_terminate(struct iser_conn *ib_conn) ib_conn,err); } +/** + * Called with state mutex held + **/ static void iser_connect_error(struct rdma_cm_id *cma_id) { struct iser_conn *ib_conn; ib_conn = (struct iser_conn *)cma_id->context; - ib_conn->state = ISER_CONN_DOWN; - wake_up_interruptible(&ib_conn->wait); } +/** + * Called with state mutex held + **/ static void iser_addr_handler(struct rdma_cm_id *cma_id) { struct iser_device *device; struct iser_conn *ib_conn; int ret; + ib_conn = (struct iser_conn *)cma_id->context; + if (ib_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + device = iser_device_find_by_ib_device(cma_id); if (!device) { iser_err("device lookup/creation failed\n"); @@ -665,7 +688,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) return; } - ib_conn = (struct iser_conn *)cma_id->context; ib_conn->device = device; /* connection T10-PI support */ @@ -689,18 +711,27 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) } } +/** + * Called with state mutex held + **/ static void iser_route_handler(struct rdma_cm_id *cma_id) { struct rdma_conn_param conn_param; int ret; struct 
iser_cm_hdr req_hdr; + struct iser_conn *ib_conn = (struct iser_conn *)cma_id->context; + struct iser_device *device = ib_conn->device; + + if (ib_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context); if (ret) goto failure; memset(&conn_param, 0, sizeof conn_param); - conn_param.responder_resources = 4; + conn_param.responder_resources = device->dev_attr.max_qp_rd_atom; conn_param.initiator_depth = 1; conn_param.retry_count = 7; conn_param.rnr_retry_count = 6; @@ -728,12 +759,16 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id) struct ib_qp_attr attr; struct ib_qp_init_attr init_attr; + ib_conn = (struct iser_conn *)cma_id->context; + if (ib_conn->state != ISER_CONN_PENDING) + /* bailout */ + return; + (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); - ib_conn = (struct iser_conn *)cma_id->context; - if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_PENDING, ISER_CONN_UP)) - wake_up_interruptible(&ib_conn->wait); + ib_conn->state = ISER_CONN_UP; + complete(&ib_conn->up_completion); } static void iser_disconnected_handler(struct rdma_cm_id *cma_id) @@ -752,19 +787,25 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) iser_err("iscsi_iser connection isn't bound\n"); } - /* Complete the termination process if no posts are pending */ + /* Complete the termination process if no posts are pending. This code + * block also exists in iser_handle_comp_error(), but it is needed here + * for cases of no flushes at all, e.g. discovery over rdma. + */ if (ib_conn->post_recv_buf_count == 0 && (atomic_read(&ib_conn->post_send_buf_count) == 0)) { - ib_conn->state = ISER_CONN_DOWN; - wake_up_interruptible(&ib_conn->wait); + complete(&ib_conn->flush_completion); } } static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { + struct iser_conn *ib_conn; + + ib_conn = (struct iser_conn *)cma_id->context; iser_info("event %d status %d conn %p id %p\n", event->event, event->status, cma_id->context, cma_id); + mutex_lock(&ib_conn->state_mutex); switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: iser_addr_handler(cma_id); @@ -785,24 +826,28 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_TIMEWAIT_EXIT: iser_disconnected_handler(cma_id); break; default: iser_err("Unexpected RDMA CM event (%d)\n", event->event); break; } + mutex_unlock(&ib_conn->state_mutex); return 0; } void iser_conn_init(struct iser_conn *ib_conn) { ib_conn->state = ISER_CONN_INIT; - init_waitqueue_head(&ib_conn->wait); ib_conn->post_recv_buf_count = 0; atomic_set(&ib_conn->post_send_buf_count, 0); init_completion(&ib_conn->stop_completion); + init_completion(&ib_conn->flush_completion); + init_completion(&ib_conn->up_completion); INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); + mutex_init(&ib_conn->state_mutex); } /** @@ -810,22 +855,21 @@ void iser_conn_init(struct iser_conn *ib_conn) * sleeps until the connection is established or rejected */ int iser_connect(struct iser_conn *ib_conn, - struct sockaddr_in *src_addr, - struct sockaddr_in *dst_addr, + struct sockaddr *src_addr, + struct sockaddr *dst_addr, int non_blocking) { - struct sockaddr *src, *dst; int err = 0; - sprintf(ib_conn->name, "%pI4:%d", - &dst_addr->sin_addr.s_addr, 
dst_addr->sin_port); + mutex_lock(&ib_conn->state_mutex); + + sprintf(ib_conn->name, "%pISp", dst_addr); + + iser_info("connecting to: %s\n", ib_conn->name); /* the device is known only --after-- address resolution */ ib_conn->device = NULL; - iser_info("connecting to: %pI4, port 0x%x\n", - &dst_addr->sin_addr, dst_addr->sin_port); - ib_conn->state = ISER_CONN_PENDING; ib_conn->cma_id = rdma_create_id(iser_cma_handler, @@ -837,23 +881,21 @@ int iser_connect(struct iser_conn *ib_conn, goto id_failure; } - src = (struct sockaddr *)src_addr; - dst = (struct sockaddr *)dst_addr; - err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000); + err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000); if (err) { iser_err("rdma_resolve_addr failed: %d\n", err); goto addr_failure; } if (!non_blocking) { - wait_event_interruptible(ib_conn->wait, - (ib_conn->state != ISER_CONN_PENDING)); + wait_for_completion_interruptible(&ib_conn->up_completion); if (ib_conn->state != ISER_CONN_UP) { err = -EIO; goto connect_failure; } } + mutex_unlock(&ib_conn->state_mutex); mutex_lock(&ig.connlist_mutex); list_add(&ib_conn->conn_list, &ig.connlist); @@ -865,6 +907,7 @@ id_failure: addr_failure: ib_conn->state = ISER_CONN_DOWN; connect_failure: + mutex_unlock(&ib_conn->state_mutex); iser_conn_release(ib_conn); return err; } @@ -1049,18 +1092,19 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc, if (ib_conn->post_recv_buf_count == 0 && atomic_read(&ib_conn->post_send_buf_count) == 0) { - /* getting here when the state is UP means that the conn is * - * being terminated asynchronously from the iSCSI layer's * - * perspective. */ - if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, - ISER_CONN_TERMINATING)) + /** + * getting here when the state is UP means that the conn is + * being terminated asynchronously from the iSCSI layer's + * perspective. It is safe to peek at the connection state + * since iscsi_conn_failure is allowed to be called twice. 
+ **/ + if (ib_conn->state == ISER_CONN_UP) iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); /* no more non completed posts to the QP, complete the * termination process w.o worrying on disconnect event */ - ib_conn->state = ISER_CONN_DOWN; - wake_up_interruptible(&ib_conn->wait); + complete(&ib_conn->flush_completion); } } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index d4c7928a0f36..da8ff124762a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -586,17 +586,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) init_completion(&isert_conn->conn_wait); init_completion(&isert_conn->conn_wait_comp_err); kref_init(&isert_conn->conn_kref); - kref_get(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); spin_lock_init(&isert_conn->conn_lock); INIT_LIST_HEAD(&isert_conn->conn_fr_pool); cma_id->context = isert_conn; isert_conn->conn_cm_id = cma_id; - isert_conn->responder_resources = event->param.conn.responder_resources; - isert_conn->initiator_depth = event->param.conn.initiator_depth; - pr_debug("Using responder_resources: %u initiator_depth: %u\n", - isert_conn->responder_resources, isert_conn->initiator_depth); isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + ISER_RX_LOGIN_SIZE, GFP_KERNEL); @@ -643,6 +638,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_rsp_dma_map; } + /* Set max inflight RDMA READ requests */ + isert_conn->initiator_depth = min_t(u8, + event->param.conn.initiator_depth, + device->dev_attr.max_qp_init_rd_atom); + pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth); + isert_conn->conn_device = device; isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); if (IS_ERR(isert_conn->conn_pd)) { @@ -746,7 +747,9 @@ isert_connect_release(struct isert_conn *isert_conn) static void isert_connected_handler(struct rdma_cm_id *cma_id) { - return; + struct isert_conn *isert_conn = cma_id->context; + + kref_get(&isert_conn->conn_kref); } static void @@ -798,7 +801,6 @@ isert_disconnect_work(struct work_struct *work) wake_up: complete(&isert_conn->conn_wait); - isert_put_conn(isert_conn); } static void @@ -3067,7 +3069,6 @@ isert_rdma_accept(struct isert_conn *isert_conn) int ret; memset(&cp, 0, sizeof(struct rdma_conn_param)); - cp.responder_resources = isert_conn->responder_resources; cp.initiator_depth = isert_conn->initiator_depth; cp.retry_count = 7; cp.rnr_retry_count = 7; @@ -3215,7 +3216,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) pr_debug("isert_wait_conn: Starting \n"); mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->conn_cm_id) { + if (isert_conn->conn_cm_id && !isert_conn->disconnect) { pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); rdma_disconnect(isert_conn->conn_cm_id); } @@ -3234,6 +3235,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) wait_for_completion(&isert_conn->conn_wait_comp_err); wait_for_completion(&isert_conn->conn_wait); + isert_put_conn(isert_conn); } static void isert_free_conn(struct iscsi_conn *conn) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e3c2c5b4297f..62d2a18e1b41 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -130,6 +130,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr); static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 
static struct scsi_transport_template *ib_srp_transport_template; +static struct workqueue_struct *srp_remove_wq; static struct ib_client srp_client = { .name = "srp", @@ -731,7 +732,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target) spin_unlock_irq(&target->lock); if (changed) - queue_work(system_long_wq, &target->remove_work); + queue_work(srp_remove_wq, &target->remove_work); return changed; } @@ -1643,10 +1644,14 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) SCSI_SENSE_BUFFERSIZE)); } - if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER)) - scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); - else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) + if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); + else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) + scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); + else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) + scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); + else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) + scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); srp_free_req(target, req, scmnd, be32_to_cpu(rsp->req_lim_delta)); @@ -3261,9 +3266,10 @@ static void srp_remove_one(struct ib_device *device) spin_unlock(&host->target_lock); /* - * Wait for target port removal tasks. + * Wait for tl_err and target port removal tasks. */ flush_workqueue(system_long_wq); + flush_workqueue(srp_remove_wq); kfree(host); } @@ -3313,16 +3319,22 @@ static int __init srp_init_module(void) indirect_sg_entries = cmd_sg_entries; } + srp_remove_wq = create_workqueue("srp_remove"); + if (!srp_remove_wq) { + ret = -ENOMEM; + goto out; + } + + ret = -ENOMEM; ib_srp_transport_template = srp_attach_transport(&ib_srp_transport_functions); if (!ib_srp_transport_template) - return -ENOMEM; + goto destroy_wq; ret = class_register(&srp_class); if (ret) { pr_err("couldn't register class infiniband_srp\n"); - srp_release_transport(ib_srp_transport_template); - return ret; + goto release_tr; } ib_sa_register_client(&srp_sa_client); @@ -3330,13 +3342,22 @@ static int __init srp_init_module(void) ret = ib_register_client(&srp_client); if (ret) { pr_err("couldn't register IB client\n"); - srp_release_transport(ib_srp_transport_template); - ib_sa_unregister_client(&srp_sa_client); - class_unregister(&srp_class); - return ret; + goto unreg_sa; } - return 0; +out: + return ret; + +unreg_sa: + ib_sa_unregister_client(&srp_sa_client); + class_unregister(&srp_class); + +release_tr: + srp_release_transport(ib_srp_transport_template); + +destroy_wq: + destroy_workqueue(srp_remove_wq); + goto out; } static void __exit srp_cleanup_module(void) @@ -3345,6 +3366,7 @@ static void __exit srp_cleanup_module(void) ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); srp_release_transport(ib_srp_transport_template); + destroy_workqueue(srp_remove_wq); } module_init(srp_init_module); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index fe09f2788b15..d28a8c284da9 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -198,6 +198,7 @@ static void srpt_event_handler(struct ib_event_handler *handler, case IB_EVENT_PKEY_CHANGE: case IB_EVENT_SM_CHANGE: case IB_EVENT_CLIENT_REREGISTER: + case IB_EVENT_GID_CHANGE: /* Refresh port data asynchronously. 
*/ if (event->element.port_num <= sdev->device->phys_port_cnt) { sport = &sdev->port[event->element.port_num - 1]; @@ -563,7 +564,7 @@ static int srpt_refresh_port(struct srpt_port *sport) &reg_req, 0, srpt_mad_send_handler, srpt_mad_recv_handler, - sport); + sport, 0); if (IS_ERR(sport->mad_agent)) { ret = PTR_ERR(sport->mad_agent); sport->mad_agent = NULL; diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index d398f1321f14..fbe29fcb15c5 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c @@ -236,6 +236,35 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count) } EXPORT_SYMBOL(input_mt_report_pointer_emulation); +static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt) +{ + int i; + + for (i = 0; i < mt->num_slots; i++) { + if (!input_mt_is_used(mt, &mt->slots[i])) { + input_mt_slot(dev, i); + input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); + } + } +} + +/** + * input_mt_drop_unused() - Inactivate slots not seen in this frame + * @dev: input device with allocated MT slots + * + * Lift all slots not seen since the last call to this function. + */ +void input_mt_drop_unused(struct input_dev *dev) +{ + struct input_mt *mt = dev->mt; + + if (mt) { + __input_mt_drop_unused(dev, mt); + mt->frame++; + } +} +EXPORT_SYMBOL(input_mt_drop_unused); + /** * input_mt_sync_frame() - synchronize mt frame * @dev: input device with allocated MT slots @@ -247,20 +276,13 @@ EXPORT_SYMBOL(input_mt_report_pointer_emulation); void input_mt_sync_frame(struct input_dev *dev) { struct input_mt *mt = dev->mt; - struct input_mt_slot *s; bool use_count = false; if (!mt) return; - if (mt->flags & INPUT_MT_DROP_UNUSED) { - for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { - if (input_mt_is_used(mt, s)) - continue; - input_mt_slot(dev, s - mt->slots); - input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); - } - } + if (mt->flags & INPUT_MT_DROP_UNUSED) + __input_mt_drop_unused(dev, mt); if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT)) use_count = true; diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 9135606c8649..ab0fdcd36e18 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -158,7 +158,7 @@ static unsigned int get_time_pit(void) #define GET_TIME(x) rdtscl(x) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "TSC" -#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE) +#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE) #define GET_TIME(x) do { x = get_cycles(); } while (0) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "get_cycles" diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 603fe0dd3682..177602cf7079 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -95,7 +95,8 @@ #define XTYPE_XBOX 0 #define XTYPE_XBOX360 1 #define XTYPE_XBOX360W 2 -#define XTYPE_UNKNOWN 3 +#define XTYPE_XBOXONE 3 +#define XTYPE_UNKNOWN 4 static bool dpad_to_buttons; module_param(dpad_to_buttons, bool, S_IRUGO); @@ -121,6 +122,7 @@ static const struct xpad_device { { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX }, { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, + { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)",
MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, @@ -231,10 +233,12 @@ static const signed short xpad_abs_triggers[] = { -1 }; -/* Xbox 360 has a vendor-specific class, so we cannot match it with only +/* + * Xbox 360 has a vendor-specific class, so we cannot match it with only * USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we * match against vendor id as well. Wired Xbox 360 devices have protocol 1, - * wireless controllers have protocol 129. */ + * wireless controllers have protocol 129. + */ #define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \ .idVendor = (vend), \ @@ -245,9 +249,20 @@ static const signed short xpad_abs_triggers[] = { { XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \ { XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) } +/* The Xbox One controller uses subclass 71 and protocol 208. */ +#define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \ + .idVendor = (vend), \ + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \ + .bInterfaceSubClass = 71, \ + .bInterfaceProtocol = (pr) +#define XPAD_XBOXONE_VENDOR(vend) \ + { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) } + static struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ + XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ @@ -278,12 +293,10 @@ struct usb_xpad { struct urb *bulk_out; unsigned char *bdata; -#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS) struct urb *irq_out; /* urb for interrupt out report */ unsigned char *odata; /* output data */ dma_addr_t odata_dma; struct mutex odata_mutex; -#endif #if defined(CONFIG_JOYSTICK_XPAD_LEDS) struct xpad_led *led; @@ -470,6 +483,105 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha xpad360_process_packet(xpad, cmd, &data[4]); } +/* + * xpadone_process_buttons + * + * Process a button update packet from an Xbox one controller. 
+ */ +static void xpadone_process_buttons(struct usb_xpad *xpad, + struct input_dev *dev, + unsigned char *data) +{ + /* menu/view buttons */ + input_report_key(dev, BTN_START, data[4] & 0x04); + input_report_key(dev, BTN_SELECT, data[4] & 0x08); + + /* buttons A,B,X,Y */ + input_report_key(dev, BTN_A, data[4] & 0x10); + input_report_key(dev, BTN_B, data[4] & 0x20); + input_report_key(dev, BTN_X, data[4] & 0x40); + input_report_key(dev, BTN_Y, data[4] & 0x80); + + /* digital pad */ + if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { + /* dpad as buttons (left, right, up, down) */ + input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & 0x04); + input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & 0x08); + input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & 0x01); + input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & 0x02); + } else { + input_report_abs(dev, ABS_HAT0X, + !!(data[5] & 0x08) - !!(data[5] & 0x04)); + input_report_abs(dev, ABS_HAT0Y, + !!(data[5] & 0x02) - !!(data[5] & 0x01)); + } + + /* TL/TR */ + input_report_key(dev, BTN_TL, data[5] & 0x10); + input_report_key(dev, BTN_TR, data[5] & 0x20); + + /* stick press left/right */ + input_report_key(dev, BTN_THUMBL, data[5] & 0x40); + input_report_key(dev, BTN_THUMBR, data[5] & 0x80); + + if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { + /* left stick */ + input_report_abs(dev, ABS_X, + (__s16) le16_to_cpup((__le16 *)(data + 10))); + input_report_abs(dev, ABS_Y, + ~(__s16) le16_to_cpup((__le16 *)(data + 12))); + + /* right stick */ + input_report_abs(dev, ABS_RX, + (__s16) le16_to_cpup((__le16 *)(data + 14))); + input_report_abs(dev, ABS_RY, + ~(__s16) le16_to_cpup((__le16 *)(data + 16))); + } + + /* triggers left/right */ + if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { + input_report_key(dev, BTN_TL2, + (__u16) le16_to_cpup((__le16 *)(data + 6))); + input_report_key(dev, BTN_TR2, + (__u16) le16_to_cpup((__le16 *)(data + 8))); + } else { + input_report_abs(dev, ABS_Z, + (__u16) le16_to_cpup((__le16 *)(data + 6))); + input_report_abs(dev, ABS_RZ, + (__u16) le16_to_cpup((__le16 *)(data + 8))); + } + + input_sync(dev); +} + +/* + * xpadone_process_packet + * + * Completes a request by converting the data into events for the + * input subsystem. This version is for the Xbox One controller. 
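xpadone_process_buttons() above leans on two small idioms: collapsing a pair of opposing d-pad bits into a -1/0/+1 hat value with !!(hi) - !!(lo), and pulling signed 16-bit little-endian stick values out of the report buffer. The sketch below shows both in isolation; the report bytes and masks are illustrative, not taken from a captured Xbox One packet.

#include <stdint.h>
#include <stdio.h>

/* Map two opposing d-pad bits onto one -1/0/+1 axis value. */
static int dpad_axis(uint8_t byte, uint8_t pos_mask, uint8_t neg_mask)
{
	return !!(byte & pos_mask) - !!(byte & neg_mask);
}

/* Read a signed 16-bit little-endian field from a report buffer,
 * mirroring the (__s16) le16_to_cpup() casts in the driver. */
static int16_t get_s16_le(const uint8_t *p)
{
	return (int16_t)((uint16_t)p[0] | ((uint16_t)p[1] << 8));
}

int main(void)
{
	/* Hypothetical report bytes for illustration only. */
	uint8_t report[4] = { 0x08 /* "right" bit set */, 0x00, 0x34, 0x12 };

	printf("hat x: %d\n", dpad_axis(report[0], 0x08, 0x04));
	printf("stick: %d\n", get_s16_le(&report[2]));
	return 0;
}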
+ * + * The report format was gleaned from + * https://github.com/kylelemons/xbox/blob/master/xbox.go + */ + +static void xpadone_process_packet(struct usb_xpad *xpad, + u16 cmd, unsigned char *data) +{ + struct input_dev *dev = xpad->dev; + + switch (data[0]) { + case 0x20: + xpadone_process_buttons(xpad, dev, data); + break; + + case 0x07: + /* the xbox button has its own special report */ + input_report_key(dev, BTN_MODE, data[4] & 0x01); + input_sync(dev); + break; + } +} + static void xpad_irq_in(struct urb *urb) { struct usb_xpad *xpad = urb->context; @@ -502,6 +614,9 @@ static void xpad_irq_in(struct urb *urb) case XTYPE_XBOX360W: xpad360w_process_packet(xpad, 0, xpad->idata); break; + case XTYPE_XBOXONE: + xpadone_process_packet(xpad, 0, xpad->idata); + break; default: xpad_process_packet(xpad, 0, xpad->idata); } @@ -535,7 +650,6 @@ static void xpad_bulk_out(struct urb *urb) } } -#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS) static void xpad_irq_out(struct urb *urb) { struct usb_xpad *xpad = urb->context; @@ -573,6 +687,7 @@ exit: static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) { struct usb_endpoint_descriptor *ep_irq_out; + int ep_irq_out_idx; int error; if (xpad->xtype == XTYPE_UNKNOWN) @@ -593,7 +708,10 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) goto fail2; } - ep_irq_out = &intf->cur_altsetting->endpoint[1].desc; + /* Xbox One controller has in/out endpoints swapped. */ + ep_irq_out_idx = xpad->xtype == XTYPE_XBOXONE ? 0 : 1; + ep_irq_out = &intf->cur_altsetting->endpoint[ep_irq_out_idx].desc; + usb_fill_int_urb(xpad->irq_out, xpad->udev, usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress), xpad->odata, XPAD_PKT_LEN, @@ -621,11 +739,6 @@ static void xpad_deinit_output(struct usb_xpad *xpad) xpad->odata, xpad->odata_dma); } } -#else -static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) { return 0; } -static void xpad_deinit_output(struct usb_xpad *xpad) {} -static void xpad_stop_output(struct usb_xpad *xpad) {} -#endif #ifdef CONFIG_JOYSTICK_XPAD_FF static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) @@ -692,7 +805,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect static int xpad_init_ff(struct usb_xpad *xpad) { - if (xpad->xtype == XTYPE_UNKNOWN) + if (xpad->xtype == XTYPE_UNKNOWN || xpad->xtype == XTYPE_XBOXONE) return 0; input_set_capability(xpad->dev, EV_FF, FF_RUMBLE); @@ -801,6 +914,14 @@ static int xpad_open(struct input_dev *dev) if (usb_submit_urb(xpad->irq_in, GFP_KERNEL)) return -EIO; + if (xpad->xtype == XTYPE_XBOXONE) { + /* Xbox one controller needs to be initialized. 
*/ + xpad->odata[0] = 0x05; + xpad->odata[1] = 0x20; + xpad->irq_out->transfer_buffer_length = 2; + return usb_submit_urb(xpad->irq_out, GFP_KERNEL); + } + return 0; } @@ -816,6 +937,7 @@ static void xpad_close(struct input_dev *dev) static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs) { + struct usb_xpad *xpad = input_get_drvdata(input_dev); set_bit(abs, input_dev->absbit); switch (abs) { @@ -827,7 +949,10 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs) break; case ABS_Z: case ABS_RZ: /* the triggers (if mapped to axes) */ - input_set_abs_params(input_dev, abs, 0, 255, 0, 0); + if (xpad->xtype == XTYPE_XBOXONE) + input_set_abs_params(input_dev, abs, 0, 1023, 0, 0); + else + input_set_abs_params(input_dev, abs, 0, 255, 0, 0); break; case ABS_HAT0X: case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes */ @@ -842,6 +967,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_xpad *xpad; struct input_dev *input_dev; struct usb_endpoint_descriptor *ep_irq_in; + int ep_irq_in_idx; int i, error; for (i = 0; xpad_device[i].idVendor; i++) { @@ -850,6 +976,16 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id break; } + if (xpad_device[i].xtype == XTYPE_XBOXONE && + intf->cur_altsetting->desc.bInterfaceNumber != 0) { + /* + * The Xbox One controller lists three interfaces all with the + * same interface class, subclass and protocol. Differentiate by + * interface number. + */ + return -ENODEV; + } + xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL); input_dev = input_allocate_device(); if (!xpad || !input_dev) { @@ -920,7 +1056,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id __set_bit(xpad_common_btn[i], input_dev->keybit); /* set up model-specific ones */ - if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) { + if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W || + xpad->xtype == XTYPE_XBOXONE) { for (i = 0; xpad360_btn[i] >= 0; i++) __set_bit(xpad360_btn[i], input_dev->keybit); } else { @@ -933,7 +1070,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id __set_bit(xpad_btn_pad[i], input_dev->keybit); } else { for (i = 0; xpad_abs_pad[i] >= 0; i++) - xpad_set_up_abs(input_dev, xpad_abs_pad[i]); + xpad_set_up_abs(input_dev, xpad_abs_pad[i]); } if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { @@ -956,7 +1093,10 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id if (error) goto fail5; - ep_irq_in = &intf->cur_altsetting->endpoint[0].desc; + /* Xbox One controller has in/out endpoints swapped. */ + ep_irq_in_idx = xpad->xtype == XTYPE_XBOXONE ? 1 : 0; + ep_irq_in = &intf->cur_altsetting->endpoint[ep_irq_in_idx].desc; + usb_fill_int_urb(xpad->irq_in, udev, usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress), xpad->idata, XPAD_PKT_LEN, xpad_irq_in, diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index f7e79b481349..a3958c63d7d5 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -665,4 +665,14 @@ config KEYBOARD_CROS_EC To compile this driver as a module, choose M here: the module will be called cros_ec_keyb. +config KEYBOARD_CAP1106 + tristate "Microchip CAP1106 touch sensor" + depends on OF && I2C + select REGMAP_I2C + help + Say Y here to enable the CAP1106 touch sensor driver. + + To compile this driver as a module, choose M here: the + module will be called cap1106. 
+ endif diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 7504ae19049d..0a3345634d79 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o +obj-$(CONFIG_KEYBOARD_CAP1106) += cap1106.o obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 2dd1d0dd4f7d..6f5d79569136 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -1791,14 +1791,6 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), - DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"), - }, - .callback = atkbd_deactivate_fixup, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), - DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"), }, .callback = atkbd_deactivate_fixup, }, diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c new file mode 100644 index 000000000000..d70b65a14ced --- /dev/null +++ b/drivers/input/keyboard/cap1106.c @@ -0,0 +1,341 @@ +/* + * Input driver for Microchip CAP1106, 6 channel capacitive touch sensor + * + * http://www.microchip.com/wwwproducts/Devices.aspx?product=CAP1106 + * + * (c) 2014 Daniel Mack <linux@zonque.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/input.h> +#include <linux/of_irq.h> +#include <linux/regmap.h> +#include <linux/i2c.h> +#include <linux/gpio/consumer.h> + +#define CAP1106_REG_MAIN_CONTROL 0x00 +#define CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT (6) +#define CAP1106_REG_MAIN_CONTROL_GAIN_MASK (0xc0) +#define CAP1106_REG_MAIN_CONTROL_DLSEEP BIT(4) +#define CAP1106_REG_GENERAL_STATUS 0x02 +#define CAP1106_REG_SENSOR_INPUT 0x03 +#define CAP1106_REG_NOISE_FLAG_STATUS 0x0a +#define CAP1106_REG_SENOR_DELTA(X) (0x10 + (X)) +#define CAP1106_REG_SENSITIVITY_CONTROL 0x1f +#define CAP1106_REG_CONFIG 0x20 +#define CAP1106_REG_SENSOR_ENABLE 0x21 +#define CAP1106_REG_SENSOR_CONFIG 0x22 +#define CAP1106_REG_SENSOR_CONFIG2 0x23 +#define CAP1106_REG_SAMPLING_CONFIG 0x24 +#define CAP1106_REG_CALIBRATION 0x26 +#define CAP1106_REG_INT_ENABLE 0x27 +#define CAP1106_REG_REPEAT_RATE 0x28 +#define CAP1106_REG_MT_CONFIG 0x2a +#define CAP1106_REG_MT_PATTERN_CONFIG 0x2b +#define CAP1106_REG_MT_PATTERN 0x2d +#define CAP1106_REG_RECALIB_CONFIG 0x2f +#define CAP1106_REG_SENSOR_THRESH(X) (0x30 + (X)) +#define CAP1106_REG_SENSOR_NOISE_THRESH 0x38 +#define CAP1106_REG_STANDBY_CHANNEL 0x40 +#define CAP1106_REG_STANDBY_CONFIG 0x41 +#define CAP1106_REG_STANDBY_SENSITIVITY 0x42 +#define CAP1106_REG_STANDBY_THRESH 0x43 +#define CAP1106_REG_CONFIG2 0x44 +#define CAP1106_REG_SENSOR_BASE_CNT(X) (0x50 + (X)) +#define CAP1106_REG_SENSOR_CALIB (0xb1 + (X)) +#define CAP1106_REG_SENSOR_CALIB_LSB1 0xb9 +#define CAP1106_REG_SENSOR_CALIB_LSB2 0xba +#define CAP1106_REG_PRODUCT_ID 0xfd +#define CAP1106_REG_MANUFACTURER_ID 0xfe +#define CAP1106_REG_REVISION 0xff + +#define CAP1106_NUM_CHN 6 +#define CAP1106_PRODUCT_ID 0x55 +#define 
CAP1106_MANUFACTURER_ID 0x5d + +struct cap1106_priv { + struct regmap *regmap; + struct input_dev *idev; + + /* config */ + unsigned short keycodes[CAP1106_NUM_CHN]; +}; + +static const struct reg_default cap1106_reg_defaults[] = { + { CAP1106_REG_MAIN_CONTROL, 0x00 }, + { CAP1106_REG_GENERAL_STATUS, 0x00 }, + { CAP1106_REG_SENSOR_INPUT, 0x00 }, + { CAP1106_REG_NOISE_FLAG_STATUS, 0x00 }, + { CAP1106_REG_SENSITIVITY_CONTROL, 0x2f }, + { CAP1106_REG_CONFIG, 0x20 }, + { CAP1106_REG_SENSOR_ENABLE, 0x3f }, + { CAP1106_REG_SENSOR_CONFIG, 0xa4 }, + { CAP1106_REG_SENSOR_CONFIG2, 0x07 }, + { CAP1106_REG_SAMPLING_CONFIG, 0x39 }, + { CAP1106_REG_CALIBRATION, 0x00 }, + { CAP1106_REG_INT_ENABLE, 0x3f }, + { CAP1106_REG_REPEAT_RATE, 0x3f }, + { CAP1106_REG_MT_CONFIG, 0x80 }, + { CAP1106_REG_MT_PATTERN_CONFIG, 0x00 }, + { CAP1106_REG_MT_PATTERN, 0x3f }, + { CAP1106_REG_RECALIB_CONFIG, 0x8a }, + { CAP1106_REG_SENSOR_THRESH(0), 0x40 }, + { CAP1106_REG_SENSOR_THRESH(1), 0x40 }, + { CAP1106_REG_SENSOR_THRESH(2), 0x40 }, + { CAP1106_REG_SENSOR_THRESH(3), 0x40 }, + { CAP1106_REG_SENSOR_THRESH(4), 0x40 }, + { CAP1106_REG_SENSOR_THRESH(5), 0x40 }, + { CAP1106_REG_SENSOR_NOISE_THRESH, 0x01 }, + { CAP1106_REG_STANDBY_CHANNEL, 0x00 }, + { CAP1106_REG_STANDBY_CONFIG, 0x39 }, + { CAP1106_REG_STANDBY_SENSITIVITY, 0x02 }, + { CAP1106_REG_STANDBY_THRESH, 0x40 }, + { CAP1106_REG_CONFIG2, 0x40 }, + { CAP1106_REG_SENSOR_CALIB_LSB1, 0x00 }, + { CAP1106_REG_SENSOR_CALIB_LSB2, 0x00 }, +}; + +static bool cap1106_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case CAP1106_REG_MAIN_CONTROL: + case CAP1106_REG_SENSOR_INPUT: + case CAP1106_REG_SENOR_DELTA(0): + case CAP1106_REG_SENOR_DELTA(1): + case CAP1106_REG_SENOR_DELTA(2): + case CAP1106_REG_SENOR_DELTA(3): + case CAP1106_REG_SENOR_DELTA(4): + case CAP1106_REG_SENOR_DELTA(5): + case CAP1106_REG_PRODUCT_ID: + case CAP1106_REG_MANUFACTURER_ID: + case CAP1106_REG_REVISION: + return true; + } + + return false; +} + +static const struct regmap_config cap1106_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = CAP1106_REG_REVISION, + .reg_defaults = cap1106_reg_defaults, + + .num_reg_defaults = ARRAY_SIZE(cap1106_reg_defaults), + .cache_type = REGCACHE_RBTREE, + .volatile_reg = cap1106_volatile_reg, +}; + +static irqreturn_t cap1106_thread_func(int irq_num, void *data) +{ + struct cap1106_priv *priv = data; + unsigned int status; + int ret, i; + + /* + * Deassert interrupt. This needs to be done before reading the status + * registers, which will not carry valid values otherwise. + */ + ret = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL, 1, 0); + if (ret < 0) + goto out; + + ret = regmap_read(priv->regmap, CAP1106_REG_SENSOR_INPUT, &status); + if (ret < 0) + goto out; + + for (i = 0; i < CAP1106_NUM_CHN; i++) + input_report_key(priv->idev, priv->keycodes[i], + status & (1 << i)); + + input_sync(priv->idev); + +out: + return IRQ_HANDLED; +} + +static int cap1106_set_sleep(struct cap1106_priv *priv, bool sleep) +{ + return regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL, + CAP1106_REG_MAIN_CONTROL_DLSEEP, + sleep ? 
CAP1106_REG_MAIN_CONTROL_DLSEEP : 0); +} + +static int cap1106_input_open(struct input_dev *idev) +{ + struct cap1106_priv *priv = input_get_drvdata(idev); + + return cap1106_set_sleep(priv, false); +} + +static void cap1106_input_close(struct input_dev *idev) +{ + struct cap1106_priv *priv = input_get_drvdata(idev); + + cap1106_set_sleep(priv, true); +} + +static int cap1106_i2c_probe(struct i2c_client *i2c_client, + const struct i2c_device_id *id) +{ + struct device *dev = &i2c_client->dev; + struct cap1106_priv *priv; + struct device_node *node; + int i, error, irq, gain = 0; + unsigned int val, rev; + u32 gain32, keycodes[CAP1106_NUM_CHN]; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regmap = devm_regmap_init_i2c(i2c_client, &cap1106_regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + error = regmap_read(priv->regmap, CAP1106_REG_PRODUCT_ID, &val); + if (error) + return error; + + if (val != CAP1106_PRODUCT_ID) { + dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n", + val, CAP1106_PRODUCT_ID); + return -ENODEV; + } + + error = regmap_read(priv->regmap, CAP1106_REG_MANUFACTURER_ID, &val); + if (error) + return error; + + if (val != CAP1106_MANUFACTURER_ID) { + dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n", + val, CAP1106_MANUFACTURER_ID); + return -ENODEV; + } + + error = regmap_read(priv->regmap, CAP1106_REG_REVISION, &rev); + if (error < 0) + return error; + + dev_info(dev, "CAP1106 detected, revision 0x%02x\n", rev); + i2c_set_clientdata(i2c_client, priv); + node = dev->of_node; + + if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) { + if (is_power_of_2(gain32) && gain32 <= 8) + gain = ilog2(gain32); + else + dev_err(dev, "Invalid sensor-gain value %d\n", gain32); + } + + BUILD_BUG_ON(ARRAY_SIZE(keycodes) != ARRAY_SIZE(priv->keycodes)); + + /* Provide some useful defaults */ + for (i = 0; i < ARRAY_SIZE(keycodes); i++) + keycodes[i] = KEY_A + i; + + of_property_read_u32_array(node, "linux,keycodes", + keycodes, ARRAY_SIZE(keycodes)); + + for (i = 0; i < ARRAY_SIZE(keycodes); i++) + priv->keycodes[i] = keycodes[i]; + + error = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL, + CAP1106_REG_MAIN_CONTROL_GAIN_MASK, + gain << CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT); + if (error) + return error; + + /* Disable autorepeat. The Linux input system has its own handling. */ + error = regmap_write(priv->regmap, CAP1106_REG_REPEAT_RATE, 0); + if (error) + return error; + + priv->idev = devm_input_allocate_device(dev); + if (!priv->idev) + return -ENOMEM; + + priv->idev->name = "CAP1106 capacitive touch sensor"; + priv->idev->id.bustype = BUS_I2C; + priv->idev->evbit[0] = BIT_MASK(EV_KEY); + + if (of_property_read_bool(node, "autorepeat")) + __set_bit(EV_REP, priv->idev->evbit); + + for (i = 0; i < CAP1106_NUM_CHN; i++) + __set_bit(priv->keycodes[i], priv->idev->keybit); + + __clear_bit(KEY_RESERVED, priv->idev->keybit); + + priv->idev->keycode = priv->keycodes; + priv->idev->keycodesize = sizeof(priv->keycodes[0]); + priv->idev->keycodemax = ARRAY_SIZE(priv->keycodes); + + priv->idev->id.vendor = CAP1106_MANUFACTURER_ID; + priv->idev->id.product = CAP1106_PRODUCT_ID; + priv->idev->id.version = rev; + + priv->idev->open = cap1106_input_open; + priv->idev->close = cap1106_input_close; + + input_set_drvdata(priv->idev, priv); + + /* + * Put the device in deep sleep mode for now. + * ->open() will bring it back once the it is actually needed. 
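The cap1106 probe code above accepts a microchip,sensor-gain of 1, 2, 4 or 8 and converts it to the two-bit register field with an is_power_of_2()/ilog2() check. A standalone sketch of that mapping, with the validation folded into one hypothetical helper, is below; gain_to_field() is not a driver function.

#include <stdio.h>

/* Translate a sensor gain of 1, 2, 4 or 8 into the 2-bit field value
 * 0..3; returns -1 for values the hardware cannot express. */
static int gain_to_field(unsigned int gain)
{
	int field = 0;

	if (gain == 0 || gain > 8 || (gain & (gain - 1)))
		return -1;		/* not a power of two in range */

	while (gain > 1) {		/* integer log2 */
		gain >>= 1;
		field++;
	}
	return field;
}

int main(void)
{
	unsigned int g;

	for (g = 1; g <= 9; g++)
		printf("gain %u -> field %d\n", g, gain_to_field(g));
	return 0;
}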
+ */ + cap1106_set_sleep(priv, true); + + error = input_register_device(priv->idev); + if (error) + return error; + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { + dev_err(dev, "Unable to parse or map IRQ\n"); + return -ENXIO; + } + + error = devm_request_threaded_irq(dev, irq, NULL, cap1106_thread_func, + IRQF_ONESHOT, dev_name(dev), priv); + if (error) + return error; + + return 0; +} + +static const struct of_device_id cap1106_dt_ids[] = { + { .compatible = "microchip,cap1106", }, + {} +}; +MODULE_DEVICE_TABLE(of, cap1106_dt_ids); + +static const struct i2c_device_id cap1106_i2c_ids[] = { + { "cap1106", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, cap1106_i2c_ids); + +static struct i2c_driver cap1106_i2c_driver = { + .driver = { + .name = "cap1106", + .owner = THIS_MODULE, + .of_match_table = cap1106_dt_ids, + }, + .id_table = cap1106_i2c_ids, + .probe = cap1106_i2c_probe, +}; + +module_i2c_driver(cap1106_i2c_driver); + +MODULE_ALIAS("platform:cap1106"); +MODULE_DESCRIPTION("Microchip CAP1106 driver"); +MODULE_AUTHOR("Daniel Mack <linux@zonque.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 408379669d3c..791781ade4e7 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -24,8 +24,8 @@ #include <linux/module.h> #include <linux/i2c.h> #include <linux/input.h> +#include <linux/interrupt.h> #include <linux/kernel.h> -#include <linux/notifier.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/input/matrix_keypad.h> @@ -42,7 +42,6 @@ * @dev: Device pointer * @idev: Input device * @ec: Top level ChromeOS device to use to talk to EC - * @event_notifier: interrupt event notifier for transport devices */ struct cros_ec_keyb { unsigned int rows; @@ -55,7 +54,6 @@ struct cros_ec_keyb { struct device *dev; struct input_dev *idev; struct cros_ec_device *ec; - struct notifier_block notifier; }; @@ -173,41 +171,55 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev, input_sync(ckdev->idev); } -static int cros_ec_keyb_open(struct input_dev *dev) -{ - struct cros_ec_keyb *ckdev = input_get_drvdata(dev); - - return blocking_notifier_chain_register(&ckdev->ec->event_notifier, - &ckdev->notifier); -} - -static void cros_ec_keyb_close(struct input_dev *dev) -{ - struct cros_ec_keyb *ckdev = input_get_drvdata(dev); - - blocking_notifier_chain_unregister(&ckdev->ec->event_notifier, - &ckdev->notifier); -} - static int cros_ec_keyb_get_state(struct cros_ec_keyb *ckdev, uint8_t *kb_state) { - return ckdev->ec->command_recv(ckdev->ec, EC_CMD_MKBP_STATE, - kb_state, ckdev->cols); + struct cros_ec_command msg = { + .version = 0, + .command = EC_CMD_MKBP_STATE, + .outdata = NULL, + .outsize = 0, + .indata = kb_state, + .insize = ckdev->cols, + }; + + return ckdev->ec->cmd_xfer(ckdev->ec, &msg); } -static int cros_ec_keyb_work(struct notifier_block *nb, - unsigned long state, void *_notify) +static irqreturn_t cros_ec_keyb_irq(int irq, void *data) { + struct cros_ec_keyb *ckdev = data; + struct cros_ec_device *ec = ckdev->ec; int ret; - struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb, - notifier); uint8_t kb_state[ckdev->cols]; + if (device_may_wakeup(ec->dev)) + pm_wakeup_event(ec->dev, 0); + ret = cros_ec_keyb_get_state(ckdev, kb_state); if (ret >= 0) cros_ec_keyb_process(ckdev, kb_state, ret); + else + dev_err(ec->dev, "failed to get keyboard state: %d\n", ret); - return NOTIFY_DONE; + return IRQ_HANDLED; +} + +static int 
cros_ec_keyb_open(struct input_dev *dev) +{ + struct cros_ec_keyb *ckdev = input_get_drvdata(dev); + struct cros_ec_device *ec = ckdev->ec; + + return request_threaded_irq(ec->irq, NULL, cros_ec_keyb_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "cros_ec_keyb", ckdev); +} + +static void cros_ec_keyb_close(struct input_dev *dev) +{ + struct cros_ec_keyb *ckdev = input_get_drvdata(dev); + struct cros_ec_device *ec = ckdev->ec; + + free_irq(ec->irq, ckdev); } static int cros_ec_keyb_probe(struct platform_device *pdev) @@ -238,8 +250,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev) if (!idev) return -ENOMEM; + if (!ec->irq) { + dev_err(dev, "no EC IRQ specified\n"); + return -EINVAL; + } + ckdev->ec = ec; - ckdev->notifier.notifier_call = cros_ec_keyb_work; ckdev->dev = dev; dev_set_drvdata(&pdev->dev, ckdev); diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c index 8280cb16260b..20a99c368d16 100644 --- a/drivers/input/keyboard/imx_keypad.c +++ b/drivers/input/keyboard/imx_keypad.c @@ -531,8 +531,7 @@ static int imx_keypad_probe(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int imx_kbd_suspend(struct device *dev) +static int __maybe_unused imx_kbd_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx_keypad *kbd = platform_get_drvdata(pdev); @@ -552,7 +551,7 @@ static int imx_kbd_suspend(struct device *dev) return 0; } -static int imx_kbd_resume(struct device *dev) +static int __maybe_unused imx_kbd_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct imx_keypad *kbd = platform_get_drvdata(pdev); @@ -575,7 +574,6 @@ err_clk: return ret; } -#endif static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume); diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index 0b42118cbf8f..cb32e2b506b7 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c @@ -558,6 +558,12 @@ static ssize_t lm8323_pwm_store_time(struct device *dev, } static DEVICE_ATTR(time, 0644, lm8323_pwm_show_time, lm8323_pwm_store_time); +static struct attribute *lm8323_pwm_attrs[] = { + &dev_attr_time.attr, + NULL +}; +ATTRIBUTE_GROUPS(lm8323_pwm); + static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev, const char *name) { @@ -580,16 +586,11 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev, if (name) { pwm->cdev.name = name; pwm->cdev.brightness_set = lm8323_pwm_set_brightness; + pwm->cdev.groups = lm8323_pwm_groups; if (led_classdev_register(dev, &pwm->cdev) < 0) { dev_err(dev, "couldn't register PWM %d\n", id); return -1; } - if (device_create_file(pwm->cdev.dev, - &dev_attr_time) < 0) { - dev_err(dev, "couldn't register time attribute\n"); - led_classdev_unregister(&pwm->cdev); - return -1; - } pwm->enabled = true; } @@ -753,11 +754,8 @@ fail3: device_remove_file(&client->dev, &dev_attr_disable_kp); fail2: while (--pwm >= 0) - if (lm->pwm[pwm].enabled) { - device_remove_file(lm->pwm[pwm].cdev.dev, - &dev_attr_time); + if (lm->pwm[pwm].enabled) led_classdev_unregister(&lm->pwm[pwm].cdev); - } fail1: input_free_device(idev); kfree(lm); @@ -777,10 +775,8 @@ static int lm8323_remove(struct i2c_client *client) device_remove_file(&lm->client->dev, &dev_attr_disable_kp); for (i = 0; i < 3; i++) - if (lm->pwm[i].enabled) { - device_remove_file(lm->pwm[i].cdev.dev, &dev_attr_time); + if (lm->pwm[i].enabled) led_classdev_unregister(&lm->pwm[i].cdev); - } kfree(lm); diff 
--git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 8d2e19e81e1e..e651fa692afe 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -332,23 +332,24 @@ static int matrix_keypad_init_gpio(struct platform_device *pdev, } if (pdata->clustered_irq > 0) { - err = request_irq(pdata->clustered_irq, + err = request_any_context_irq(pdata->clustered_irq, matrix_keypad_interrupt, pdata->clustered_irq_flags, "matrix-keypad", keypad); - if (err) { + if (err < 0) { dev_err(&pdev->dev, "Unable to acquire clustered interrupt\n"); goto err_free_rows; } } else { for (i = 0; i < pdata->num_row_gpios; i++) { - err = request_irq(gpio_to_irq(pdata->row_gpios[i]), + err = request_any_context_irq( + gpio_to_irq(pdata->row_gpios[i]), matrix_keypad_interrupt, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "matrix-keypad", keypad); - if (err) { + if (err < 0) { dev_err(&pdev->dev, "Unable to acquire interrupt for GPIO line %i\n", pdata->row_gpios[i]); diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c index 430b54539720..faa6da53eba8 100644 --- a/drivers/input/keyboard/max7359_keypad.c +++ b/drivers/input/keyboard/max7359_keypad.c @@ -203,12 +203,17 @@ static int max7359_probe(struct i2c_client *client, dev_dbg(&client->dev, "keys FIFO is 0x%02x\n", ret); - keypad = kzalloc(sizeof(struct max7359_keypad), GFP_KERNEL); - input_dev = input_allocate_device(); - if (!keypad || !input_dev) { + keypad = devm_kzalloc(&client->dev, sizeof(struct max7359_keypad), + GFP_KERNEL); + if (!keypad) { dev_err(&client->dev, "failed to allocate memory\n"); - error = -ENOMEM; - goto failed_free_mem; + return -ENOMEM; + } + + input_dev = devm_input_allocate_device(&client->dev); + if (!input_dev) { + dev_err(&client->dev, "failed to allocate input device\n"); + return -ENOMEM; } keypad->client = client; @@ -230,19 +235,20 @@ static int max7359_probe(struct i2c_client *client, max7359_build_keycode(keypad, keymap_data); - error = request_threaded_irq(client->irq, NULL, max7359_interrupt, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - client->name, keypad); + error = devm_request_threaded_irq(&client->dev, client->irq, NULL, + max7359_interrupt, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + client->name, keypad); if (error) { dev_err(&client->dev, "failed to register interrupt\n"); - goto failed_free_mem; + return error; } /* Register the input device */ error = input_register_device(input_dev); if (error) { dev_err(&client->dev, "failed to register input device\n"); - goto failed_free_irq; + return error; } /* Initialize MAX7359 */ @@ -252,24 +258,6 @@ static int max7359_probe(struct i2c_client *client, device_init_wakeup(&client->dev, 1); return 0; - -failed_free_irq: - free_irq(client->irq, keypad); -failed_free_mem: - input_free_device(input_dev); - kfree(keypad); - return error; -} - -static int max7359_remove(struct i2c_client *client) -{ - struct max7359_keypad *keypad = i2c_get_clientdata(client); - - free_irq(client->irq, keypad); - input_unregister_device(keypad->input_dev); - kfree(keypad); - - return 0; } #ifdef CONFIG_PM_SLEEP @@ -313,7 +301,6 @@ static struct i2c_driver max7359_i2c_driver = { .pm = &max7359_pm, }, .probe = max7359_probe, - .remove = max7359_remove, .id_table = max7359_ids, }; diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c index 01f3b5b300f3..a3fe4a990cc9 100644 --- a/drivers/input/misc/keyspan_remote.c +++ b/drivers/input/misc/keyspan_remote.c @@ -392,7 +392,6 
@@ static void keyspan_irq_recv(struct urb *urb) default: goto resubmit; - break; } if (debug) diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 5a6334be30b8..e34dfc29beb3 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -83,6 +83,9 @@ soc_button_device_create(struct pnp_dev *pdev, sizeof(*gpio_keys_pdata) + sizeof(*gpio_keys) * MAX_NBUTTONS, GFP_KERNEL); + if (!gpio_keys_pdata) + return ERR_PTR(-ENOMEM); + gpio_keys = (void *)(gpio_keys_pdata + 1); for (info = button_info; info->name; info++) { diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 856936247500..421e29e4cd81 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -311,7 +311,14 @@ static int uinput_open(struct inode *inode, struct file *file) static int uinput_validate_absbits(struct input_dev *dev) { unsigned int cnt; - int retval = 0; + int nslot; + + if (!test_bit(EV_ABS, dev->evbit)) + return 0; + + /* + * Check if absmin/absmax/absfuzz/absflat are sane. + */ for (cnt = 0; cnt < ABS_CNT; cnt++) { int min, max; @@ -327,8 +334,7 @@ static int uinput_validate_absbits(struct input_dev *dev) UINPUT_NAME, cnt, input_abs_get_min(dev, cnt), input_abs_get_max(dev, cnt)); - retval = -EINVAL; - break; + return -EINVAL; } if (input_abs_get_flat(dev, cnt) > @@ -340,11 +346,18 @@ static int uinput_validate_absbits(struct input_dev *dev) input_abs_get_flat(dev, cnt), input_abs_get_min(dev, cnt), input_abs_get_max(dev, cnt)); - retval = -EINVAL; - break; + return -EINVAL; } } - return retval; + + if (test_bit(ABS_MT_SLOT, dev->absbit)) { + nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; + input_mt_init_slots(dev, nslot, 0); + } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { + input_set_events_per_packet(dev, 60); + } + + return 0; } static int uinput_allocate_device(struct uinput_device *udev) @@ -410,19 +423,9 @@ static int uinput_setup_device(struct uinput_device *udev, input_abs_set_flat(dev, i, user_dev->absflat[i]); } - /* check if absmin/absmax/absfuzz/absflat are filled as - * told in Documentation/input/input-programming.txt */ - if (test_bit(EV_ABS, dev->evbit)) { - retval = uinput_validate_absbits(dev); - if (retval < 0) - goto exit; - if (test_bit(ABS_MT_SLOT, dev->absbit)) { - int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; - input_mt_init_slots(dev, nslot, 0); - } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { - input_set_events_per_packet(dev, 60); - } - } + retval = uinput_validate_absbits(dev); + if (retval < 0) + goto exit; udev->state = UIST_SETUP_COMPLETE; retval = count; @@ -720,6 +723,12 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd, } switch (cmd) { + case UI_GET_VERSION: + if (put_user(UINPUT_VERSION, + (unsigned int __user *)p)) + retval = -EFAULT; + goto out; + case UI_DEV_CREATE: retval = uinput_create_device(udev); goto out; diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index fb15c64ffb95..35a49bf57227 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -99,6 +99,8 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ +#define ALPS_IS_RUSHMORE 0x100 /* device is a rushmore */ +#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { { { 0x32, 0x02, 
0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */ @@ -281,11 +283,10 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse) * * The bitmaps don't have enough data to track fingers, so this function * only generates points representing a bounding box of at most two contacts. - * These two points are returned in x1, y1, x2, and y2. + * These two points are returned in fields->mt. */ static void alps_process_bitmap_dolphin(struct alps_data *priv, - struct alps_fields *fields, - int *x1, int *y1, int *x2, int *y2) + struct alps_fields *fields) { int box_middle_x, box_middle_y; unsigned int x_map, y_map; @@ -308,8 +309,6 @@ static void alps_process_bitmap_dolphin(struct alps_data *priv, if (x_msb > priv->x_bits || y_msb > priv->y_bits) return; - *x1 = *y1 = *x2 = *y2 = 0; - if (fields->fingers > 1) { start_bit = priv->x_bits - x_msb; end_bit = priv->x_bits - x_lsb; @@ -320,10 +319,35 @@ static void alps_process_bitmap_dolphin(struct alps_data *priv, end_bit = y_msb - 1; box_middle_y = (priv->y_max * (start_bit + end_bit)) / (2 * (priv->y_bits - 1)); - *x1 = fields->x; - *y1 = fields->y; - *x2 = 2 * box_middle_x - *x1; - *y2 = 2 * box_middle_y - *y1; + fields->mt[0] = fields->st; + fields->mt[1].x = 2 * box_middle_x - fields->mt[0].x; + fields->mt[1].y = 2 * box_middle_y - fields->mt[0].y; + } +} + +static void alps_get_bitmap_points(unsigned int map, + struct alps_bitmap_point *low, + struct alps_bitmap_point *high, + int *fingers) +{ + struct alps_bitmap_point *point; + int i, bit, prev_bit = 0; + + point = low; + for (i = 0; map != 0; i++, map >>= 1) { + bit = map & 1; + if (bit) { + if (!prev_bit) { + point->start_bit = i; + point->num_bits = 0; + (*fingers)++; + } + point->num_bits++; + } else { + if (prev_bit) + point = high; + } + prev_bit = bit; } } @@ -334,71 +358,21 @@ static void alps_process_bitmap_dolphin(struct alps_data *priv, * * The bitmaps don't have enough data to track fingers, so this function * only generates points representing a bounding box of all contacts. - * These points are returned in x1, y1, x2, and y2 when the return value + * These points are returned in fields->mt when the return value * is greater than 0. */ static int alps_process_bitmap(struct alps_data *priv, - unsigned int x_map, unsigned int y_map, - int *x1, int *y1, int *x2, int *y2) + struct alps_fields *fields) { - struct alps_bitmap_point { - int start_bit; - int num_bits; - }; - - int fingers_x = 0, fingers_y = 0, fingers; - int i, bit, prev_bit; + int i, fingers_x = 0, fingers_y = 0, fingers; struct alps_bitmap_point x_low = {0,}, x_high = {0,}; struct alps_bitmap_point y_low = {0,}, y_high = {0,}; - struct alps_bitmap_point *point; - if (!x_map || !y_map) + if (!fields->x_map || !fields->y_map) return 0; - *x1 = *y1 = *x2 = *y2 = 0; - - prev_bit = 0; - point = &x_low; - for (i = 0; x_map != 0; i++, x_map >>= 1) { - bit = x_map & 1; - if (bit) { - if (!prev_bit) { - point->start_bit = i; - fingers_x++; - } - point->num_bits++; - } else { - if (prev_bit) - point = &x_high; - else - point->num_bits = 0; - } - prev_bit = bit; - } - - /* - * y bitmap is reversed for what we need (lower positions are in - * higher bits), so we process from the top end. 
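alps_get_bitmap_points(), introduced above, replaces two near-identical scan loops: it walks the contact bitmap once, recording the lowest and highest runs of set bits and counting one finger per run. A compilable sketch of the same scan, with a small test map, follows; the struct name differs slightly from the driver's alps_bitmap_point.

#include <stdio.h>

struct bitmap_point {
	int start_bit;
	int num_bits;
};

/* Walk a contact bitmap LSB-first, recording the lowest and highest
 * runs of set bits and counting one finger per run. */
static void get_bitmap_points(unsigned int map,
			      struct bitmap_point *low,
			      struct bitmap_point *high,
			      int *fingers)
{
	struct bitmap_point *point = low;
	int i, bit, prev_bit = 0;

	for (i = 0; map != 0; i++, map >>= 1) {
		bit = map & 1;
		if (bit) {
			if (!prev_bit) {
				point->start_bit = i;
				point->num_bits = 0;
				(*fingers)++;
			}
			point->num_bits++;
		} else if (prev_bit) {
			point = high;	/* first gap: switch to the high run */
		}
		prev_bit = bit;
	}
}

int main(void)
{
	struct bitmap_point low = { 0, 0 }, high = { 0, 0 };
	int fingers = 0;

	get_bitmap_points(0x0c3, &low, &high, &fingers);	/* bits 0-1, 6-7 */
	printf("%d fingers, low %d+%d, high %d+%d\n", fingers,
	       low.start_bit, low.num_bits, high.start_bit, high.num_bits);
	return 0;
}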
- */ - y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - priv->y_bits); - prev_bit = 0; - point = &y_low; - for (i = 0; y_map != 0; i++, y_map <<= 1) { - bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1)); - if (bit) { - if (!prev_bit) { - point->start_bit = i; - fingers_y++; - } - point->num_bits++; - } else { - if (prev_bit) - point = &y_high; - else - point->num_bits = 0; - } - prev_bit = bit; - } + alps_get_bitmap_points(fields->x_map, &x_low, &x_high, &fingers_x); + alps_get_bitmap_points(fields->y_map, &y_low, &y_high, &fingers_y); /* * Fingers can overlap, so we use the maximum count of fingers @@ -407,58 +381,91 @@ static int alps_process_bitmap(struct alps_data *priv, fingers = max(fingers_x, fingers_y); /* - * If total fingers is > 1 but either axis reports only a single - * contact, we have overlapping or adjacent fingers. For the - * purposes of creating a bounding box, divide the single contact - * (roughly) equally between the two points. + * If an axis reports only a single contact, we have overlapping or + * adjacent fingers. Divide the single contact between the two points. */ - if (fingers > 1) { - if (fingers_x == 1) { - i = x_low.num_bits / 2; - x_low.num_bits = x_low.num_bits - i; - x_high.start_bit = x_low.start_bit + i; - x_high.num_bits = max(i, 1); - } else if (fingers_y == 1) { - i = y_low.num_bits / 2; - y_low.num_bits = y_low.num_bits - i; - y_high.start_bit = y_low.start_bit + i; - y_high.num_bits = max(i, 1); - } + if (fingers_x == 1) { + i = (x_low.num_bits - 1) / 2; + x_low.num_bits = x_low.num_bits - i; + x_high.start_bit = x_low.start_bit + i; + x_high.num_bits = max(i, 1); + } + if (fingers_y == 1) { + i = (y_low.num_bits - 1) / 2; + y_low.num_bits = y_low.num_bits - i; + y_high.start_bit = y_low.start_bit + i; + y_high.num_bits = max(i, 1); } - *x1 = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) / - (2 * (priv->x_bits - 1)); - *y1 = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) / - (2 * (priv->y_bits - 1)); - - if (fingers > 1) { - *x2 = (priv->x_max * - (2 * x_high.start_bit + x_high.num_bits - 1)) / - (2 * (priv->x_bits - 1)); - *y2 = (priv->y_max * - (2 * y_high.start_bit + y_high.num_bits - 1)) / - (2 * (priv->y_bits - 1)); + fields->mt[0].x = + (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) / + (2 * (priv->x_bits - 1)); + fields->mt[0].y = + (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) / + (2 * (priv->y_bits - 1)); + + fields->mt[1].x = + (priv->x_max * (2 * x_high.start_bit + x_high.num_bits - 1)) / + (2 * (priv->x_bits - 1)); + fields->mt[1].y = + (priv->y_max * (2 * y_high.start_bit + y_high.num_bits - 1)) / + (2 * (priv->y_bits - 1)); + + /* y-bitmap order is reversed, except on rushmore */ + if (!(priv->flags & ALPS_IS_RUSHMORE)) { + fields->mt[0].y = priv->y_max - fields->mt[0].y; + fields->mt[1].y = priv->y_max - fields->mt[1].y; } return fingers; } -static void alps_set_slot(struct input_dev *dev, int slot, bool active, - int x, int y) +static void alps_set_slot(struct input_dev *dev, int slot, int x, int y) { input_mt_slot(dev, slot); - input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); - if (active) { - input_report_abs(dev, ABS_MT_POSITION_X, x); - input_report_abs(dev, ABS_MT_POSITION_Y, y); - } + input_mt_report_slot_state(dev, MT_TOOL_FINGER, true); + input_report_abs(dev, ABS_MT_POSITION_X, x); + input_report_abs(dev, ABS_MT_POSITION_Y, y); } -static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers, - int x1, int y1, int x2, int y2) +static void 
alps_report_mt_data(struct psmouse *psmouse, int n) { - alps_set_slot(dev, 0, num_fingers != 0, x1, y1); - alps_set_slot(dev, 1, num_fingers == 2, x2, y2); + struct alps_data *priv = psmouse->private; + struct input_dev *dev = psmouse->dev; + struct alps_fields *f = &priv->f; + int i, slot[MAX_TOUCHES]; + + input_mt_assign_slots(dev, slot, f->mt, n); + for (i = 0; i < n; i++) + alps_set_slot(dev, slot[i], f->mt[i].x, f->mt[i].y); + + input_mt_sync_frame(dev); +} + +static void alps_report_semi_mt_data(struct psmouse *psmouse, int fingers) +{ + struct alps_data *priv = psmouse->private; + struct input_dev *dev = psmouse->dev; + struct alps_fields *f = &priv->f; + + /* Use st data when we don't have mt data */ + if (fingers < 2) { + f->mt[0].x = f->st.x; + f->mt[0].y = f->st.y; + fingers = f->pressure > 0 ? 1 : 0; + } + + alps_report_mt_data(psmouse, (fingers <= 2) ? fingers : 2); + + input_mt_report_finger_count(dev, fingers); + + input_report_key(dev, BTN_LEFT, f->left); + input_report_key(dev, BTN_RIGHT, f->right); + input_report_key(dev, BTN_MIDDLE, f->middle); + + input_report_abs(dev, ABS_PRESSURE, f->pressure); + + input_sync(dev); } static void alps_process_trackstick_packet_v3(struct psmouse *psmouse) @@ -532,7 +539,7 @@ static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p) f->ts_middle = !!(p[3] & 0x40); } -static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p, +static int alps_decode_pinnacle(struct alps_fields *f, unsigned char *p, struct psmouse *psmouse) { f->first_mp = !!(p[4] & 0x40); @@ -546,24 +553,31 @@ static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p, ((p[2] & 0x7f) << 1) | (p[4] & 0x01); - f->x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) | + f->st.x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) | ((p[0] & 0x30) >> 4); - f->y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f); - f->z = p[5] & 0x7f; + f->st.y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f); + f->pressure = p[5] & 0x7f; alps_decode_buttons_v3(f, p); + + return 0; } -static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p, +static int alps_decode_rushmore(struct alps_fields *f, unsigned char *p, struct psmouse *psmouse) { alps_decode_pinnacle(f, p, psmouse); + /* Rushmore's packet decode has a bit difference with Pinnacle's */ + f->is_mp = !!(p[5] & 0x40); + f->fingers = max((p[5] & 0x3), ((p[5] >> 2) & 0x3)) + 1; f->x_map |= (p[5] & 0x10) << 11; f->y_map |= (p[5] & 0x20) << 6; + + return 0; } -static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p, +static int alps_decode_dolphin(struct alps_fields *f, unsigned char *p, struct psmouse *psmouse) { u64 palm_data = 0; @@ -573,9 +587,9 @@ static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p, f->is_mp = !!(p[0] & 0x20); if (!f->is_mp) { - f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7)); - f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3)); - f->z = (p[0] & 4) ? 0 : p[5] & 0x7f; + f->st.x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7)); + f->st.y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3)); + f->pressure = (p[0] & 4) ? 
0 : p[5] & 0x7f; alps_decode_buttons_v3(f, p); } else { f->fingers = ((p[0] & 0x6) >> 1 | @@ -596,19 +610,21 @@ static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p, f->x_map = (palm_data >> priv->y_bits) & (BIT(priv->x_bits) - 1); } + + return 0; } static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; - struct input_dev *dev = psmouse->dev; struct input_dev *dev2 = priv->dev2; - int x1 = 0, y1 = 0, x2 = 0, y2 = 0; - int fingers = 0, bmap_fn; - struct alps_fields f = {0}; + struct alps_fields *f = &priv->f; + int fingers = 0; - priv->decode_fields(&f, packet, psmouse); + memset(f, 0, sizeof(*f)); + + priv->decode_fields(f, packet, psmouse); /* * There's no single feature of touchpad position and bitmap packets @@ -623,22 +639,14 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse) * packet. Check for this, and when it happens process the * position packet as usual. */ - if (f.is_mp) { - fingers = f.fingers; + if (f->is_mp) { + fingers = f->fingers; if (priv->proto_version == ALPS_PROTO_V3) { - bmap_fn = alps_process_bitmap(priv, f.x_map, - f.y_map, &x1, &y1, - &x2, &y2); - - /* - * We shouldn't report more than one finger if - * we don't have two coordinates. - */ - if (fingers > 1 && bmap_fn < 2) - fingers = bmap_fn; + if (alps_process_bitmap(priv, f) == 0) + fingers = 0; /* Use st data */ /* Now process position packet */ - priv->decode_fields(&f, priv->multi_data, + priv->decode_fields(f, priv->multi_data, psmouse); } else { /* @@ -647,15 +655,14 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse) * calculate Pt2, so we need to do position * packet decode first. */ - priv->decode_fields(&f, priv->multi_data, + priv->decode_fields(f, priv->multi_data, psmouse); /* * Since Dolphin's finger number is reliable, * there is no need to compare with bmap_fn. */ - alps_process_bitmap_dolphin(priv, &f, &x1, &y1, - &x2, &y2); + alps_process_bitmap_dolphin(priv, f); } } else { priv->multi_packet = 0; @@ -670,10 +677,10 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse) * out misidentified bitmap packets, we reject anything with this * bit set. */ - if (f.is_mp) + if (f->is_mp) return; - if (!priv->multi_packet && f.first_mp) { + if (!priv->multi_packet && f->first_mp) { priv->multi_packet = 1; memcpy(priv->multi_data, packet, sizeof(priv->multi_data)); return; @@ -687,44 +694,15 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse) * with x, y, and z all zero, so these seem to be flukes. * Ignore them. */ - if (f.x && f.y && !f.z) + if (f->st.x && f->st.y && !f->pressure) return; - /* - * If we don't have MT data or the bitmaps were empty, we have - * to rely on ST data. - */ - if (!fingers) { - x1 = f.x; - y1 = f.y; - fingers = f.z > 0 ? 
1 : 0; - } - - if (f.z >= 64) - input_report_key(dev, BTN_TOUCH, 1); - else - input_report_key(dev, BTN_TOUCH, 0); - - alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2); - - input_mt_report_finger_count(dev, fingers); - - input_report_key(dev, BTN_LEFT, f.left); - input_report_key(dev, BTN_RIGHT, f.right); - input_report_key(dev, BTN_MIDDLE, f.middle); - - if (f.z > 0) { - input_report_abs(dev, ABS_X, f.x); - input_report_abs(dev, ABS_Y, f.y); - } - input_report_abs(dev, ABS_PRESSURE, f.z); - - input_sync(dev); + alps_report_semi_mt_data(psmouse, fingers); if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) { - input_report_key(dev2, BTN_LEFT, f.ts_left); - input_report_key(dev2, BTN_RIGHT, f.ts_right); - input_report_key(dev2, BTN_MIDDLE, f.ts_middle); + input_report_key(dev2, BTN_LEFT, f->ts_left); + input_report_key(dev2, BTN_RIGHT, f->ts_right); + input_report_key(dev2, BTN_MIDDLE, f->ts_middle); input_sync(dev2); } } @@ -823,13 +801,8 @@ static void alps_process_packet_v4(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; - struct input_dev *dev = psmouse->dev; + struct alps_fields *f = &priv->f; int offset; - int x, y, z; - int left, right; - int x1, y1, x2, y2; - int fingers = 0; - unsigned int x_bitmap, y_bitmap; /* * v4 has a 6-byte encoding for bitmap data, but this data is @@ -851,71 +824,207 @@ static void alps_process_packet_v4(struct psmouse *psmouse) if (++priv->multi_packet > 2) { priv->multi_packet = 0; - x_bitmap = ((priv->multi_data[2] & 0x1f) << 10) | + f->x_map = ((priv->multi_data[2] & 0x1f) << 10) | ((priv->multi_data[3] & 0x60) << 3) | ((priv->multi_data[0] & 0x3f) << 2) | ((priv->multi_data[1] & 0x60) >> 5); - y_bitmap = ((priv->multi_data[5] & 0x01) << 10) | + f->y_map = ((priv->multi_data[5] & 0x01) << 10) | ((priv->multi_data[3] & 0x1f) << 5) | (priv->multi_data[1] & 0x1f); - fingers = alps_process_bitmap(priv, x_bitmap, y_bitmap, - &x1, &y1, &x2, &y2); - - /* Store MT data.*/ - priv->fingers = fingers; - priv->x1 = x1; - priv->x2 = x2; - priv->y1 = y1; - priv->y2 = y2; + f->fingers = alps_process_bitmap(priv, f); } - left = packet[4] & 0x01; - right = packet[4] & 0x02; + f->left = packet[4] & 0x01; + f->right = packet[4] & 0x02; - x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) | - ((packet[0] & 0x30) >> 4); - y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f); - z = packet[5] & 0x7f; + f->st.x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) | + ((packet[0] & 0x30) >> 4); + f->st.y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f); + f->pressure = packet[5] & 0x7f; - /* - * If there were no contacts in the bitmap, use ST - * points in MT reports. - * If there were two contacts or more, report MT data. - */ - if (priv->fingers < 2) { - x1 = x; - y1 = y; - fingers = z > 0 ? 
1 : 0; - } else { - fingers = priv->fingers; - x1 = priv->x1; - x2 = priv->x2; - y1 = priv->y1; - y2 = priv->y2; + alps_report_semi_mt_data(psmouse, f->fingers); +} + +static bool alps_is_valid_package_v7(struct psmouse *psmouse) +{ + switch (psmouse->pktcnt) { + case 3: + return (psmouse->packet[2] & 0x40) == 0x40; + case 4: + return (psmouse->packet[3] & 0x48) == 0x48; + case 6: + return (psmouse->packet[5] & 0x40) == 0x00; } + return true; +} - if (z >= 64) - input_report_key(dev, BTN_TOUCH, 1); +static unsigned char alps_get_packet_id_v7(char *byte) +{ + unsigned char packet_id; + + if (byte[4] & 0x40) + packet_id = V7_PACKET_ID_TWO; + else if (byte[4] & 0x01) + packet_id = V7_PACKET_ID_MULTI; + else if ((byte[0] & 0x10) && !(byte[4] & 0x43)) + packet_id = V7_PACKET_ID_NEW; + else if (byte[1] == 0x00 && byte[4] == 0x00) + packet_id = V7_PACKET_ID_IDLE; else - input_report_key(dev, BTN_TOUCH, 0); + packet_id = V7_PACKET_ID_UNKNOWN; - alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2); + return packet_id; +} - input_mt_report_finger_count(dev, fingers); +static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt, + unsigned char *pkt, + unsigned char pkt_id) +{ + mt[0].x = ((pkt[2] & 0x80) << 4); + mt[0].x |= ((pkt[2] & 0x3F) << 5); + mt[0].x |= ((pkt[3] & 0x30) >> 1); + mt[0].x |= (pkt[3] & 0x07); + mt[0].y = (pkt[1] << 3) | (pkt[0] & 0x07); + + mt[1].x = ((pkt[3] & 0x80) << 4); + mt[1].x |= ((pkt[4] & 0x80) << 3); + mt[1].x |= ((pkt[4] & 0x3F) << 4); + mt[1].y = ((pkt[5] & 0x80) << 3); + mt[1].y |= ((pkt[5] & 0x3F) << 4); + + switch (pkt_id) { + case V7_PACKET_ID_TWO: + mt[1].x &= ~0x000F; + mt[1].y |= 0x000F; + break; - input_report_key(dev, BTN_LEFT, left); - input_report_key(dev, BTN_RIGHT, right); + case V7_PACKET_ID_MULTI: + mt[1].x &= ~0x003F; + mt[1].y &= ~0x0020; + mt[1].y |= ((pkt[4] & 0x02) << 4); + mt[1].y |= 0x001F; + break; - if (z > 0) { - input_report_abs(dev, ABS_X, x); - input_report_abs(dev, ABS_Y, y); + case V7_PACKET_ID_NEW: + mt[1].x &= ~0x003F; + mt[1].x |= (pkt[0] & 0x20); + mt[1].y |= 0x000F; + break; } - input_report_abs(dev, ABS_PRESSURE, z); + + mt[0].y = 0x7FF - mt[0].y; + mt[1].y = 0x7FF - mt[1].y; +} + +static int alps_get_mt_count(struct input_mt_pos *mt) +{ + int i; + + for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++) + /* empty */; + + return i; +} + +static int alps_decode_packet_v7(struct alps_fields *f, + unsigned char *p, + struct psmouse *psmouse) +{ + unsigned char pkt_id; + + pkt_id = alps_get_packet_id_v7(p); + if (pkt_id == V7_PACKET_ID_IDLE) + return 0; + if (pkt_id == V7_PACKET_ID_UNKNOWN) + return -1; + + alps_get_finger_coordinate_v7(f->mt, p, pkt_id); + + if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) { + f->left = (p[0] & 0x80) >> 7; + f->right = (p[0] & 0x20) >> 5; + f->middle = (p[0] & 0x10) >> 4; + } + + if (pkt_id == V7_PACKET_ID_TWO) + f->fingers = alps_get_mt_count(f->mt); + else if (pkt_id == V7_PACKET_ID_MULTI) + f->fingers = 3 + (p[5] & 0x03); + + return 0; +} + +static void alps_process_trackstick_packet_v7(struct psmouse *psmouse) +{ + struct alps_data *priv = psmouse->private; + unsigned char *packet = psmouse->packet; + struct input_dev *dev2 = priv->dev2; + int x, y, z, left, right, middle; + + /* + * b7 b6 b5 b4 b3 b2 b1 b0 + * Byte0 0 1 0 0 1 0 0 0 + * Byte1 1 1 * * 1 M R L + * Byte2 X7 1 X5 X4 X3 X2 X1 X0 + * Byte3 Z6 1 Y6 X6 1 Y2 Y1 Y0 + * Byte4 Y7 0 Y5 Y4 Y3 1 1 0 + * Byte5 T&P 0 Z5 Z4 Z3 Z2 Z1 Z0 + * M / R / L: Middle / Right / Left button + */ + + x = ((packet[2] & 0xbf)) | 
((packet[3] & 0x10) << 2); + y = (packet[3] & 0x07) | (packet[4] & 0xb8) | + ((packet[3] & 0x20) << 1); + z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1); + + left = (packet[1] & 0x01); + right = (packet[1] & 0x02) >> 1; + middle = (packet[1] & 0x04) >> 2; + + /* Divide 2 since trackpoint's speed is too fast */ + input_report_rel(dev2, REL_X, (char)x / 2); + input_report_rel(dev2, REL_Y, -((char)y / 2)); + + input_report_key(dev2, BTN_LEFT, left); + input_report_key(dev2, BTN_RIGHT, right); + input_report_key(dev2, BTN_MIDDLE, middle); + + input_sync(dev2); +} + +static void alps_process_touchpad_packet_v7(struct psmouse *psmouse) +{ + struct alps_data *priv = psmouse->private; + struct input_dev *dev = psmouse->dev; + struct alps_fields *f = &priv->f; + + memset(f, 0, sizeof(*f)); + + if (priv->decode_fields(f, psmouse->packet, psmouse)) + return; + + alps_report_mt_data(psmouse, alps_get_mt_count(f->mt)); + + input_mt_report_finger_count(dev, f->fingers); + + input_report_key(dev, BTN_LEFT, f->left); + input_report_key(dev, BTN_RIGHT, f->right); + input_report_key(dev, BTN_MIDDLE, f->middle); input_sync(dev); } +static void alps_process_packet_v7(struct psmouse *psmouse) +{ + unsigned char *packet = psmouse->packet; + + if (packet[0] == 0x48 && (packet[4] & 0x47) == 0x06) + alps_process_trackstick_packet_v7(psmouse); + else + alps_process_touchpad_packet_v7(psmouse); +} + static void alps_report_bare_ps2_packet(struct psmouse *psmouse, unsigned char packet[], bool report_buttons) @@ -1080,6 +1189,14 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) return PSMOUSE_BAD_DATA; } + if (priv->proto_version == ALPS_PROTO_V7 && + !alps_is_valid_package_v7(psmouse)) { + psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", + psmouse->pktcnt - 1, + psmouse->packet[psmouse->pktcnt - 1]); + return PSMOUSE_BAD_DATA; + } + if (psmouse->pktcnt == psmouse->pktsize) { priv->process_packet(psmouse); return PSMOUSE_FULL_PACKET; @@ -1192,6 +1309,22 @@ static int alps_rpt_cmd(struct psmouse *psmouse, int init_command, return 0; } +static bool alps_check_valid_firmware_id(unsigned char id[]) +{ + if (id[0] == 0x73) + return true; + + if (id[0] == 0x88 && + (id[1] == 0x07 || + id[1] == 0x08 || + (id[1] & 0xf0) == 0xb0 || + (id[1] & 0xf0) == 0xc0)) { + return true; + } + + return false; +} + static int alps_enter_command_mode(struct psmouse *psmouse) { unsigned char param[4]; @@ -1201,8 +1334,7 @@ static int alps_enter_command_mode(struct psmouse *psmouse) return -1; } - if ((param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) && - param[0] != 0x73) { + if (!alps_check_valid_firmware_id(param)) { psmouse_dbg(psmouse, "unknown response while entering command mode\n"); return -1; @@ -1660,6 +1792,45 @@ error: return -1; } +static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch) +{ + int reg, x_pitch, y_pitch, x_electrode, y_electrode, x_phys, y_phys; + struct alps_data *priv = psmouse->private; + + reg = alps_command_mode_read_reg(psmouse, reg_pitch); + if (reg < 0) + return reg; + + x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */ + x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */ + + y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */ + y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */ + + reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1); + if (reg < 0) + return reg; + + x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */ + x_electrode = 17 + x_electrode; + + y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */ + 
y_electrode = 13 + y_electrode; + + x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */ + y_phys = y_pitch * (y_electrode - 1); /* In 0.1 mm units */ + + priv->x_res = priv->x_max * 10 / x_phys; /* units / mm */ + priv->y_res = priv->y_max * 10 / y_phys; /* units / mm */ + + psmouse_dbg(psmouse, + "pitch %dx%d num-electrodes %dx%d physical size %dx%d mm res %dx%d\n", + x_pitch, y_pitch, x_electrode, y_electrode, + x_phys / 10, y_phys / 10, priv->x_res, priv->y_res); + + return 0; +} + static int alps_hw_init_rushmore_v3(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; @@ -1680,6 +1851,9 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse) alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00)) goto error; + if (alps_get_v3_v7_resolution(psmouse, 0xc2da)) + goto error; + reg_val = alps_command_mode_read_reg(psmouse, 0xc2c6); if (reg_val == -1) goto error; @@ -1856,6 +2030,35 @@ static int alps_hw_init_dolphin_v1(struct psmouse *psmouse) return 0; } +static int alps_hw_init_v7(struct psmouse *psmouse) +{ + struct ps2dev *ps2dev = &psmouse->ps2dev; + int reg_val, ret = -1; + + if (alps_enter_command_mode(psmouse) || + alps_command_mode_read_reg(psmouse, 0xc2d9) == -1) + goto error; + + if (alps_get_v3_v7_resolution(psmouse, 0xc397)) + goto error; + + if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64)) + goto error; + + reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4); + if (reg_val == -1) + goto error; + if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02)) + goto error; + + alps_exit_command_mode(psmouse); + return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); + +error: + alps_exit_command_mode(psmouse); + return ret; +} + static void alps_set_defaults(struct alps_data *priv) { priv->byte0 = 0x8f; @@ -1914,6 +2117,21 @@ static void alps_set_defaults(struct alps_data *priv) priv->x_max = 2047; priv->y_max = 1535; break; + case ALPS_PROTO_V7: + priv->hw_init = alps_hw_init_v7; + priv->process_packet = alps_process_packet_v7; + priv->decode_fields = alps_decode_packet_v7; + priv->set_abs_params = alps_set_abs_params_mt; + priv->nibble_commands = alps_v3_nibble_commands; + priv->addr_command = PSMOUSE_CMD_RESET_WRAP; + priv->x_max = 0xfff; + priv->y_max = 0x7ff; + priv->byte0 = 0x48; + priv->mask0 = 0x48; + + if (priv->fw_ver[1] != 0xba) + priv->flags |= ALPS_BUTTONPAD; + break; } } @@ -1972,6 +2190,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) alps_exit_command_mode(psmouse)) return -EIO; + /* Save the Firmware version */ + memcpy(priv->fw_ver, ec, 3); + if (alps_match_table(psmouse, priv, e7, ec) == 0) { return 0; } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && @@ -1982,6 +2203,12 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) return -EIO; else return 0; + } else if (ec[0] == 0x88 && + ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) { + priv->proto_version = ALPS_PROTO_V7; + alps_set_defaults(priv); + + return 0; } else if (ec[0] == 0x88 && ec[1] == 0x08) { priv->proto_version = ALPS_PROTO_V3; alps_set_defaults(priv); @@ -1990,6 +2217,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) priv->decode_fields = alps_decode_rushmore; priv->x_bits = 16; priv->y_bits = 12; + priv->flags |= ALPS_IS_RUSHMORE; /* hack to make addr_command, nibble_command available */ psmouse->private = priv; @@ -2006,8 +2234,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) return 0; } - psmouse_info(psmouse, - "Unknown ALPS touchpad: 
E7=%3ph, EC=%3ph\n", e7, ec); + psmouse_dbg(psmouse, + "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); return -EINVAL; } @@ -2044,17 +2272,21 @@ static void alps_set_abs_params_st(struct alps_data *priv, static void alps_set_abs_params_mt(struct alps_data *priv, struct input_dev *dev1) { - set_bit(INPUT_PROP_SEMI_MT, dev1->propbit); - input_mt_init_slots(dev1, 2, 0); input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0); input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0); - set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit); + input_abs_set_res(dev1, ABS_MT_POSITION_X, priv->x_res); + input_abs_set_res(dev1, ABS_MT_POSITION_Y, priv->y_res); + + input_mt_init_slots(dev1, MAX_TOUCHES, INPUT_MT_POINTER | + INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK | INPUT_MT_SEMI_MT); + set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit); set_bit(BTN_TOOL_QUADTAP, dev1->keybit); - input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0); - input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0); + /* V7 is real multi-touch */ + if (priv->proto_version == ALPS_PROTO_V7) + clear_bit(INPUT_PROP_SEMI_MT, dev1->propbit); } int alps_init(struct psmouse *psmouse) @@ -2100,7 +2332,9 @@ int alps_init(struct psmouse *psmouse) dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS); priv->set_abs_params(priv, dev1); - input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0); + /* No pressure on V7 */ + if (priv->proto_version != ALPS_PROTO_V7) + input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0); if (priv->flags & ALPS_WHEEL) { dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL); @@ -2117,6 +2351,9 @@ int alps_init(struct psmouse *psmouse) dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1); dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2); dev1->keybit[BIT_WORD(BTN_3)] |= BIT_MASK(BTN_3); + } else if (priv->flags & ALPS_BUTTONPAD) { + set_bit(INPUT_PROP_BUTTONPAD, dev1->propbit); + clear_bit(BTN_RIGHT, dev1->keybit); } else { dev1->keybit[BIT_WORD(BTN_MIDDLE)] |= BIT_MASK(BTN_MIDDLE); } @@ -2136,6 +2373,10 @@ int alps_init(struct psmouse *psmouse) dev2->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); + __set_bit(INPUT_PROP_POINTER, dev2->propbit); + if (priv->flags & ALPS_DUALPOINT) + __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit); + if (input_register_device(priv->dev2)) goto init_fail; diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index 03f88b6940c7..66240b47819a 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -12,17 +12,39 @@ #ifndef _ALPS_H #define _ALPS_H +#include <linux/input/mt.h> + #define ALPS_PROTO_V1 1 #define ALPS_PROTO_V2 2 #define ALPS_PROTO_V3 3 #define ALPS_PROTO_V4 4 #define ALPS_PROTO_V5 5 #define ALPS_PROTO_V6 6 +#define ALPS_PROTO_V7 7 /* t3btl t4s */ + +#define MAX_TOUCHES 2 #define DOLPHIN_COUNT_PER_ELECTRODE 64 #define DOLPHIN_PROFILE_XOFFSET 8 /* x-electrode offset */ #define DOLPHIN_PROFILE_YOFFSET 1 /* y-electrode offset */ +/* + * enum V7_PACKET_ID - defines the packet type for V7 + * V7_PACKET_ID_IDLE: There's no finger and no button activity. + * V7_PACKET_ID_TWO: There are one or two non-resting fingers on the touchpad, + * or there is button activity. + * V7_PACKET_ID_MULTI: There are at least three non-resting fingers. + * V7_PACKET_ID_NEW: The finger position in the slot is not continuous with + * the previous packet. 
+*/ +enum V7_PACKET_ID { + V7_PACKET_ID_IDLE, + V7_PACKET_ID_TWO, + V7_PACKET_ID_MULTI, + V7_PACKET_ID_NEW, + V7_PACKET_ID_UNKNOWN, +}; + /** * struct alps_model_info - touchpad ID table * @signature: E7 response string to match. @@ -46,7 +68,7 @@ struct alps_model_info { unsigned char command_mode_resp; unsigned char proto_version; unsigned char byte0, mask0; - unsigned char flags; + int flags; }; /** @@ -65,14 +87,19 @@ struct alps_nibble_commands { unsigned char data; }; +struct alps_bitmap_point { + int start_bit; + int num_bits; +}; + /** * struct alps_fields - decoded version of the report packet * @x_map: Bitmap of active X positions for MT. * @y_map: Bitmap of active Y positions for MT. * @fingers: Number of fingers for MT. - * @x: X position for ST. - * @y: Y position for ST. - * @z: Z position for ST. + * @pressure: Pressure. + * @st: position for ST. + * @mt: position for MT. * @first_mp: Packet is the first of a multi-packet report. * @is_mp: Packet is part of a multi-packet report. * @left: Left touchpad button is active. @@ -86,9 +113,11 @@ struct alps_fields { unsigned int x_map; unsigned int y_map; unsigned int fingers; - unsigned int x; - unsigned int y; - unsigned int z; + + int pressure; + struct input_mt_pos st; + struct input_mt_pos mt[MAX_TOUCHES]; + unsigned int first_mp:1; unsigned int is_mp:1; @@ -113,6 +142,7 @@ struct alps_fields { * known format for this model. The first byte of the report, ANDed with * mask0, should match byte0. * @mask0: The mask used to check the first byte of the report. + * @fw_ver: cached copy of firmware version (EC report) * @flags: Additional device capabilities (passthrough port, trackstick, etc.). * @x_max: Largest possible X position value. * @y_max: Largest possible Y position value. @@ -125,11 +155,7 @@ struct alps_fields { * @prev_fin: Finger bit from previous packet. * @multi_packet: Multi-packet data in progress. * @multi_data: Saved multi-packet data. - * @x1: First X coordinate from last MT report. - * @x2: Second X coordinate from last MT report. - * @y1: First Y coordinate from last MT report. - * @y2: Second Y coordinate from last MT report. - * @fingers: Number of fingers from last MT report. + * @f: Decoded packet data fields. * @quirks: Bitmap of ALPS_QUIRK_*. * @timer: Timer for flushing out the final report packet in the stream. 
*/ @@ -142,23 +168,25 @@ struct alps_data { int addr_command; unsigned char proto_version; unsigned char byte0, mask0; - unsigned char flags; + unsigned char fw_ver[3]; + int flags; int x_max; int y_max; int x_bits; int y_bits; + unsigned int x_res; + unsigned int y_res; int (*hw_init)(struct psmouse *psmouse); void (*process_packet)(struct psmouse *psmouse); - void (*decode_fields)(struct alps_fields *f, unsigned char *p, + int (*decode_fields)(struct alps_fields *f, unsigned char *p, struct psmouse *psmouse); void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1); int prev_fin; int multi_packet; unsigned char multi_data[6]; - int x1, x2, y1, y2; - int fingers; + struct alps_fields f; u8 quirks; struct timer_list timer; }; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index ee2a04d90d20..06fc6e76ffbe 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -18,6 +18,7 @@ #include <linux/input/mt.h> #include <linux/serio.h> #include <linux/libps2.h> +#include <asm/unaligned.h> #include "psmouse.h" #include "elantech.h" @@ -403,6 +404,68 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse) input_sync(dev); } +static void elantech_report_trackpoint(struct psmouse *psmouse, + int packet_type) +{ + /* + * byte 0: 0 0 sx sy 0 M R L + * byte 1:~sx 0 0 0 0 0 0 0 + * byte 2:~sy 0 0 0 0 0 0 0 + * byte 3: 0 0 ~sy ~sx 0 1 1 0 + * byte 4: x7 x6 x5 x4 x3 x2 x1 x0 + * byte 5: y7 y6 y5 y4 y3 y2 y1 y0 + * + * x and y are written in two's complement spread + * over 9 bits with sx/sy the relative top bit and + * x7..x0 and y7..y0 the lower bits. + * The sign of y is opposite to what the input driver + * expects for a relative movement + */ + + struct elantech_data *etd = psmouse->private; + struct input_dev *tp_dev = etd->tp_dev; + unsigned char *packet = psmouse->packet; + int x, y; + u32 t; + + if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev, + !tp_dev, + psmouse_fmt("Unexpected trackpoint message\n"))) { + if (etd->debug == 1) + elantech_packet_dump(psmouse); + return; + } + + t = get_unaligned_le32(&packet[0]); + + switch (t & ~7U) { + case 0x06000030U: + case 0x16008020U: + case 0x26800010U: + case 0x36808000U: + x = packet[4] - (int)((packet[1]^0x80) << 1); + y = (int)((packet[2]^0x80) << 1) - packet[5]; + + input_report_key(tp_dev, BTN_LEFT, packet[0] & 0x01); + input_report_key(tp_dev, BTN_RIGHT, packet[0] & 0x02); + input_report_key(tp_dev, BTN_MIDDLE, packet[0] & 0x04); + + input_report_rel(tp_dev, REL_X, x); + input_report_rel(tp_dev, REL_Y, y); + + input_sync(tp_dev); + + break; + + default: + /* Dump unexpected packet sequences if debug=1 (default) */ + if (etd->debug == 1) + elantech_packet_dump(psmouse); + + break; + } +} + /* * Interpret complete data packets and report absolute mode input events for * hardware version 3. 
(12 byte packets for two fingers) @@ -715,6 +778,8 @@ static int elantech_packet_check_v3(struct psmouse *psmouse) if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) return PACKET_V3_TAIL; + if ((packet[3] & 0x0f) == 0x06) + return PACKET_TRACKPOINT; } return PACKET_UNKNOWN; @@ -791,14 +856,23 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse) case 3: packet_type = elantech_packet_check_v3(psmouse); - /* ignore debounce */ - if (packet_type == PACKET_DEBOUNCE) - return PSMOUSE_FULL_PACKET; - - if (packet_type == PACKET_UNKNOWN) + switch (packet_type) { + case PACKET_UNKNOWN: return PSMOUSE_BAD_DATA; - elantech_report_absolute_v3(psmouse, packet_type); + case PACKET_DEBOUNCE: + /* ignore debounce */ + break; + + case PACKET_TRACKPOINT: + elantech_report_trackpoint(psmouse, packet_type); + break; + + default: + elantech_report_absolute_v3(psmouse, packet_type); + break; + } + break; case 4: @@ -1018,8 +1092,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Asus UX31 0x361f00 20, 15, 0e clickpad * Asus UX32VD 0x361f02 00, 15, 0e clickpad * Avatar AVIU-145A2 0x361f00 ? clickpad + * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) + * Lenovo L530 0x350f02 b9, 15, 0c 2 hw buttons (*) * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad @@ -1029,6 +1105,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Samsung RF710 0x450f00 ? 2 hw buttons * System76 Pangolin 0x250f01 ? 2 hw buttons * (*) + 3 trackpoint buttons + * (**) + 0 trackpoint buttons + * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps */ static void elantech_set_buttonpad_prop(struct psmouse *psmouse) { @@ -1253,6 +1331,13 @@ static bool elantech_is_signature_valid(const unsigned char *param) if (param[1] == 0) return true; + /* + * Some models have a revision higher than 20, meaning param[2] may + * be 10 or 20; skip the rates check for these. 
+ */ + if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) + return true; + for (i = 0; i < ARRAY_SIZE(rates); i++) if (param[2] == rates[i]) return false; @@ -1324,6 +1409,10 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties) */ static void elantech_disconnect(struct psmouse *psmouse) { + struct elantech_data *etd = psmouse->private; + + if (etd->tp_dev) + input_unregister_device(etd->tp_dev); sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &elantech_attr_group); kfree(psmouse->private); @@ -1438,8 +1527,10 @@ static int elantech_set_properties(struct elantech_data *etd) int elantech_init(struct psmouse *psmouse) { struct elantech_data *etd; - int i, error; + int i; + int error = -EINVAL; unsigned char param[3]; + struct input_dev *tp_dev; psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL); if (!etd) @@ -1498,14 +1589,53 @@ int elantech_init(struct psmouse *psmouse) goto init_fail; } + /* The MSB indicates the presence of the trackpoint */ + if ((etd->capabilities[0] & 0x80) == 0x80) { + tp_dev = input_allocate_device(); + + if (!tp_dev) { + error = -ENOMEM; + goto init_fail_tp_alloc; + } + + etd->tp_dev = tp_dev; + snprintf(etd->tp_phys, sizeof(etd->tp_phys), "%s/input1", + psmouse->ps2dev.serio->phys); + tp_dev->phys = etd->tp_phys; + tp_dev->name = "Elantech PS/2 TrackPoint"; + tp_dev->id.bustype = BUS_I8042; + tp_dev->id.vendor = 0x0002; + tp_dev->id.product = PSMOUSE_ELANTECH; + tp_dev->id.version = 0x0000; + tp_dev->dev.parent = &psmouse->ps2dev.serio->dev; + tp_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + tp_dev->relbit[BIT_WORD(REL_X)] = + BIT_MASK(REL_X) | BIT_MASK(REL_Y); + tp_dev->keybit[BIT_WORD(BTN_LEFT)] = + BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | + BIT_MASK(BTN_RIGHT); + + __set_bit(INPUT_PROP_POINTER, tp_dev->propbit); + __set_bit(INPUT_PROP_POINTING_STICK, tp_dev->propbit); + + error = input_register_device(etd->tp_dev); + if (error < 0) + goto init_fail_tp_reg; + } + psmouse->protocol_handler = elantech_process_byte; psmouse->disconnect = elantech_disconnect; psmouse->reconnect = elantech_reconnect; psmouse->pktsize = etd->hw_version > 1 ? 
6 : 4; return 0; - + init_fail_tp_reg: + input_free_device(tp_dev); + init_fail_tp_alloc: + sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, + &elantech_attr_group); init_fail: + psmouse_reset(psmouse); kfree(etd); - return -1; + return error; } diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 9e0e2a1f340d..6f3afec02f03 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -94,6 +94,7 @@ #define PACKET_V4_HEAD 0x05 #define PACKET_V4_MOTION 0x06 #define PACKET_V4_STATUS 0x07 +#define PACKET_TRACKPOINT 0x08 /* * track up to 5 fingers for v4 hardware @@ -114,6 +115,8 @@ struct finger_pos { }; struct elantech_data { + struct input_dev *tp_dev; /* Relative device for trackpoint */ + char tp_phys[32]; unsigned char reg_07; unsigned char reg_10; unsigned char reg_11; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index cff065f6261c..b4e1f014ddc2 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -670,6 +670,8 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + psmouse->set_rate = psmouse_set_rate; psmouse->set_resolution = psmouse_set_resolution; psmouse->poll = psmouse_poll; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index ef9e0b8a9aa7..fd23181c1fb7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -117,6 +117,9 @@ void synaptics_reset(struct psmouse *psmouse) } #ifdef CONFIG_MOUSE_PS2_SYNAPTICS + +static bool cr48_profile_sensor; + struct min_max_quirk { const char * const *pnp_ids; int x_min, x_max, y_min, y_max; @@ -626,10 +629,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[], ((buf[0] & 0x04) >> 1) | ((buf[3] & 0x04) >> 2)); + if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || + SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && + hw->w == 2) { + synaptics_parse_agm(buf, priv, hw); + return 1; + } + + hw->x = (((buf[3] & 0x10) << 8) | + ((buf[1] & 0x0f) << 8) | + buf[4]); + hw->y = (((buf[3] & 0x20) << 7) | + ((buf[1] & 0xf0) << 4) | + buf[5]); + hw->z = buf[2]; + hw->left = (buf[0] & 0x01) ? 1 : 0; hw->right = (buf[0] & 0x02) ? 1 : 0; - if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { + if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) { + /* + * ForcePads, like Clickpads, use middle button + * bits to report primary button clicks. + * Unfortunately they report the primary button not + * only when the user presses on the pad above a certain + * threshold, but also when there is more than one + * finger on the touchpad, which interferes with + * our multi-finger gestures. + */ + if (hw->z == 0) { + /* No contacts */ + priv->press = priv->report_press = false; + } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) { + /* + * Single-finger touch with pressure above + * the threshold. If the pressure stays high long + * enough, we'll start reporting the primary + * button. We rely on the device continuing + * to send data even if the finger does not + * move. 
+ */ + if (!priv->press) { + priv->press_start = jiffies; + priv->press = true; + } else if (time_after(jiffies, + priv->press_start + + msecs_to_jiffies(50))) { + priv->report_press = true; + } + } else { + priv->press = false; + } + + hw->left = priv->report_press; + + } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { /* * Clickpad's button is transmitted as middle button, * however, since it is primary button, we will report @@ -648,21 +702,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[], hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; } - if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || - SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && - hw->w == 2) { - synaptics_parse_agm(buf, priv, hw); - return 1; - } - - hw->x = (((buf[3] & 0x10) << 8) | - ((buf[1] & 0x0f) << 8) | - buf[4]); - hw->y = (((buf[3] & 0x20) << 7) | - ((buf[1] & 0xf0) << 4) | - buf[5]); - hw->z = buf[2]; - if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && ((buf[0] ^ buf[3]) & 0x02)) { switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { @@ -1152,6 +1191,42 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse, priv->agm_pending = false; } +static void synaptics_profile_sensor_process(struct psmouse *psmouse, + struct synaptics_hw_state *sgm, + int num_fingers) +{ + struct input_dev *dev = psmouse->dev; + struct synaptics_data *priv = psmouse->private; + struct synaptics_hw_state *hw[2] = { sgm, &priv->agm }; + struct input_mt_pos pos[2]; + int slot[2], nsemi, i; + + nsemi = clamp_val(num_fingers, 0, 2); + + for (i = 0; i < nsemi; i++) { + pos[i].x = hw[i]->x; + pos[i].y = synaptics_invert_y(hw[i]->y); + } + + input_mt_assign_slots(dev, slot, pos, nsemi); + + for (i = 0; i < nsemi; i++) { + input_mt_slot(dev, slot[i]); + input_mt_report_slot_state(dev, MT_TOOL_FINGER, true); + input_report_abs(dev, ABS_MT_POSITION_X, pos[i].x); + input_report_abs(dev, ABS_MT_POSITION_Y, pos[i].y); + input_report_abs(dev, ABS_MT_PRESSURE, hw[i]->z); + } + + input_mt_drop_unused(dev); + input_mt_report_pointer_emulation(dev, false); + input_mt_report_finger_count(dev, num_fingers); + + synaptics_report_buttons(psmouse, sgm); + + input_sync(dev); +} + /* * called for each full received packet from the touchpad */ @@ -1215,6 +1290,11 @@ static void synaptics_process_packet(struct psmouse *psmouse) finger_width = 0; } + if (cr48_profile_sensor) { + synaptics_profile_sensor_process(psmouse, &hw, num_fingers); + return; + } + if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) synaptics_report_semi_mt_data(dev, &hw, &priv->agm, num_fingers); @@ -1360,6 +1440,9 @@ static void set_input_params(struct psmouse *psmouse, set_abs_position_params(dev, priv, ABS_X, ABS_Y); input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); + if (cr48_profile_sensor) + input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0); + if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) { set_abs_position_params(dev, priv, ABS_MT_POSITION_X, ABS_MT_POSITION_Y); @@ -1371,11 +1454,16 @@ static void set_input_params(struct psmouse *psmouse, __set_bit(BTN_TOOL_QUADTAP, dev->keybit); __set_bit(BTN_TOOL_QUINTTAP, dev->keybit); } else if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) { - /* Non-image sensors with AGM use semi-mt */ - __set_bit(INPUT_PROP_SEMI_MT, dev->propbit); - input_mt_init_slots(dev, 2, 0); set_abs_position_params(dev, priv, ABS_MT_POSITION_X, ABS_MT_POSITION_Y); + /* + * Profile sensor in CR-48 tracks contacts reasonably well, + * other non-image sensors with AGM use semi-mt. + */ + input_mt_init_slots(dev, 2, + INPUT_MT_POINTER | + (cr48_profile_sensor ? 
+ INPUT_MT_TRACK : INPUT_MT_SEMI_MT)); } if (SYN_CAP_PALMDETECT(priv->capabilities)) @@ -1577,10 +1665,24 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = { { } }; +static const struct dmi_system_id __initconst cr48_dmi_table[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + /* Cr-48 Chromebook (Codename Mario) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Mario"), + }, + }, +#endif + { } +}; + void __init synaptics_module_init(void) { impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); broken_olpc_ec = dmi_check_system(olpc_dmi_table); + cr48_profile_sensor = dmi_check_system(cr48_dmi_table); } static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index e594af0b264b..fb2e076738ae 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -78,6 +78,11 @@ * 2 0x08 image sensor image sensor tracks 5 fingers, but only * reports 2. * 2 0x20 report min query 0x0f gives min coord reported + * 2 0x80 forcepad forcepad is a variant of clickpad that + * does not have physical buttons but rather + * uses pressure above certain threshold to + * report primary clicks. Forcepads also have + * clickpad bit set. */ #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ @@ -86,6 +91,7 @@ #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000) #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) +#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000) /* synaptics modes query bits */ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) @@ -177,6 +183,11 @@ struct synaptics_data { */ struct synaptics_hw_state agm; bool agm_pending; /* new AGM packet received */ + + /* ForcePad handling */ + unsigned long press_start; + bool press; + bool report_press; }; void synaptics_module_init(void); diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c index e122bda16aab..6bcc0189c1c9 100644 --- a/drivers/input/mouse/synaptics_usb.c +++ b/drivers/input/mouse/synaptics_usb.c @@ -387,6 +387,7 @@ static int synusb_probe(struct usb_interface *intf, __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); + __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0); } else { input_set_abs_params(input_dev, ABS_X, @@ -401,6 +402,11 @@ static int synusb_probe(struct usb_interface *intf, __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); } + if (synusb->flags & SYNUSB_TOUCHSCREEN) + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + else + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_MIDDLE, input_dev->keybit); diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index ca843b6cf6bd..30c8b6998808 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -393,6 +393,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) if ((button_info & 0x0f) >= 3) __set_bit(BTN_MIDDLE, psmouse->dev->keybit); + __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit); + __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit); + trackpoint_defaults(psmouse->private); error = 
trackpoint_power_on_reset(&psmouse->ps2dev); diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 613261994621..e74e5d6e5f9f 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -170,6 +170,15 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev, serio_interrupt(kbd_dev->hv_serio, scan_code, 0); } spin_unlock_irqrestore(&kbd_dev->lock, flags); + + /* + * Only trigger a wakeup on key down, otherwise + * "echo freeze > /sys/power/state" can't really enter the + * state because the Enter-UP can trigger a wakeup at once. + */ + if (!(info & IS_BREAK)) + pm_wakeup_event(&hv_dev->device, 0); + break; default: @@ -376,6 +385,9 @@ static int hv_kbd_probe(struct hv_device *hv_dev, goto err_close_vmbus; serio_register_port(kbd_dev->hv_serio); + + device_init_wakeup(&hv_dev->device, true); + return 0; err_close_vmbus: @@ -390,6 +402,7 @@ static int hv_kbd_remove(struct hv_device *hv_dev) { struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev); + device_init_wakeup(&hv_dev->device, false); serio_unregister_port(kbd_dev->hv_serio); vmbus_close(hv_dev->channel); kfree(kbd_dev); diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index d6aa4c67dbb6..93cb7912703c 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h @@ -17,7 +17,6 @@ static int i8042_aux_irq = -1; #define I8042_MUX_PHYS_DESC "sparcps2/serio%d" static void __iomem *kbd_iobase; -static struct resource *kbd_res; #define I8042_COMMAND_REG (kbd_iobase + 0x64UL) #define I8042_DATA_REG (kbd_iobase + 0x60UL) @@ -44,6 +43,8 @@ static inline void i8042_write_command(int val) #ifdef CONFIG_PCI +static struct resource *kbd_res; + #define OBP_PS2KBD_NAME1 "kb_ps2" #define OBP_PS2KBD_NAME2 "keyboard" #define OBP_PS2MS_NAME1 "kdmouse" diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 136b7b204f56..40b7d6c0ff17 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -465,6 +465,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, }, + { + /* Asus X450LCP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), + }, + }, + { + /* Avatar AVIU-145A6 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), + }, + }, { } }; @@ -608,6 +622,14 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, }, + { + /* Fujitsu U574 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), + }, + }, { } }; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 3807c3e971cc..f5a98af3b325 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1254,6 +1254,8 @@ static int __init i8042_create_aux_port(int idx) } else { snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx); snprintf(serio->phys, sizeof(serio->phys), I8042_MUX_PHYS_DESC, idx + 1); + strlcpy(serio->firmware_id, i8042_aux_firmware_id, + sizeof(serio->firmware_id)); } port->serio = serio; diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c index 0cb7ef59071b..69175b825346 100644 --- 
a/drivers/input/serio/serport.c +++ b/drivers/input/serio/serport.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/serio.h> #include <linux/tty.h> +#include <linux/compat.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Input device TTY line discipline"); @@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u return 0; } +static void serport_set_type(struct tty_struct *tty, unsigned long type) +{ + struct serport *serport = tty->disc_data; + + serport->id.proto = type & 0x000000ff; + serport->id.id = (type & 0x0000ff00) >> 8; + serport->id.extra = (type & 0x00ff0000) >> 16; +} + /* * serport_ldisc_ioctl() allows to set the port protocol, and device ID */ -static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg) +static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) { - struct serport *serport = (struct serport*) tty->disc_data; - unsigned long type; - if (cmd == SPIOCSTYPE) { + unsigned long type; + if (get_user(type, (unsigned long __user *) arg)) return -EFAULT; - serport->id.proto = type & 0x000000ff; - serport->id.id = (type & 0x0000ff00) >> 8; - serport->id.extra = (type & 0x00ff0000) >> 16; + serport_set_type(tty, type); + return 0; + } + + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t) +static long serport_ldisc_compat_ioctl(struct tty_struct *tty, + struct file *file, + unsigned int cmd, unsigned long arg) +{ + if (cmd == COMPAT_SPIOCSTYPE) { + void __user *uarg = compat_ptr(arg); + compat_ulong_t compat_type; + + if (get_user(compat_type, (compat_ulong_t __user *)uarg)) + return -EFAULT; + serport_set_type(tty, compat_type); return 0; } return -EINVAL; } +#endif static void serport_ldisc_write_wakeup(struct tty_struct * tty) { @@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = { .close = serport_ldisc_close, .read = serport_ldisc_read, .ioctl = serport_ldisc_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = serport_ldisc_compat_ioctl, +#endif .receive_buf = serport_ldisc_receive, .write_wakeup = serport_ldisc_write_wakeup }; diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig index bed7cbf84cfd..623bb9e0d5a4 100644 --- a/drivers/input/tablet/Kconfig +++ b/drivers/input/tablet/Kconfig @@ -73,20 +73,14 @@ config TABLET_USB_KBTAB To compile this driver as a module, choose M here: the module will be called kbtab. -config TABLET_USB_WACOM - tristate "Wacom Intuos/Graphire tablet support (USB)" - depends on USB_ARCH_HAS_HCD - select POWER_SUPPLY - select USB - select NEW_LEDS - select LEDS_CLASS +config TABLET_SERIAL_WACOM4 + tristate "Wacom protocol 4 serial tablet support" + select SERIO help - Say Y here if you want to use the USB version of the Wacom Intuos - or Graphire tablet. Make sure to say Y to "Mouse support" - (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support" - (CONFIG_INPUT_EVDEV) as well. + Say Y here if you want to use Wacom protocol 4 serial tablets. + E.g. serial versions of the Cintiq, Graphire or Penpartner. To compile this driver as a module, choose M here: the - module will be called wacom. + module will be called wacom_serial4. 
endif diff --git a/drivers/input/tablet/Makefile b/drivers/input/tablet/Makefile index 3f6c25220638..2e130101cf3c 100644 --- a/drivers/input/tablet/Makefile +++ b/drivers/input/tablet/Makefile @@ -2,12 +2,10 @@ # Makefile for the tablet drivers # -# Multipart objects. -wacom-objs := wacom_wac.o wacom_sys.o obj-$(CONFIG_TABLET_USB_ACECAD) += acecad.o obj-$(CONFIG_TABLET_USB_AIPTEK) += aiptek.o obj-$(CONFIG_TABLET_USB_GTCO) += gtco.o obj-$(CONFIG_TABLET_USB_HANWANG) += hanwang.o obj-$(CONFIG_TABLET_USB_KBTAB) += kbtab.o -obj-$(CONFIG_TABLET_USB_WACOM) += wacom.o +obj-$(CONFIG_TABLET_SERIAL_WACOM4) += wacom_serial4.o diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c new file mode 100644 index 000000000000..20ab802461e7 --- /dev/null +++ b/drivers/input/tablet/wacom_serial4.c @@ -0,0 +1,620 @@ +/* + * Wacom protocol 4 serial tablet driver + * + * Copyright 2014 Hans de Goede <hdegoede@redhat.com> + * Copyright 2011-2012 Julian Squires <julian@cipht.net> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version of 2 of the License, or (at your + * option) any later version. See the file COPYING in the main directory of + * this archive for more details. + * + * Many thanks to Bill Seremetis, without whom PenPartner support + * would not have been possible. Thanks to Patrick Mahoney. + * + * This driver was developed with reference to much code written by others, + * particularly: + * - elo, gunze drivers by Vojtech Pavlik <vojtech@ucw.cz>; + * - wacom_w8001 driver by Jaya Kumar <jayakumar.lkml@gmail.com>; + * - the USB wacom input driver, credited to many people + * (see drivers/input/tablet/wacom.h); + * - new and old versions of linuxwacom / xf86-input-wacom credited to + * Frederic Lepied, France. <Lepied@XFree86.org> and + * Ping Cheng, Wacom. <pingc@wacom.com>; + * - and xf86wacom.c (a presumably ancient version of the linuxwacom code), + * by Frederic Lepied and Raph Levien <raph@gtk.org>. + * + * To do: + * - support pad buttons; (requires access to a model with pad buttons) + * - support (protocol 4-style) tilt (requires access to a > 1.4 rom model) + */ + +/* + * Wacom serial protocol 4 documentation taken from linuxwacom-0.9.9 code, + * protocol 4 uses 7 or 9 byte of data in the following format: + * + * Byte 1 + * bit 7 Sync bit always 1 + * bit 6 Pointing device detected + * bit 5 Cursor = 0 / Stylus = 1 + * bit 4 Reserved + * bit 3 1 if a button on the pointing device has been pressed + * bit 2 P0 (optional) + * bit 1 X15 + * bit 0 X14 + * + * Byte 2 + * bit 7 Always 0 + * bits 6-0 = X13 - X7 + * + * Byte 3 + * bit 7 Always 0 + * bits 6-0 = X6 - X0 + * + * Byte 4 + * bit 7 Always 0 + * bit 6 B3 + * bit 5 B2 + * bit 4 B1 + * bit 3 B0 + * bit 2 P1 (optional) + * bit 1 Y15 + * bit 0 Y14 + * + * Byte 5 + * bit 7 Always 0 + * bits 6-0 = Y13 - Y7 + * + * Byte 6 + * bit 7 Always 0 + * bits 6-0 = Y6 - Y0 + * + * Byte 7 + * bit 7 Always 0 + * bit 6 Sign of pressure data; or wheel-rel for cursor tool + * bit 5 P7; or REL1 for cursor tool + * bit 4 P6; or REL0 for cursor tool + * bit 3 P5 + * bit 2 P4 + * bit 1 P3 + * bit 0 P2 + * + * byte 8 and 9 are optional and present only + * in tilt mode. 
+ * + * Byte 8 + * bit 7 Always 0 + * bit 6 Sign of tilt X + * bit 5 Xt6 + * bit 4 Xt5 + * bit 3 Xt4 + * bit 2 Xt3 + * bit 1 Xt2 + * bit 0 Xt1 + * + * Byte 9 + * bit 7 Always 0 + * bit 6 Sign of tilt Y + * bit 5 Yt6 + * bit 4 Yt5 + * bit 3 Yt4 + * bit 2 Yt3 + * bit 1 Yt2 + * bit 0 Yt1 + */ + +#include <linux/completion.h> +#include <linux/init.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/serio.h> +#include <linux/slab.h> +#include <linux/string.h> + +MODULE_AUTHOR("Julian Squires <julian@cipht.net>, Hans de Goede <hdegoede@redhat.com>"); +MODULE_DESCRIPTION("Wacom protocol 4 serial tablet driver"); +MODULE_LICENSE("GPL"); + +#define REQUEST_MODEL_AND_ROM_VERSION "~#" +#define REQUEST_MAX_COORDINATES "~C\r" +#define REQUEST_CONFIGURATION_STRING "~R\r" +#define REQUEST_RESET_TO_PROTOCOL_IV "\r#" +/* + * Note: sending "\r$\r" causes at least the Digitizer II to send + * packets in ASCII instead of binary. "\r#" seems to undo that. + */ + +#define COMMAND_START_SENDING_PACKETS "ST\r" +#define COMMAND_STOP_SENDING_PACKETS "SP\r" +#define COMMAND_MULTI_MODE_INPUT "MU1\r" +#define COMMAND_ORIGIN_IN_UPPER_LEFT "OC1\r" +#define COMMAND_ENABLE_ALL_MACRO_BUTTONS "~M0\r" +#define COMMAND_DISABLE_GROUP_1_MACRO_BUTTONS "~M1\r" +#define COMMAND_TRANSMIT_AT_MAX_RATE "IT0\r" +#define COMMAND_DISABLE_INCREMENTAL_MODE "IN0\r" +#define COMMAND_ENABLE_CONTINUOUS_MODE "SR\r" +#define COMMAND_ENABLE_PRESSURE_MODE "PH1\r" +#define COMMAND_Z_FILTER "ZF1\r" + +/* Note that this is a protocol 4 packet without tilt information. */ +#define PACKET_LENGTH 7 +#define DATA_SIZE 32 + +/* flags */ +#define F_COVERS_SCREEN 0x01 +#define F_HAS_STYLUS2 0x02 +#define F_HAS_SCROLLWHEEL 0x04 + +/* device IDs */ +#define STYLUS_DEVICE_ID 0x02 +#define CURSOR_DEVICE_ID 0x06 +#define ERASER_DEVICE_ID 0x0A + +enum { STYLUS = 1, ERASER, CURSOR }; + +static const struct { + int device_id; + int input_id; +} tools[] = { + { 0, 0 }, + { STYLUS_DEVICE_ID, BTN_TOOL_PEN }, + { ERASER_DEVICE_ID, BTN_TOOL_RUBBER }, + { CURSOR_DEVICE_ID, BTN_TOOL_MOUSE }, +}; + +struct wacom { + struct input_dev *dev; + struct completion cmd_done; + int result; + u8 expect; + u8 eraser_mask; + unsigned int extra_z_bits; + unsigned int flags; + unsigned int res_x, res_y; + unsigned int max_x, max_y; + unsigned int tool; + unsigned int idx; + u8 data[DATA_SIZE]; + char phys[32]; +}; + +enum { + MODEL_CINTIQ = 0x504C, /* PL */ + MODEL_CINTIQ2 = 0x4454, /* DT */ + MODEL_DIGITIZER_II = 0x5544, /* UD */ + MODEL_GRAPHIRE = 0x4554, /* ET */ + MODEL_PENPARTNER = 0x4354, /* CT */ +}; + +static void wacom_handle_model_response(struct wacom *wacom) +{ + int major_v, minor_v, r = 0; + char *p; + + p = strrchr(wacom->data, 'V'); + if (p) + r = sscanf(p + 1, "%u.%u", &major_v, &minor_v); + if (r != 2) + major_v = minor_v = 0; + + switch (wacom->data[2] << 8 | wacom->data[3]) { + case MODEL_CINTIQ: /* UNTESTED */ + case MODEL_CINTIQ2: + if ((wacom->data[2] << 8 | wacom->data[3]) == MODEL_CINTIQ) { + wacom->dev->name = "Wacom Cintiq"; + wacom->dev->id.version = MODEL_CINTIQ; + } else { + wacom->dev->name = "Wacom Cintiq II"; + wacom->dev->id.version = MODEL_CINTIQ2; + } + wacom->res_x = 508; + wacom->res_y = 508; + + switch (wacom->data[5] << 8 | wacom->data[6]) { + case 0x3731: /* PL-710 */ + wacom->res_x = 2540; + wacom->res_y = 2540; + /* fall through */ + case 0x3535: /* PL-550 */ + case 0x3830: /* PL-800 */ + wacom->extra_z_bits = 2; + } + + wacom->flags = F_COVERS_SCREEN; + break; + + 
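/*
 * Illustrative note, inferred from the enum comments above rather than from
 * any Wacom documentation: each MODEL_* value is simply the two ASCII
 * letters of the tablet's ID string ("PL", "DT", "UD", "ET", "CT") packed
 * big-endian, which is why the switch key is assembled as
 *
 *   wacom->data[2] << 8 | wacom->data[3]
 *
 * For a Graphire, 'E' (0x45) and 'T' (0x54) combine to 0x4554 ==
 * MODEL_GRAPHIRE; 'C' (0x43) and 'T' (0x54) give 0x4354 == MODEL_PENPARTNER.
 */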
case MODEL_PENPARTNER: + wacom->dev->name = "Wacom Penpartner"; + wacom->dev->id.version = MODEL_PENPARTNER; + wacom->res_x = 1000; + wacom->res_y = 1000; + break; + + case MODEL_GRAPHIRE: + wacom->dev->name = "Wacom Graphire"; + wacom->dev->id.version = MODEL_GRAPHIRE; + wacom->res_x = 1016; + wacom->res_y = 1016; + wacom->max_x = 5103; + wacom->max_y = 3711; + wacom->extra_z_bits = 2; + wacom->eraser_mask = 0x08; + wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL; + break; + + case MODEL_DIGITIZER_II: + wacom->dev->name = "Wacom Digitizer II"; + wacom->dev->id.version = MODEL_DIGITIZER_II; + if (major_v == 1 && minor_v <= 2) + wacom->extra_z_bits = 0; /* UNTESTED */ + break; + + default: + dev_err(&wacom->dev->dev, "Unsupported Wacom model %s\n", + wacom->data); + wacom->result = -ENODEV; + return; + } + + dev_info(&wacom->dev->dev, "%s tablet, version %u.%u\n", + wacom->dev->name, major_v, minor_v); +} + +static void wacom_handle_configuration_response(struct wacom *wacom) +{ + int r, skip; + + dev_dbg(&wacom->dev->dev, "Configuration string: %s\n", wacom->data); + r = sscanf(wacom->data, "~R%x,%u,%u,%u,%u", &skip, &skip, &skip, + &wacom->res_x, &wacom->res_y); + if (r != 5) + dev_warn(&wacom->dev->dev, "could not get resolution\n"); +} + +static void wacom_handle_coordinates_response(struct wacom *wacom) +{ + int r; + + dev_dbg(&wacom->dev->dev, "Coordinates string: %s\n", wacom->data); + r = sscanf(wacom->data, "~C%u,%u", &wacom->max_x, &wacom->max_y); + if (r != 2) + dev_warn(&wacom->dev->dev, "could not get max coordinates\n"); +} + +static void wacom_handle_response(struct wacom *wacom) +{ + if (wacom->data[0] != '~' || wacom->data[1] != wacom->expect) { + dev_err(&wacom->dev->dev, + "Wacom got an unexpected response: %s\n", wacom->data); + wacom->result = -EIO; + } else { + wacom->result = 0; + + switch (wacom->data[1]) { + case '#': + wacom_handle_model_response(wacom); + break; + case 'R': + wacom_handle_configuration_response(wacom); + break; + case 'C': + wacom_handle_coordinates_response(wacom); + break; + } + } + + complete(&wacom->cmd_done); +} + +static void wacom_handle_packet(struct wacom *wacom) +{ + u8 in_proximity_p, stylus_p, button; + unsigned int tool; + int x, y, z; + + in_proximity_p = wacom->data[0] & 0x40; + stylus_p = wacom->data[0] & 0x20; + button = (wacom->data[3] & 0x78) >> 3; + x = (wacom->data[0] & 3) << 14 | wacom->data[1]<<7 | wacom->data[2]; + y = (wacom->data[3] & 3) << 14 | wacom->data[4]<<7 | wacom->data[5]; + + if (in_proximity_p && stylus_p) { + z = wacom->data[6] & 0x7f; + if (wacom->extra_z_bits >= 1) + z = z << 1 | (wacom->data[3] & 0x4) >> 2; + if (wacom->extra_z_bits > 1) + z = z << 1 | (wacom->data[0] & 0x4) >> 2; + z = z ^ (0x40 << wacom->extra_z_bits); + } else { + z = -1; + } + + if (stylus_p) + tool = (button & wacom->eraser_mask) ? ERASER : STYLUS; + else + tool = CURSOR; + + if (tool != wacom->tool && wacom->tool != 0) { + input_report_key(wacom->dev, tools[wacom->tool].input_id, 0); + input_sync(wacom->dev); + } + wacom->tool = tool; + + input_report_key(wacom->dev, tools[tool].input_id, in_proximity_p); + input_report_abs(wacom->dev, ABS_MISC, + in_proximity_p ? 
tools[tool].device_id : 0); + input_report_abs(wacom->dev, ABS_X, x); + input_report_abs(wacom->dev, ABS_Y, y); + input_report_abs(wacom->dev, ABS_PRESSURE, z); + if (stylus_p) { + input_report_key(wacom->dev, BTN_TOUCH, button & 1); + input_report_key(wacom->dev, BTN_STYLUS, button & 2); + input_report_key(wacom->dev, BTN_STYLUS2, button & 4); + } else { + input_report_key(wacom->dev, BTN_LEFT, button & 1); + input_report_key(wacom->dev, BTN_RIGHT, button & 2); + input_report_key(wacom->dev, BTN_MIDDLE, button & 4); + /* handle relative wheel for non-stylus device */ + z = (wacom->data[6] & 0x30) >> 4; + if (wacom->data[6] & 0x40) + z = -z; + input_report_rel(wacom->dev, REL_WHEEL, z); + } + input_sync(wacom->dev); +} + +static void wacom_clear_data_buf(struct wacom *wacom) +{ + memset(wacom->data, 0, DATA_SIZE); + wacom->idx = 0; +} + +static irqreturn_t wacom_interrupt(struct serio *serio, unsigned char data, + unsigned int flags) +{ + struct wacom *wacom = serio_get_drvdata(serio); + + if (data & 0x80) + wacom->idx = 0; + + /* + * We're either expecting a carriage return-terminated ASCII + * response string, or a seven-byte packet with the MSB set on + * the first byte. + * + * Note however that some tablets (the PenPartner, for + * example) don't send a carriage return at the end of a + * command. We handle these by waiting for timeout. + */ + if (data == '\r' && !(wacom->data[0] & 0x80)) { + wacom_handle_response(wacom); + wacom_clear_data_buf(wacom); + return IRQ_HANDLED; + } + + /* Leave place for 0 termination */ + if (wacom->idx > (DATA_SIZE - 2)) { + dev_dbg(&wacom->dev->dev, + "throwing away %d bytes of garbage\n", wacom->idx); + wacom_clear_data_buf(wacom); + } + wacom->data[wacom->idx++] = data; + + if (wacom->idx == PACKET_LENGTH && (wacom->data[0] & 0x80)) { + wacom_handle_packet(wacom); + wacom_clear_data_buf(wacom); + } + + return IRQ_HANDLED; +} + +static void wacom_disconnect(struct serio *serio) +{ + struct wacom *wacom = serio_get_drvdata(serio); + + serio_close(serio); + serio_set_drvdata(serio, NULL); + input_unregister_device(wacom->dev); + kfree(wacom); +} + +static int wacom_send(struct serio *serio, const u8 *command) +{ + int err = 0; + + for (; !err && *command; command++) + err = serio_write(serio, *command); + + return err; +} + +static int wacom_send_setup_string(struct wacom *wacom, struct serio *serio) +{ + const u8 *cmd; + + switch (wacom->dev->id.version) { + case MODEL_CINTIQ: /* UNTESTED */ + cmd = COMMAND_ORIGIN_IN_UPPER_LEFT + COMMAND_TRANSMIT_AT_MAX_RATE + COMMAND_ENABLE_CONTINUOUS_MODE + COMMAND_START_SENDING_PACKETS; + break; + + case MODEL_PENPARTNER: + cmd = COMMAND_ENABLE_PRESSURE_MODE + COMMAND_START_SENDING_PACKETS; + break; + + default: + cmd = COMMAND_MULTI_MODE_INPUT + COMMAND_ORIGIN_IN_UPPER_LEFT + COMMAND_ENABLE_ALL_MACRO_BUTTONS + COMMAND_DISABLE_GROUP_1_MACRO_BUTTONS + COMMAND_TRANSMIT_AT_MAX_RATE + COMMAND_DISABLE_INCREMENTAL_MODE + COMMAND_ENABLE_CONTINUOUS_MODE + COMMAND_Z_FILTER + COMMAND_START_SENDING_PACKETS; + break; + } + + return wacom_send(serio, cmd); +} + +static int wacom_send_and_wait(struct wacom *wacom, struct serio *serio, + const u8 *cmd, const char *desc) +{ + int err; + unsigned long u; + + wacom->expect = cmd[1]; + init_completion(&wacom->cmd_done); + + err = wacom_send(serio, cmd); + if (err) + return err; + + u = wait_for_completion_timeout(&wacom->cmd_done, HZ); + if (u == 0) { + /* Timeout, process what we've received. 
*/ + wacom_handle_response(wacom); + } + + wacom->expect = 0; + return wacom->result; +} + +static int wacom_setup(struct wacom *wacom, struct serio *serio) +{ + int err; + + /* Note that setting the link speed is the job of inputattach. + * We assume that reset negotiation has already happened, + * here. */ + err = wacom_send_and_wait(wacom, serio, REQUEST_MODEL_AND_ROM_VERSION, + "model and version"); + if (err) + return err; + + if (!(wacom->res_x && wacom->res_y)) { + err = wacom_send_and_wait(wacom, serio, + REQUEST_CONFIGURATION_STRING, + "configuration string"); + if (err) + return err; + } + + if (!(wacom->max_x && wacom->max_y)) { + err = wacom_send_and_wait(wacom, serio, + REQUEST_MAX_COORDINATES, + "coordinates string"); + if (err) + return err; + } + + return wacom_send_setup_string(wacom, serio); +} + +static int wacom_connect(struct serio *serio, struct serio_driver *drv) +{ + struct wacom *wacom; + struct input_dev *input_dev; + int err = -ENOMEM; + + wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!wacom || !input_dev) + goto free_device; + + wacom->dev = input_dev; + wacom->extra_z_bits = 1; + wacom->eraser_mask = 0x04; + wacom->tool = wacom->idx = 0; + snprintf(wacom->phys, sizeof(wacom->phys), "%s/input0", serio->phys); + input_dev->phys = wacom->phys; + input_dev->id.bustype = BUS_RS232; + input_dev->id.vendor = SERIO_WACOM_IV; + input_dev->id.product = serio->id.extra; + input_dev->dev.parent = &serio->dev; + + input_dev->evbit[0] = + BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) | BIT_MASK(EV_REL); + set_bit(ABS_MISC, input_dev->absbit); + set_bit(BTN_TOOL_PEN, input_dev->keybit); + set_bit(BTN_TOOL_RUBBER, input_dev->keybit); + set_bit(BTN_TOOL_MOUSE, input_dev->keybit); + set_bit(BTN_TOUCH, input_dev->keybit); + set_bit(BTN_STYLUS, input_dev->keybit); + set_bit(BTN_LEFT, input_dev->keybit); + set_bit(BTN_RIGHT, input_dev->keybit); + set_bit(BTN_MIDDLE, input_dev->keybit); + + serio_set_drvdata(serio, wacom); + + err = serio_open(serio, drv); + if (err) + goto free_device; + + err = wacom_setup(wacom, serio); + if (err) + goto close_serio; + + set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + if (!(wacom->flags & F_COVERS_SCREEN)) + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + + if (wacom->flags & F_HAS_STYLUS2) + __set_bit(BTN_STYLUS2, input_dev->keybit); + + if (wacom->flags & F_HAS_SCROLLWHEEL) + __set_bit(REL_WHEEL, input_dev->relbit); + + input_abs_set_res(wacom->dev, ABS_X, wacom->res_x); + input_abs_set_res(wacom->dev, ABS_Y, wacom->res_y); + input_set_abs_params(wacom->dev, ABS_X, 0, wacom->max_x, 0, 0); + input_set_abs_params(wacom->dev, ABS_Y, 0, wacom->max_y, 0, 0); + input_set_abs_params(wacom->dev, ABS_PRESSURE, -1, + (1 << (7 + wacom->extra_z_bits)) - 1, 0, 0); + + err = input_register_device(wacom->dev); + if (err) + goto close_serio; + + return 0; + +close_serio: + serio_close(serio); +free_device: + serio_set_drvdata(serio, NULL); + input_free_device(input_dev); + kfree(wacom); + return err; +} + +static struct serio_device_id wacom_serio_ids[] = { + { + .type = SERIO_RS232, + .proto = SERIO_WACOM_IV, + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, + { 0 } +}; + +MODULE_DEVICE_TABLE(serio, wacom_serio_ids); + +static struct serio_driver wacom_drv = { + .driver = { + .name = "wacom_serial4", + }, + .description = "Wacom protocol 4 serial tablet driver", + .id_table = wacom_serio_ids, + .interrupt = wacom_interrupt, + .connect = wacom_connect, + .disconnect = wacom_disconnect, +}; + 
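/*
 * For reference, a minimal sketch of what the module_serio_driver() helper
 * below expands to: per include/linux/serio.h it wraps module_driver() with
 * serio_register_driver()/serio_unregister_driver().  Kept under "#if 0"
 * because the one-liner below already generates the equivalent init/exit
 * pair; the function names here are made up for illustration.
 */
#if 0
static int __init wacom_serial4_init(void)
{
	/* register the driver on the serio bus */
	return serio_register_driver(&wacom_drv);
}
module_init(wacom_serial4_init);

static void __exit wacom_serial4_exit(void)
{
	serio_unregister_driver(&wacom_drv);
}
module_exit(wacom_serial4_exit);
#endif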
+module_serio_driver(wacom_drv); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index a23a94bb4bcb..6bb9a7dd23b6 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -471,6 +471,18 @@ config TOUCHSCREEN_HP7XX To compile this driver as a module, choose M here: the module will be called jornada720_ts. +config TOUCHSCREEN_IPAQ_MICRO + tristate "HP iPAQ Atmel Micro ASIC touchscreen" + depends on MFD_IPAQ_MICRO + help + Say Y here to enable support for the touchscreen attached to + the Atmel Micro peripheral controller on iPAQ h3100/h3600/h3700 + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called ipaq-micro-ts. + config TOUCHSCREEN_HTCPEN tristate "HTC Shift X9500 touchscreen" depends on ISA diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 126479d8c29a..4be94fce41af 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o obj-$(CONFIG_TOUCHSCREEN_HP7XX) += jornada720_ts.o +obj-$(CONFIG_TOUCHSCREEN_IPAQ_MICRO) += ipaq-micro-ts.o obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index da201b8e37dc..e57ba52bf484 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1302,8 +1302,10 @@ static int ads7846_probe(struct spi_device *spi) pdata = dev_get_platdata(&spi->dev); if (!pdata) { pdata = ads7846_probe_dt(&spi->dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); + if (IS_ERR(pdata)) { + err = PTR_ERR(pdata); + goto err_free_mem; + } } ts->model = pdata->model ? : 7846; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 6e0b4a2120d3..aaacf8bfa61f 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -2,6 +2,7 @@ * Atmel maXTouch Touchscreen driver * * Copyright (C) 2010 Samsung Electronics Co.Ltd + * Copyright (C) 2011-2014 Atmel Corporation * Copyright (C) 2012 Google, Inc. 
* * Author: Joonyoung Shim <jy0922.shim@samsung.com> @@ -22,6 +23,7 @@ #include <linux/i2c/atmel_mxt_ts.h> #include <linux/input/mt.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/slab.h> /* Version */ @@ -29,8 +31,10 @@ #define MXT_VER_21 21 #define MXT_VER_22 22 -/* Firmware */ +/* Firmware files */ #define MXT_FW_NAME "maxtouch.fw" +#define MXT_CFG_NAME "maxtouch.cfg" +#define MXT_CFG_MAGIC "OBP_RAW V1" /* Registers */ #define MXT_INFO 0x00 @@ -44,6 +48,8 @@ #define MXT_OBJECT_START 0x07 #define MXT_OBJECT_SIZE 6 +#define MXT_INFO_CHECKSUM_SIZE 3 +#define MXT_MAX_BLOCK_WRITE 256 /* Object types */ #define MXT_DEBUG_DIAGNOSTIC_T37 37 @@ -74,6 +80,9 @@ #define MXT_SPT_MESSAGECOUNT_T44 44 #define MXT_SPT_CTECONFIG_T46 46 +/* MXT_GEN_MESSAGE_T5 object */ +#define MXT_RPTID_NOMSG 0xff + /* MXT_GEN_COMMAND_T6 field */ #define MXT_COMMAND_RESET 0 #define MXT_COMMAND_BACKUPNV 1 @@ -83,11 +92,20 @@ /* Define for T6 status byte */ #define MXT_T6_STATUS_RESET (1 << 7) +#define MXT_T6_STATUS_OFL (1 << 6) +#define MXT_T6_STATUS_SIGERR (1 << 5) +#define MXT_T6_STATUS_CAL (1 << 4) +#define MXT_T6_STATUS_CFGERR (1 << 3) +#define MXT_T6_STATUS_COMSERR (1 << 2) /* MXT_GEN_POWER_T7 field */ -#define MXT_POWER_IDLEACQINT 0 -#define MXT_POWER_ACTVACQINT 1 -#define MXT_POWER_ACTV2IDLETO 2 +struct t7_config { + u8 idle; + u8 active; +} __packed; + +#define MXT_POWER_CFG_RUN 0 +#define MXT_POWER_CFG_DEEPSLEEP 1 /* MXT_GEN_ACQUIRE_T8 field */ #define MXT_ACQUIRE_CHRGTIME 0 @@ -99,7 +117,6 @@ #define MXT_ACQUIRE_ATCHCALSTHR 7 /* MXT_TOUCH_MULTI_T9 field */ -#define MXT_TOUCH_CTRL 0 #define MXT_T9_ORIENT 9 #define MXT_T9_RANGE 18 @@ -217,11 +234,6 @@ struct mxt_object { u8 num_report_ids; } __packed; -struct mxt_message { - u8 reportid; - u8 message[7]; -}; - /* Each client has this additional data */ struct mxt_data { struct i2c_client *client; @@ -234,15 +246,28 @@ struct mxt_data { unsigned int max_x; unsigned int max_y; bool in_bootloader; + u16 mem_size; + u8 max_reportid; u32 config_crc; + u32 info_crc; u8 bootloader_addr; + u8 *msg_buf; + u8 t6_status; + bool update_input; + u8 last_message_count; + u8 num_touchids; + struct t7_config t7_cfg; /* Cached parameters from object table */ + u16 T5_address; + u8 T5_msg_size; u8 T6_reportid; u16 T6_address; + u16 T7_address; u8 T9_reportid_min; u8 T9_reportid_max; u8 T19_reportid; + u16 T44_address; /* for fw update in bootloader */ struct completion bl_completion; @@ -297,42 +322,10 @@ static bool mxt_object_readable(unsigned int type) } } -static bool mxt_object_writable(unsigned int type) -{ - switch (type) { - case MXT_GEN_COMMAND_T6: - case MXT_GEN_POWER_T7: - case MXT_GEN_ACQUIRE_T8: - case MXT_TOUCH_MULTI_T9: - case MXT_TOUCH_KEYARRAY_T15: - case MXT_TOUCH_PROXIMITY_T23: - case MXT_TOUCH_PROXKEY_T52: - case MXT_PROCI_GRIPFACE_T20: - case MXT_PROCG_NOISE_T22: - case MXT_PROCI_ONETOUCH_T24: - case MXT_PROCI_TWOTOUCH_T27: - case MXT_PROCI_GRIP_T40: - case MXT_PROCI_PALM_T41: - case MXT_PROCI_TOUCHSUPPRESSION_T42: - case MXT_PROCI_STYLUS_T47: - case MXT_PROCG_NOISESUPPRESSION_T48: - case MXT_SPT_COMMSCONFIG_T18: - case MXT_SPT_GPIOPWM_T19: - case MXT_SPT_SELFTEST_T25: - case MXT_SPT_CTECONFIG_T28: - case MXT_SPT_DIGITIZER_T43: - case MXT_SPT_CTECONFIG_T46: - return true; - default: - return false; - } -} - -static void mxt_dump_message(struct device *dev, - struct mxt_message *message) +static void mxt_dump_message(struct mxt_data *data, u8 *message) { - dev_dbg(dev, "reportid: %u\tmessage: %*ph\n", - message->reportid, 7, 
message->message); + dev_dbg(&data->client->dev, "message: %*ph\n", + data->T5_msg_size, message); } static int mxt_wait_for_completion(struct mxt_data *data, @@ -366,7 +359,6 @@ static int mxt_bootloader_read(struct mxt_data *data, msg.buf = val; ret = i2c_transfer(data->client->adapter, &msg, 1); - if (ret == 1) { ret = 0; } else { @@ -401,7 +393,7 @@ static int mxt_bootloader_write(struct mxt_data *data, return ret; } -static int mxt_lookup_bootloader_address(struct mxt_data *data) +static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry) { u8 appmode = data->client->addr; u8 bootloader; @@ -409,12 +401,19 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data) switch (appmode) { case 0x4a: case 0x4b: + /* Chips after 1664S use different scheme */ + if (retry || data->info.family_id >= 0xa2) { + bootloader = appmode - 0x24; + break; + } + /* Fall through for normal case */ case 0x4c: case 0x4d: case 0x5a: case 0x5b: bootloader = appmode - 0x26; break; + default: dev_err(&data->client->dev, "Appmode i2c address 0x%02x not found\n", @@ -426,6 +425,30 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data) return 0; } +static int mxt_probe_bootloader(struct mxt_data *data, bool alt_address) +{ + struct device *dev = &data->client->dev; + int error; + u8 val; + bool crc_failure; + + error = mxt_lookup_bootloader_address(data, alt_address); + if (error) + return error; + + error = mxt_bootloader_read(data, &val, 1); + if (error) + return error; + + /* Check app crc fail mode */ + crc_failure = (val & ~MXT_BOOT_STATUS_MASK) == MXT_APP_CRC_FAIL; + + dev_err(dev, "Detected bootloader, status:%02X%s\n", + val, crc_failure ? ", APP_CRC_FAIL" : ""); + + return 0; +} + static u8 mxt_get_bootloader_version(struct mxt_data *data, u8 val) { struct device *dev = &data->client->dev; @@ -447,14 +470,15 @@ static u8 mxt_get_bootloader_version(struct mxt_data *data, u8 val) } } -static int mxt_check_bootloader(struct mxt_data *data, unsigned int state) +static int mxt_check_bootloader(struct mxt_data *data, unsigned int state, + bool wait) { struct device *dev = &data->client->dev; u8 val; int ret; recheck: - if (state != MXT_WAITING_BOOTLOAD_CMD) { + if (wait) { /* * In application update mode, the interrupt * line signals state transitions. 
We must wait for the @@ -485,6 +509,7 @@ recheck: switch (state) { case MXT_WAITING_BOOTLOAD_CMD: case MXT_WAITING_FRAME_DATA: + case MXT_APP_CRC_FAIL: val &= ~MXT_BOOT_STATUS_MASK; break; case MXT_FRAME_CRC_PASS: @@ -508,13 +533,18 @@ recheck: return 0; } -static int mxt_unlock_bootloader(struct mxt_data *data) +static int mxt_send_bootloader_cmd(struct mxt_data *data, bool unlock) { int ret; u8 buf[2]; - buf[0] = MXT_UNLOCK_CMD_LSB; - buf[1] = MXT_UNLOCK_CMD_MSB; + if (unlock) { + buf[0] = MXT_UNLOCK_CMD_LSB; + buf[1] = MXT_UNLOCK_CMD_MSB; + } else { + buf[0] = 0x01; + buf[1] = 0x01; + } ret = mxt_bootloader_write(data, buf, 2); if (ret) @@ -605,40 +635,44 @@ mxt_get_object(struct mxt_data *data, u8 type) return object; } - dev_err(&data->client->dev, "Invalid object type T%u\n", type); + dev_warn(&data->client->dev, "Invalid object type T%u\n", type); return NULL; } -static int mxt_read_message(struct mxt_data *data, - struct mxt_message *message) +static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg) { - struct mxt_object *object; - u16 reg; - - object = mxt_get_object(data, MXT_GEN_MESSAGE_T5); - if (!object) - return -EINVAL; + struct device *dev = &data->client->dev; + u8 status = msg[1]; + u32 crc = msg[2] | (msg[3] << 8) | (msg[4] << 16); - reg = object->start_address; - return __mxt_read_reg(data->client, reg, - sizeof(struct mxt_message), message); -} + complete(&data->crc_completion); -static int mxt_write_object(struct mxt_data *data, - u8 type, u8 offset, u8 val) -{ - struct mxt_object *object; - u16 reg; - - object = mxt_get_object(data, type); - if (!object || offset >= mxt_obj_size(object)) - return -EINVAL; + if (crc != data->config_crc) { + data->config_crc = crc; + dev_dbg(dev, "T6 Config Checksum: 0x%06X\n", crc); + } - reg = object->start_address; - return mxt_write_reg(data->client, reg + offset, val); + /* Detect reset */ + if (status & MXT_T6_STATUS_RESET) + complete(&data->reset_completion); + + /* Output debug if status has changed */ + if (status != data->t6_status) + dev_dbg(dev, "T6 Status 0x%02X%s%s%s%s%s%s%s\n", + status, + status == 0 ? " OK" : "", + status & MXT_T6_STATUS_RESET ? " RESET" : "", + status & MXT_T6_STATUS_OFL ? " OFL" : "", + status & MXT_T6_STATUS_SIGERR ? " SIGERR" : "", + status & MXT_T6_STATUS_CAL ? " CAL" : "", + status & MXT_T6_STATUS_CFGERR ? " CFGERR" : "", + status & MXT_T6_STATUS_COMSERR ? 
" COMSERR" : ""); + + /* Save current status */ + data->t6_status = status; } -static void mxt_input_button(struct mxt_data *data, struct mxt_message *message) +static void mxt_input_button(struct mxt_data *data, u8 *message) { struct input_dev *input = data->input_dev; const struct mxt_platform_data *pdata = data->pdata; @@ -649,30 +683,33 @@ static void mxt_input_button(struct mxt_data *data, struct mxt_message *message) for (i = 0; i < pdata->t19_num_keys; i++) { if (pdata->t19_keymap[i] == KEY_RESERVED) continue; - button = !(message->message[0] & (1 << i)); + button = !(message[1] & (1 << i)); input_report_key(input, pdata->t19_keymap[i], button); } } -static void mxt_input_sync(struct input_dev *input_dev) +static void mxt_input_sync(struct mxt_data *data) { - input_mt_report_pointer_emulation(input_dev, false); - input_sync(input_dev); + input_mt_report_pointer_emulation(data->input_dev, + data->pdata->t19_num_keys); + input_sync(data->input_dev); } -static void mxt_input_touchevent(struct mxt_data *data, - struct mxt_message *message, int id) +static void mxt_proc_t9_message(struct mxt_data *data, u8 *message) { struct device *dev = &data->client->dev; - u8 status = message->message[0]; struct input_dev *input_dev = data->input_dev; + int id; + u8 status; int x; int y; int area; int amplitude; - x = (message->message[1] << 4) | ((message->message[3] >> 4) & 0xf); - y = (message->message[2] << 4) | ((message->message[3] & 0xf)); + id = message[0] - data->T9_reportid_min; + status = message[1]; + x = (message[2] << 4) | ((message[4] >> 4) & 0xf); + y = (message[3] << 4) | ((message[4] & 0xf)); /* Handle 10/12 bit switching */ if (data->max_x < 1024) @@ -680,8 +717,8 @@ static void mxt_input_touchevent(struct mxt_data *data, if (data->max_y < 1024) y >>= 2; - area = message->message[4]; - amplitude = message->message[5]; + area = message[5]; + amplitude = message[6]; dev_dbg(dev, "[%u] %c%c%c%c%c%c%c%c x: %5u y: %5u area: %3u amp: %3u\n", @@ -707,7 +744,7 @@ static void mxt_input_touchevent(struct mxt_data *data, if (status & MXT_T9_RELEASE) { input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0); - mxt_input_sync(input_dev); + mxt_input_sync(data); } /* Touch active */ @@ -720,64 +757,184 @@ static void mxt_input_touchevent(struct mxt_data *data, /* Touch no longer active, close out slot */ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0); } + + data->update_input = true; +} + +static int mxt_proc_message(struct mxt_data *data, u8 *message) +{ + u8 report_id = message[0]; + + if (report_id == MXT_RPTID_NOMSG) + return 0; + + if (report_id == data->T6_reportid) { + mxt_proc_t6_messages(data, message); + } else if (!data->input_dev) { + /* + * Do not report events if input device + * is not yet registered. 
+ */ + mxt_dump_message(data, message); + } else if (report_id >= data->T9_reportid_min + && report_id <= data->T9_reportid_max) { + mxt_proc_t9_message(data, message); + } else if (report_id == data->T19_reportid) { + mxt_input_button(data, message); + data->update_input = true; + } else { + mxt_dump_message(data, message); + } + + return 1; } -static u16 mxt_extract_T6_csum(const u8 *csum) +static int mxt_read_and_process_messages(struct mxt_data *data, u8 count) { - return csum[0] | (csum[1] << 8) | (csum[2] << 16); + struct device *dev = &data->client->dev; + int ret; + int i; + u8 num_valid = 0; + + /* Safety check for msg_buf */ + if (count > data->max_reportid) + return -EINVAL; + + /* Process remaining messages if necessary */ + ret = __mxt_read_reg(data->client, data->T5_address, + data->T5_msg_size * count, data->msg_buf); + if (ret) { + dev_err(dev, "Failed to read %u messages (%d)\n", count, ret); + return ret; + } + + for (i = 0; i < count; i++) { + ret = mxt_proc_message(data, + data->msg_buf + data->T5_msg_size * i); + + if (ret == 1) + num_valid++; + } + + /* return number of messages read */ + return num_valid; } -static bool mxt_is_T9_message(struct mxt_data *data, struct mxt_message *msg) +static irqreturn_t mxt_process_messages_t44(struct mxt_data *data) { - u8 id = msg->reportid; - return (id >= data->T9_reportid_min && id <= data->T9_reportid_max); + struct device *dev = &data->client->dev; + int ret; + u8 count, num_left; + + /* Read T44 and T5 together */ + ret = __mxt_read_reg(data->client, data->T44_address, + data->T5_msg_size + 1, data->msg_buf); + if (ret) { + dev_err(dev, "Failed to read T44 and T5 (%d)\n", ret); + return IRQ_NONE; + } + + count = data->msg_buf[0]; + + if (count == 0) { + /* + * This condition is caused by the CHG line being configured + * in Mode 0. It results in unnecessary I2C operations but it + * is benign. 
+ */ + dev_dbg(dev, "Interrupt triggered but zero messages\n"); + return IRQ_NONE; + } else if (count > data->max_reportid) { + dev_err(dev, "T44 count %d exceeded max report id\n", count); + count = data->max_reportid; + } + + /* Process first message */ + ret = mxt_proc_message(data, data->msg_buf + 1); + if (ret < 0) { + dev_warn(dev, "Unexpected invalid message\n"); + return IRQ_NONE; + } + + num_left = count - 1; + + /* Process remaining messages if necessary */ + if (num_left) { + ret = mxt_read_and_process_messages(data, num_left); + if (ret < 0) + goto end; + else if (ret != num_left) + dev_warn(dev, "Unexpected invalid message\n"); + } + +end: + if (data->update_input) { + mxt_input_sync(data); + data->update_input = false; + } + + return IRQ_HANDLED; } -static irqreturn_t mxt_process_messages_until_invalid(struct mxt_data *data) +static int mxt_process_messages_until_invalid(struct mxt_data *data) { - struct mxt_message message; - const u8 *payload = &message.message[0]; struct device *dev = &data->client->dev; - u8 reportid; - bool update_input = false; - u32 crc; + int count, read; + u8 tries = 2; + count = data->max_reportid; + + /* Read messages until we force an invalid */ do { - if (mxt_read_message(data, &message)) { - dev_err(dev, "Failed to read message\n"); - return IRQ_NONE; - } + read = mxt_read_and_process_messages(data, count); + if (read < count) + return 0; + } while (--tries); - reportid = message.reportid; + if (data->update_input) { + mxt_input_sync(data); + data->update_input = false; + } - if (reportid == data->T6_reportid) { - u8 status = payload[0]; + dev_err(dev, "CHG pin isn't cleared\n"); + return -EBUSY; +} - crc = mxt_extract_T6_csum(&payload[1]); - if (crc != data->config_crc) { - data->config_crc = crc; - complete(&data->crc_completion); - } +static irqreturn_t mxt_process_messages(struct mxt_data *data) +{ + int total_handled, num_handled; + u8 count = data->last_message_count; - dev_dbg(dev, "Status: %02x Config Checksum: %06x\n", - status, data->config_crc); - - if (status & MXT_T6_STATUS_RESET) - complete(&data->reset_completion); - } else if (mxt_is_T9_message(data, &message)) { - int id = reportid - data->T9_reportid_min; - mxt_input_touchevent(data, &message, id); - update_input = true; - } else if (message.reportid == data->T19_reportid) { - mxt_input_button(data, &message); - update_input = true; - } else { - mxt_dump_message(dev, &message); - } - } while (reportid != 0xff); + if (count < 1 || count > data->max_reportid) + count = 1; + + /* include final invalid message */ + total_handled = mxt_read_and_process_messages(data, count + 1); + if (total_handled < 0) + return IRQ_NONE; + /* if there were invalid messages, then we are done */ + else if (total_handled <= count) + goto update_count; + + /* keep reading two msgs until one is invalid or reportid limit */ + do { + num_handled = mxt_read_and_process_messages(data, 2); + if (num_handled < 0) + return IRQ_NONE; + + total_handled += num_handled; + + if (num_handled < 2) + break; + } while (total_handled < data->num_touchids); + +update_count: + data->last_message_count = total_handled; - if (update_input) - mxt_input_sync(data->input_dev); + if (data->update_input) { + mxt_input_sync(data); + data->update_input = false; + } return IRQ_HANDLED; } @@ -792,7 +949,14 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } - return mxt_process_messages_until_invalid(data); + if (!data->object_table) + return IRQ_HANDLED; + + if (data->T44_address) { + return 
mxt_process_messages_t44(data); + } else { + return mxt_process_messages(data); + } } static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset, @@ -866,78 +1030,337 @@ static void mxt_update_crc(struct mxt_data *data, u8 cmd, u8 value) mxt_wait_for_completion(data, &data->crc_completion, MXT_CRC_TIMEOUT); } -static int mxt_check_reg_init(struct mxt_data *data) +static void mxt_calc_crc24(u32 *crc, u8 firstbyte, u8 secondbyte) +{ + static const unsigned int crcpoly = 0x80001B; + u32 result; + u32 data_word; + + data_word = (secondbyte << 8) | firstbyte; + result = ((*crc << 1) ^ data_word); + + if (result & 0x1000000) + result ^= crcpoly; + + *crc = result; +} + +static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off) +{ + u32 crc = 0; + u8 *ptr = base + start_off; + u8 *last_val = base + end_off - 1; + + if (end_off < start_off) + return -EINVAL; + + while (ptr < last_val) { + mxt_calc_crc24(&crc, *ptr, *(ptr + 1)); + ptr += 2; + } + + /* if len is odd, fill the last byte with 0 */ + if (ptr == last_val) + mxt_calc_crc24(&crc, *ptr, 0); + + /* Mask to 24-bit */ + crc &= 0x00FFFFFF; + + return crc; +} + +static int mxt_prepare_cfg_mem(struct mxt_data *data, + const struct firmware *cfg, + unsigned int data_pos, + unsigned int cfg_start_ofs, + u8 *config_mem, + size_t config_mem_size) { - const struct mxt_platform_data *pdata = data->pdata; - struct mxt_object *object; struct device *dev = &data->client->dev; - int index = 0; - int i, size; + struct mxt_object *object; + unsigned int type, instance, size, byte_offset; + int offset; int ret; + int i; + u16 reg; + u8 val; - if (!pdata->config) { - dev_dbg(dev, "No cfg data defined, skipping reg init\n"); - return 0; + while (data_pos < cfg->size) { + /* Read type, instance, length */ + ret = sscanf(cfg->data + data_pos, "%x %x %x%n", + &type, &instance, &size, &offset); + if (ret == 0) { + /* EOF */ + break; + } else if (ret != 3) { + dev_err(dev, "Bad format: failed to parse object\n"); + return -EINVAL; + } + data_pos += offset; + + object = mxt_get_object(data, type); + if (!object) { + /* Skip object */ + for (i = 0; i < size; i++) { + ret = sscanf(cfg->data + data_pos, "%hhx%n", + &val, &offset); + if (ret != 1) { + dev_err(dev, "Bad format in T%d at %d\n", + type, i); + return -EINVAL; + } + data_pos += offset; + } + continue; + } + + if (size > mxt_obj_size(object)) { + /* + * Either we are in fallback mode due to wrong + * config or config from a later fw version, + * or the file is corrupt or hand-edited. + */ + dev_warn(dev, "Discarding %zu byte(s) in T%u\n", + size - mxt_obj_size(object), type); + } else if (mxt_obj_size(object) > size) { + /* + * If firmware is upgraded, new bytes may be added to + * end of objects. It is generally forward compatible + * to zero these bytes - previous behaviour will be + * retained. However this does invalidate the CRC and + * will force fallback mode until the configuration is + * updated. We warn here but do nothing else - the + * malloc has zeroed the entire configuration. 
+ */ + dev_warn(dev, "Zeroing %zu byte(s) in T%d\n", + mxt_obj_size(object) - size, type); + } + + if (instance >= mxt_obj_instances(object)) { + dev_err(dev, "Object instances exceeded!\n"); + return -EINVAL; + } + + reg = object->start_address + mxt_obj_size(object) * instance; + + for (i = 0; i < size; i++) { + ret = sscanf(cfg->data + data_pos, "%hhx%n", + &val, + &offset); + if (ret != 1) { + dev_err(dev, "Bad format in T%d at %d\n", + type, i); + return -EINVAL; + } + data_pos += offset; + + if (i > mxt_obj_size(object)) + continue; + + byte_offset = reg + i - cfg_start_ofs; + + if (byte_offset >= 0 && byte_offset < config_mem_size) { + *(config_mem + byte_offset) = val; + } else { + dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n", + reg, object->type, byte_offset); + return -EINVAL; + } + } } - mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1); + return 0; +} - if (data->config_crc == pdata->config_crc) { - dev_info(dev, "Config CRC 0x%06X: OK\n", data->config_crc); - return 0; +static int mxt_upload_cfg_mem(struct mxt_data *data, unsigned int cfg_start, + u8 *config_mem, size_t config_mem_size) +{ + unsigned int byte_offset = 0; + int error; + + /* Write configuration as blocks */ + while (byte_offset < config_mem_size) { + unsigned int size = config_mem_size - byte_offset; + + if (size > MXT_MAX_BLOCK_WRITE) + size = MXT_MAX_BLOCK_WRITE; + + error = __mxt_write_reg(data->client, + cfg_start + byte_offset, + size, config_mem + byte_offset); + if (error) { + dev_err(&data->client->dev, + "Config write error, ret=%d\n", error); + return error; + } + + byte_offset += size; } - dev_info(dev, "Config CRC 0x%06X: does not match 0x%06X\n", - data->config_crc, pdata->config_crc); + return 0; +} - for (i = 0; i < data->info.object_num; i++) { - object = data->object_table + i; +/* + * mxt_update_cfg - download configuration to chip + * + * Atmel Raw Config File Format + * + * The first four lines of the raw config file contain: + * 1) Version + * 2) Chip ID Information (first 7 bytes of device memory) + * 3) Chip Information Block 24-bit CRC Checksum + * 4) Chip Configuration 24-bit CRC Checksum + * + * The rest of the file consists of one line per object instance: + * <TYPE> <INSTANCE> <SIZE> <CONTENTS> + * + * <TYPE> - 2-byte object type as hex + * <INSTANCE> - 2-byte object instance number as hex + * <SIZE> - 2-byte object size as hex + * <CONTENTS> - array of <SIZE> 1-byte hex values + */ +static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) +{ + struct device *dev = &data->client->dev; + struct mxt_info cfg_info; + int ret; + int offset; + int data_pos; + int i; + int cfg_start_ofs; + u32 info_crc, config_crc, calculated_crc; + u8 *config_mem; + size_t config_mem_size; - if (!mxt_object_writable(object->type)) - continue; + mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1); - size = mxt_obj_size(object) * mxt_obj_instances(object); - if (index + size > pdata->config_length) { - dev_err(dev, "Not enough config data!\n"); + if (strncmp(cfg->data, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) { + dev_err(dev, "Unrecognised config file\n"); + return -EINVAL; + } + + data_pos = strlen(MXT_CFG_MAGIC); + + /* Load information block and check */ + for (i = 0; i < sizeof(struct mxt_info); i++) { + ret = sscanf(cfg->data + data_pos, "%hhx%n", + (unsigned char *)&cfg_info + i, + &offset); + if (ret != 1) { + dev_err(dev, "Bad format\n"); return -EINVAL; } - ret = __mxt_write_reg(data->client, object->start_address, - size, &pdata->config[index]); - if (ret) - return ret; - index += 
size; + data_pos += offset; + } + + if (cfg_info.family_id != data->info.family_id) { + dev_err(dev, "Family ID mismatch!\n"); + return -EINVAL; + } + + if (cfg_info.variant_id != data->info.variant_id) { + dev_err(dev, "Variant ID mismatch!\n"); + return -EINVAL; + } + + /* Read CRCs */ + ret = sscanf(cfg->data + data_pos, "%x%n", &info_crc, &offset); + if (ret != 1) { + dev_err(dev, "Bad format: failed to parse Info CRC\n"); + return -EINVAL; + } + data_pos += offset; + + ret = sscanf(cfg->data + data_pos, "%x%n", &config_crc, &offset); + if (ret != 1) { + dev_err(dev, "Bad format: failed to parse Config CRC\n"); + return -EINVAL; + } + data_pos += offset; + + /* + * The Info Block CRC is calculated over mxt_info and the object + * table. If it does not match then we are trying to load the + * configuration from a different chip or firmware version, so + * the configuration CRC is invalid anyway. + */ + if (info_crc == data->info_crc) { + if (config_crc == 0 || data->config_crc == 0) { + dev_info(dev, "CRC zero, attempting to apply config\n"); + } else if (config_crc == data->config_crc) { + dev_dbg(dev, "Config CRC 0x%06X: OK\n", + data->config_crc); + return 0; + } else { + dev_info(dev, "Config CRC 0x%06X: does not match file 0x%06X\n", + data->config_crc, config_crc); + } + } else { + dev_warn(dev, + "Warning: Info CRC error - device=0x%06X file=0x%06X\n", + data->info_crc, info_crc); + } + + /* Malloc memory to store configuration */ + cfg_start_ofs = MXT_OBJECT_START + + data->info.object_num * sizeof(struct mxt_object) + + MXT_INFO_CHECKSUM_SIZE; + config_mem_size = data->mem_size - cfg_start_ofs; + config_mem = kzalloc(config_mem_size, GFP_KERNEL); + if (!config_mem) { + dev_err(dev, "Failed to allocate memory\n"); + return -ENOMEM; + } + + ret = mxt_prepare_cfg_mem(data, cfg, data_pos, cfg_start_ofs, + config_mem, config_mem_size); + if (ret) + goto release_mem; + + /* Calculate crc of the received configs (not the raw config file) */ + if (data->T7_address < cfg_start_ofs) { + dev_err(dev, "Bad T7 address, T7addr = %x, config offset %x\n", + data->T7_address, cfg_start_ofs); + ret = 0; + goto release_mem; } + calculated_crc = mxt_calculate_crc(config_mem, + data->T7_address - cfg_start_ofs, + config_mem_size); + + if (config_crc > 0 && config_crc != calculated_crc) + dev_warn(dev, "Config CRC error, calculated=%06X, file=%06X\n", + calculated_crc, config_crc); + + ret = mxt_upload_cfg_mem(data, cfg_start_ofs, + config_mem, config_mem_size); + if (ret) + goto release_mem; + mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE); ret = mxt_soft_reset(data); if (ret) - return ret; + goto release_mem; dev_info(dev, "Config successfully updated\n"); - return 0; +release_mem: + kfree(config_mem); + return ret; } -static int mxt_make_highchg(struct mxt_data *data) +static int mxt_acquire_irq(struct mxt_data *data) { - struct device *dev = &data->client->dev; - struct mxt_message message; - int count = 10; int error; - /* Read dummy message to make high CHG pin */ - do { - error = mxt_read_message(data, &message); - if (error) - return error; - } while (message.reportid != 0xff && --count); + enable_irq(data->irq); - if (!count) { - dev_err(dev, "CHG pin isn't cleared\n"); - return -EBUSY; - } + error = mxt_process_messages_until_invalid(data); + if (error) + return error; return 0; } @@ -956,24 +1379,60 @@ static int mxt_get_info(struct mxt_data *data) return 0; } +static void mxt_free_input_device(struct mxt_data *data) +{ + if (data->input_dev) { + 
input_unregister_device(data->input_dev); + data->input_dev = NULL; + } +} + +static void mxt_free_object_table(struct mxt_data *data) +{ + kfree(data->object_table); + data->object_table = NULL; + kfree(data->msg_buf); + data->msg_buf = NULL; + data->T5_address = 0; + data->T5_msg_size = 0; + data->T6_reportid = 0; + data->T7_address = 0; + data->T9_reportid_min = 0; + data->T9_reportid_max = 0; + data->T19_reportid = 0; + data->T44_address = 0; + data->max_reportid = 0; +} + static int mxt_get_object_table(struct mxt_data *data) { struct i2c_client *client = data->client; size_t table_size; + struct mxt_object *object_table; int error; int i; u8 reportid; + u16 end_address; table_size = data->info.object_num * sizeof(struct mxt_object); + object_table = kzalloc(table_size, GFP_KERNEL); + if (!object_table) { + dev_err(&data->client->dev, "Failed to allocate memory\n"); + return -ENOMEM; + } + error = __mxt_read_reg(client, MXT_OBJECT_START, table_size, - data->object_table); - if (error) + object_table); + if (error) { + kfree(object_table); return error; + } /* Valid Report IDs start counting from 1 */ reportid = 1; + data->mem_size = 0; for (i = 0; i < data->info.object_num; i++) { - struct mxt_object *object = data->object_table + i; + struct mxt_object *object = object_table + i; u8 min_id, max_id; le16_to_cpus(&object->start_address); @@ -995,31 +1454,74 @@ static int mxt_get_object_table(struct mxt_data *data) min_id, max_id); switch (object->type) { + case MXT_GEN_MESSAGE_T5: + if (data->info.family_id == 0x80 && + data->info.version < 0x20) { + /* + * On mXT224 firmware versions prior to V2.0 + * read and discard unused CRC byte otherwise + * DMA reads are misaligned. + */ + data->T5_msg_size = mxt_obj_size(object); + } else { + /* CRC not enabled, so skip last byte */ + data->T5_msg_size = mxt_obj_size(object) - 1; + } + data->T5_address = object->start_address; + break; case MXT_GEN_COMMAND_T6: data->T6_reportid = min_id; data->T6_address = object->start_address; break; + case MXT_GEN_POWER_T7: + data->T7_address = object->start_address; + break; case MXT_TOUCH_MULTI_T9: data->T9_reportid_min = min_id; data->T9_reportid_max = max_id; + data->num_touchids = object->num_report_ids + * mxt_obj_instances(object); + break; + case MXT_SPT_MESSAGECOUNT_T44: + data->T44_address = object->start_address; break; case MXT_SPT_GPIOPWM_T19: data->T19_reportid = min_id; break; } + + end_address = object->start_address + + mxt_obj_size(object) * mxt_obj_instances(object) - 1; + + if (end_address >= data->mem_size) + data->mem_size = end_address + 1; + } + + /* Store maximum reportid */ + data->max_reportid = reportid; + + /* If T44 exists, T5 position has to be directly after */ + if (data->T44_address && (data->T5_address != data->T44_address + 1)) { + dev_err(&client->dev, "Invalid T44 position\n"); + error = -EINVAL; + goto free_object_table; + } + + data->msg_buf = kcalloc(data->max_reportid, + data->T5_msg_size, GFP_KERNEL); + if (!data->msg_buf) { + dev_err(&client->dev, "Failed to allocate message buffer\n"); + error = -ENOMEM; + goto free_object_table; } + data->object_table = object_table; + return 0; -} -static void mxt_free_object_table(struct mxt_data *data) -{ - kfree(data->object_table); - data->object_table = NULL; - data->T6_reportid = 0; - data->T9_reportid_min = 0; - data->T9_reportid_max = 0; - data->T19_reportid = 0; +free_object_table: + mxt_free_object_table(data); + return error; } static int mxt_read_t9_resolution(struct mxt_data *data) @@ -1070,55 +1572,259 @@ static 
int mxt_read_t9_resolution(struct mxt_data *data) return 0; } -static int mxt_initialize(struct mxt_data *data) +static int mxt_input_open(struct input_dev *dev); +static void mxt_input_close(struct input_dev *dev); + +static int mxt_initialize_t9_input_device(struct mxt_data *data) { - struct i2c_client *client = data->client; - struct mxt_info *info = &data->info; + struct device *dev = &data->client->dev; + const struct mxt_platform_data *pdata = data->pdata; + struct input_dev *input_dev; int error; + unsigned int num_mt_slots; + unsigned int mt_flags = 0; + int i; - error = mxt_get_info(data); + error = mxt_read_t9_resolution(data); if (error) - return error; + dev_warn(dev, "Failed to initialize T9 resolution\n"); - data->object_table = kcalloc(info->object_num, - sizeof(struct mxt_object), - GFP_KERNEL); - if (!data->object_table) { - dev_err(&client->dev, "Failed to allocate memory\n"); + input_dev = input_allocate_device(); + if (!input_dev) { + dev_err(dev, "Failed to allocate memory\n"); return -ENOMEM; } + input_dev->name = "Atmel maXTouch Touchscreen"; + input_dev->phys = data->phys; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = dev; + input_dev->open = mxt_input_open; + input_dev->close = mxt_input_close; + + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(EV_KEY, input_dev->evbit); + __set_bit(BTN_TOUCH, input_dev->keybit); + + if (pdata->t19_num_keys) { + __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); + + for (i = 0; i < pdata->t19_num_keys; i++) + if (pdata->t19_keymap[i] != KEY_RESERVED) + input_set_capability(input_dev, EV_KEY, + pdata->t19_keymap[i]); + + mt_flags |= INPUT_MT_POINTER; + + input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM); + input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM); + input_abs_set_res(input_dev, ABS_MT_POSITION_X, + MXT_PIXELS_PER_MM); + input_abs_set_res(input_dev, ABS_MT_POSITION_Y, + MXT_PIXELS_PER_MM); + + input_dev->name = "Atmel maXTouch Touchpad"; + } + + /* For single touch */ + input_set_abs_params(input_dev, ABS_X, + 0, data->max_x, 0, 0); + input_set_abs_params(input_dev, ABS_Y, + 0, data->max_y, 0, 0); + input_set_abs_params(input_dev, ABS_PRESSURE, + 0, 255, 0, 0); + + /* For multi touch */ + num_mt_slots = data->T9_reportid_max - data->T9_reportid_min + 1; + error = input_mt_init_slots(input_dev, num_mt_slots, mt_flags); + if (error) { + dev_err(dev, "Error %d initialising slots\n", error); + goto err_free_mem; + } + + input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, + 0, MXT_MAX_AREA, 0, 0); + input_set_abs_params(input_dev, ABS_MT_POSITION_X, + 0, data->max_x, 0, 0); + input_set_abs_params(input_dev, ABS_MT_POSITION_Y, + 0, data->max_y, 0, 0); + input_set_abs_params(input_dev, ABS_MT_PRESSURE, + 0, 255, 0, 0); + + input_set_drvdata(input_dev, data); + + error = input_register_device(input_dev); + if (error) { + dev_err(dev, "Error %d registering input device\n", error); + goto err_free_mem; + } + + data->input_dev = input_dev; + + return 0; + +err_free_mem: + input_free_device(input_dev); + return error; +} + +static int mxt_configure_objects(struct mxt_data *data, + const struct firmware *cfg); + +static void mxt_config_cb(const struct firmware *cfg, void *ctx) +{ + mxt_configure_objects(ctx, cfg); + release_firmware(cfg); +} + +static int mxt_initialize(struct mxt_data *data) +{ + struct i2c_client *client = data->client; + int recovery_attempts = 0; + int error; + + while (1) { + error = mxt_get_info(data); + if (!error) + break; + + /* Check bootloader state */ + error = 
mxt_probe_bootloader(data, false); + if (error) { + dev_info(&client->dev, "Trying alternate bootloader address\n"); + error = mxt_probe_bootloader(data, true); + if (error) { + /* Chip is not in appmode or bootloader mode */ + return error; + } + } + + /* OK, we are in bootloader, see if we can recover */ + if (++recovery_attempts > 1) { + dev_err(&client->dev, "Could not recover from bootloader mode\n"); + /* + * We can reflash from this state, so do not + * abort initialization. + */ + data->in_bootloader = true; + return 0; + } + + /* Attempt to exit bootloader into app mode */ + mxt_send_bootloader_cmd(data, false); + msleep(MXT_FW_RESET_TIME); + } + /* Get object table information */ error = mxt_get_object_table(data); if (error) { dev_err(&client->dev, "Error %d reading object table\n", error); - goto err_free_object_table; + return error; } - /* Check register init values */ - error = mxt_check_reg_init(data); + error = mxt_acquire_irq(data); + if (error) + goto err_free_object_table; + + error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, + &client->dev, GFP_KERNEL, data, + mxt_config_cb); if (error) { - dev_err(&client->dev, "Error %d initializing configuration\n", + dev_err(&client->dev, "Failed to invoke firmware loader: %d\n", error); goto err_free_object_table; } - error = mxt_read_t9_resolution(data); + return 0; + +err_free_object_table: + mxt_free_object_table(data); + return error; +} + +static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) +{ + struct device *dev = &data->client->dev; + int error; + struct t7_config *new_config; + struct t7_config deepsleep = { .active = 0, .idle = 0 }; + + if (sleep == MXT_POWER_CFG_DEEPSLEEP) + new_config = &deepsleep; + else + new_config = &data->t7_cfg; + + error = __mxt_write_reg(data->client, data->T7_address, + sizeof(data->t7_cfg), new_config); + if (error) + return error; + + dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n", + new_config->active, new_config->idle); + + return 0; +} + +static int mxt_init_t7_power_cfg(struct mxt_data *data) +{ + struct device *dev = &data->client->dev; + int error; + bool retry = false; + +recheck: + error = __mxt_read_reg(data->client, data->T7_address, + sizeof(data->t7_cfg), &data->t7_cfg); + if (error) + return error; + + if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) { + if (!retry) { + dev_dbg(dev, "T7 cfg zero, resetting\n"); + mxt_soft_reset(data); + retry = true; + goto recheck; + } else { + dev_dbg(dev, "T7 cfg zero after reset, overriding\n"); + data->t7_cfg.active = 20; + data->t7_cfg.idle = 100; + return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN); + } + } + + dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n", + data->t7_cfg.active, data->t7_cfg.idle); + return 0; +} + +static int mxt_configure_objects(struct mxt_data *data, + const struct firmware *cfg) +{ + struct device *dev = &data->client->dev; + struct mxt_info *info = &data->info; + int error; + + if (cfg) { + error = mxt_update_cfg(data, cfg); + if (error) + dev_warn(dev, "Error %d updating config\n", error); + } + + error = mxt_init_t7_power_cfg(data); if (error) { - dev_err(&client->dev, "Failed to initialize T9 resolution\n"); - goto err_free_object_table; + dev_err(dev, "Failed to initialize power cfg\n"); + return error; } - dev_info(&client->dev, + error = mxt_initialize_t9_input_device(data); + if (error) + return error; + + dev_info(dev, "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", info->family_id, info->variant_id, info->version >> 4, info->version & 0xf, info->build, 
info->object_num); return 0; - -err_free_object_table: - mxt_free_object_table(data); - return error; } /* Firmware Version is returned as Major.Minor.Build */ @@ -1246,30 +1952,47 @@ static int mxt_load_fw(struct device *dev, const char *fn) if (ret) goto release_firmware; - ret = mxt_lookup_bootloader_address(data); - if (ret) - goto release_firmware; + if (!data->in_bootloader) { + /* Change to the bootloader mode */ + data->in_bootloader = true; - /* Change to the bootloader mode */ - data->in_bootloader = true; + ret = mxt_t6_command(data, MXT_COMMAND_RESET, + MXT_BOOT_VALUE, false); + if (ret) + goto release_firmware; - ret = mxt_t6_command(data, MXT_COMMAND_RESET, MXT_BOOT_VALUE, false); - if (ret) - goto release_firmware; + msleep(MXT_RESET_TIME); - msleep(MXT_RESET_TIME); + /* Do not need to scan since we know family ID */ + ret = mxt_lookup_bootloader_address(data, 0); + if (ret) + goto release_firmware; + + mxt_free_input_device(data); + mxt_free_object_table(data); + } else { + enable_irq(data->irq); + } reinit_completion(&data->bl_completion); - ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD); - if (ret) - goto disable_irq; + ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false); + if (ret) { + /* Bootloader may still be unlocked from previous attempt */ + ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA, false); + if (ret) + goto disable_irq; + } else { + dev_info(dev, "Unlocking bootloader\n"); - /* Unlock bootloader */ - mxt_unlock_bootloader(data); + /* Unlock bootloader */ + ret = mxt_send_bootloader_cmd(data, true); + if (ret) + goto disable_irq; + } while (pos < fw->size) { - ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA); + ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA, true); if (ret) goto disable_irq; @@ -1283,7 +2006,7 @@ static int mxt_load_fw(struct device *dev, const char *fn) if (ret) goto disable_irq; - ret = mxt_check_bootloader(data, MXT_FRAME_CRC_PASS); + ret = mxt_check_bootloader(data, MXT_FRAME_CRC_PASS, true); if (ret) { retry++; @@ -1343,13 +2066,7 @@ static ssize_t mxt_update_fw_store(struct device *dev, } else { dev_info(dev, "The firmware update succeeded\n"); - mxt_free_object_table(data); - - mxt_initialize(data); - - enable_irq(data->irq); - - error = mxt_make_highchg(data); + error = mxt_initialize(data); if (error) return error; } @@ -1376,16 +2093,15 @@ static const struct attribute_group mxt_attr_group = { static void mxt_start(struct mxt_data *data) { - /* Touch enable */ - mxt_write_object(data, - MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83); + mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN); + + /* Recalibrate since chip has been in deep sleep */ + mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false); } static void mxt_stop(struct mxt_data *data) { - /* Touch disable */ - mxt_write_object(data, - MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0); + mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP); } static int mxt_input_open(struct input_dev *dev) @@ -1404,138 +2120,113 @@ static void mxt_input_close(struct input_dev *dev) mxt_stop(data); } -static int mxt_probe(struct i2c_client *client, - const struct i2c_device_id *id) +#ifdef CONFIG_OF +static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client) +{ + struct mxt_platform_data *pdata; + u32 *keymap; + u32 keycode; + int proplen, i, ret; + + if (!client->dev.of_node) + return ERR_PTR(-ENODEV); + + pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + if 
(of_find_property(client->dev.of_node, "linux,gpio-keymap", + &proplen)) { + pdata->t19_num_keys = proplen / sizeof(u32); + + keymap = devm_kzalloc(&client->dev, + pdata->t19_num_keys * sizeof(keymap[0]), + GFP_KERNEL); + if (!keymap) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < pdata->t19_num_keys; i++) { + ret = of_property_read_u32_index(client->dev.of_node, + "linux,gpio-keymap", i, &keycode); + if (ret) + keycode = KEY_RESERVED; + + keymap[i] = keycode; + } + + pdata->t19_keymap = keymap; + } + + return pdata; +} +#else +static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client) +{ + dev_dbg(&client->dev, "No platform data specified\n"); + return ERR_PTR(-EINVAL); +} +#endif + +static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) { - const struct mxt_platform_data *pdata = dev_get_platdata(&client->dev); struct mxt_data *data; - struct input_dev *input_dev; + const struct mxt_platform_data *pdata; int error; - unsigned int num_mt_slots; - unsigned int mt_flags = 0; - int i; - if (!pdata) - return -EINVAL; + pdata = dev_get_platdata(&client->dev); + if (!pdata) { + pdata = mxt_parse_dt(client); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL); - input_dev = input_allocate_device(); - if (!data || !input_dev) { + if (!data) { dev_err(&client->dev, "Failed to allocate memory\n"); - error = -ENOMEM; - goto err_free_mem; + return -ENOMEM; } - input_dev->name = "Atmel maXTouch Touchscreen"; snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0", client->adapter->nr, client->addr); - input_dev->phys = data->phys; - - input_dev->id.bustype = BUS_I2C; - input_dev->dev.parent = &client->dev; - input_dev->open = mxt_input_open; - input_dev->close = mxt_input_close; - data->client = client; - data->input_dev = input_dev; data->pdata = pdata; data->irq = client->irq; + i2c_set_clientdata(client, data); init_completion(&data->bl_completion); init_completion(&data->reset_completion); init_completion(&data->crc_completion); - error = mxt_initialize(data); - if (error) - goto err_free_mem; - - __set_bit(EV_ABS, input_dev->evbit); - __set_bit(EV_KEY, input_dev->evbit); - __set_bit(BTN_TOUCH, input_dev->keybit); - - if (pdata->t19_num_keys) { - __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); - - for (i = 0; i < pdata->t19_num_keys; i++) - if (pdata->t19_keymap[i] != KEY_RESERVED) - input_set_capability(input_dev, EV_KEY, - pdata->t19_keymap[i]); - - mt_flags |= INPUT_MT_POINTER; - - input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM); - input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM); - input_abs_set_res(input_dev, ABS_MT_POSITION_X, - MXT_PIXELS_PER_MM); - input_abs_set_res(input_dev, ABS_MT_POSITION_Y, - MXT_PIXELS_PER_MM); - - input_dev->name = "Atmel maXTouch Touchpad"; - } - - /* For single touch */ - input_set_abs_params(input_dev, ABS_X, - 0, data->max_x, 0, 0); - input_set_abs_params(input_dev, ABS_Y, - 0, data->max_y, 0, 0); - input_set_abs_params(input_dev, ABS_PRESSURE, - 0, 255, 0, 0); - - /* For multi touch */ - num_mt_slots = data->T9_reportid_max - data->T9_reportid_min + 1; - error = input_mt_init_slots(input_dev, num_mt_slots, mt_flags); - if (error) - goto err_free_object; - input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, - 0, MXT_MAX_AREA, 0, 0); - input_set_abs_params(input_dev, ABS_MT_POSITION_X, - 0, data->max_x, 0, 0); - input_set_abs_params(input_dev, ABS_MT_POSITION_Y, - 0, data->max_y, 0, 0); - input_set_abs_params(input_dev, ABS_MT_PRESSURE, - 0, 255, 0, 
0); - - input_set_drvdata(input_dev, data); - i2c_set_clientdata(client, data); - error = request_threaded_irq(client->irq, NULL, mxt_interrupt, pdata->irqflags | IRQF_ONESHOT, client->name, data); if (error) { dev_err(&client->dev, "Failed to register interrupt\n"); - goto err_free_object; + goto err_free_mem; } - error = mxt_make_highchg(data); - if (error) - goto err_free_irq; + disable_irq(client->irq); - error = input_register_device(input_dev); - if (error) { - dev_err(&client->dev, "Error %d registering input device\n", - error); + error = mxt_initialize(data); + if (error) goto err_free_irq; - } error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group); if (error) { dev_err(&client->dev, "Failure %d creating sysfs group\n", error); - goto err_unregister_device; + goto err_free_object; } return 0; -err_unregister_device: - input_unregister_device(input_dev); - input_dev = NULL; +err_free_object: + mxt_free_input_device(data); + mxt_free_object_table(data); err_free_irq: free_irq(client->irq, data); -err_free_object: - kfree(data->object_table); err_free_mem: - input_free_device(input_dev); kfree(data); return error; } @@ -1546,8 +2237,8 @@ static int mxt_remove(struct i2c_client *client) sysfs_remove_group(&client->dev.kobj, &mxt_attr_group); free_irq(data->irq, data); - input_unregister_device(data->input_dev); - kfree(data->object_table); + mxt_free_input_device(data); + mxt_free_object_table(data); kfree(data); return 0; @@ -1576,8 +2267,6 @@ static int mxt_resume(struct device *dev) struct mxt_data *data = i2c_get_clientdata(client); struct input_dev *input_dev = data->input_dev; - mxt_soft_reset(data); - mutex_lock(&input_dev->mutex); if (input_dev->users) @@ -1591,6 +2280,12 @@ static int mxt_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume); +static const struct of_device_id mxt_of_match[] = { + { .compatible = "atmel,maxtouch", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mxt_of_match); + static const struct i2c_device_id mxt_id[] = { { "qt602240_ts", 0 }, { "atmel_mxt_ts", 0 }, @@ -1604,6 +2299,7 @@ static struct i2c_driver mxt_driver = { .driver = { .name = "atmel_mxt_ts", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mxt_of_match), .pm = &mxt_pm_ops, }, .probe = mxt_probe, diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index d4f33992ad8c..8857d5b9be71 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -262,7 +262,6 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata, case M06: wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc; wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f; - wrbuf[1] = tsdata->factory_mode ? 
addr & 0x7f : addr & 0x3f; wrbuf[2] = value; wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2]; return edt_ft5x06_ts_readwrite(tsdata->client, 4, @@ -733,8 +732,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) { - if (tsdata->debug_dir) - debugfs_remove_recursive(tsdata->debug_dir); + debugfs_remove_recursive(tsdata->debug_dir); kfree(tsdata->raw_buffer); } diff --git a/drivers/input/touchscreen/ipaq-micro-ts.c b/drivers/input/touchscreen/ipaq-micro-ts.c new file mode 100644 index 000000000000..62c8976e616f --- /dev/null +++ b/drivers/input/touchscreen/ipaq-micro-ts.c @@ -0,0 +1,166 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * h3600 atmel micro companion support, touchscreen subdevice + * Author : Alessandro Gardich <gremlin@gremlin.it> + * Author : Dmitry Artamonow <mad_soft@inbox.ru> + * Author : Linus Walleij <linus.walleij@linaro.org> + * + */ + +#include <asm/byteorder.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/pm.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/input.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/mfd/ipaq-micro.h> + +struct touchscreen_data { + struct input_dev *input; + struct ipaq_micro *micro; +}; + +static void micro_ts_receive(void *data, int len, unsigned char *msg) +{ + struct touchscreen_data *ts = data; + + if (len == 4) { + input_report_abs(ts->input, ABS_X, + be16_to_cpup((__be16 *) &msg[2])); + input_report_abs(ts->input, ABS_Y, + be16_to_cpup((__be16 *) &msg[0])); + input_report_key(ts->input, BTN_TOUCH, 1); + input_sync(ts->input); + } else if (len == 0) { + input_report_abs(ts->input, ABS_X, 0); + input_report_abs(ts->input, ABS_Y, 0); + input_report_key(ts->input, BTN_TOUCH, 0); + input_sync(ts->input); + } +} + +static void micro_ts_toggle_receive(struct touchscreen_data *ts, bool enable) +{ + struct ipaq_micro *micro = ts->micro; + + spin_lock_irq(&micro->lock); + + if (enable) { + micro->ts = micro_ts_receive; + micro->ts_data = ts; + } else { + micro->ts = NULL; + micro->ts_data = NULL; + } + + spin_unlock_irq(&ts->micro->lock); +} + +static int micro_ts_open(struct input_dev *input) +{ + struct touchscreen_data *ts = input_get_drvdata(input); + + micro_ts_toggle_receive(ts, true); + + return 0; +} + +static void micro_ts_close(struct input_dev *input) +{ + struct touchscreen_data *ts = input_get_drvdata(input); + + micro_ts_toggle_receive(ts, false); +} + +static int micro_ts_probe(struct platform_device *pdev) +{ + struct ipaq_micro *micro = dev_get_drvdata(pdev->dev.parent); + struct touchscreen_data *ts; + int error; + + ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + ts->micro = micro; + + ts->input = devm_input_allocate_device(&pdev->dev); + if (!ts->input) { + dev_err(&pdev->dev, "failed to allocate input device\n"); + return -ENOMEM; + } + + ts->input->name = "ipaq micro ts"; + ts->input->open = micro_ts_open; + ts->input->close = micro_ts_close; + + input_set_drvdata(ts->input, ts); + + input_set_capability(ts->input, EV_KEY, BTN_TOUCH); + input_set_capability(ts->input, EV_ABS, ABS_X); + input_set_capability(ts->input, EV_ABS, ABS_Y); + input_set_abs_params(ts->input, ABS_X, 0, 1023, 0, 0); + input_set_abs_params(ts->input, ABS_Y, 0, 1023, 
0, 0); + + error = input_register_device(ts->input); + if (error) { + dev_err(&pdev->dev, "error registering touch input\n"); + return error; + } + + platform_set_drvdata(pdev, ts); + + dev_info(&pdev->dev, "iPAQ micro touchscreen\n"); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int micro_ts_suspend(struct device *dev) +{ + struct touchscreen_data *ts = dev_get_drvdata(dev); + + micro_ts_toggle_receive(ts, false); + + return 0; +} + +static int micro_ts_resume(struct device *dev) +{ + struct touchscreen_data *ts = dev_get_drvdata(dev); + struct input_dev *input = ts->input; + + mutex_lock(&input->mutex); + + if (input->users) + micro_ts_toggle_receive(ts, true); + + mutex_unlock(&input->mutex); + + return 0; +} +#endif + +static const struct dev_pm_ops micro_ts_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(micro_ts_suspend, micro_ts_resume) +}; + +static struct platform_driver micro_ts_device_driver = { + .driver = { + .name = "ipaq-micro-ts", + .pm = &micro_ts_dev_pm_ops, + }, + .probe = micro_ts_probe, +}; +module_platform_driver(micro_ts_device_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("driver for iPAQ Atmel micro touchscreen"); +MODULE_ALIAS("platform:ipaq-micro-ts"); diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c index 7324c5c0fb86..651ec71a5c68 100644 --- a/drivers/input/touchscreen/jornada720_ts.c +++ b/drivers/input/touchscreen/jornada720_ts.c @@ -36,22 +36,21 @@ struct jornada_ts { static void jornada720_ts_collect_data(struct jornada_ts *jornada_ts) { + /* 3 low word X samples */ + jornada_ts->x_data[0] = jornada_ssp_byte(TXDUMMY); + jornada_ts->x_data[1] = jornada_ssp_byte(TXDUMMY); + jornada_ts->x_data[2] = jornada_ssp_byte(TXDUMMY); - /* 3 low word X samples */ - jornada_ts->x_data[0] = jornada_ssp_byte(TXDUMMY); - jornada_ts->x_data[1] = jornada_ssp_byte(TXDUMMY); - jornada_ts->x_data[2] = jornada_ssp_byte(TXDUMMY); + /* 3 low word Y samples */ + jornada_ts->y_data[0] = jornada_ssp_byte(TXDUMMY); + jornada_ts->y_data[1] = jornada_ssp_byte(TXDUMMY); + jornada_ts->y_data[2] = jornada_ssp_byte(TXDUMMY); - /* 3 low word Y samples */ - jornada_ts->y_data[0] = jornada_ssp_byte(TXDUMMY); - jornada_ts->y_data[1] = jornada_ssp_byte(TXDUMMY); - jornada_ts->y_data[2] = jornada_ssp_byte(TXDUMMY); + /* combined x samples bits */ + jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY); - /* combined x samples bits */ - jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY); - - /* combined y samples bits */ - jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY); + /* combined y samples bits */ + jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY); } static int jornada720_ts_average(int coords[4]) @@ -104,13 +103,13 @@ static int jornada720_ts_probe(struct platform_device *pdev) struct input_dev *input_dev; int error; - jornada_ts = kzalloc(sizeof(struct jornada_ts), GFP_KERNEL); - input_dev = input_allocate_device(); + jornada_ts = devm_kzalloc(&pdev->dev, sizeof(*jornada_ts), GFP_KERNEL); + if (!jornada_ts) + return -ENOMEM; - if (!jornada_ts || !input_dev) { - error = -ENOMEM; - goto fail1; - } + input_dev = devm_input_allocate_device(&pdev->dev); + if (!input_dev) + return -ENOMEM; platform_set_drvdata(pdev, jornada_ts); @@ -126,36 +125,18 @@ static int jornada720_ts_probe(struct platform_device *pdev) input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0); input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0); - error = request_irq(IRQ_GPIO9, - jornada720_ts_interrupt, - IRQF_TRIGGER_RISING, - "HP7XX Touchscreen driver", pdev); + error 
= devm_request_irq(&pdev->dev, IRQ_GPIO9, + jornada720_ts_interrupt, + IRQF_TRIGGER_RISING, + "HP7XX Touchscreen driver", pdev); if (error) { - printk(KERN_INFO "HP7XX TS : Unable to acquire irq!\n"); - goto fail1; + dev_err(&pdev->dev, "HP7XX TS : Unable to acquire irq!\n"); + return error; } error = input_register_device(jornada_ts->dev); if (error) - goto fail2; - - return 0; - - fail2: - free_irq(IRQ_GPIO9, pdev); - fail1: - input_free_device(input_dev); - kfree(jornada_ts); - return error; -} - -static int jornada720_ts_remove(struct platform_device *pdev) -{ - struct jornada_ts *jornada_ts = platform_get_drvdata(pdev); - - free_irq(IRQ_GPIO9, pdev); - input_unregister_device(jornada_ts->dev); - kfree(jornada_ts); + return error; return 0; } @@ -165,7 +146,6 @@ MODULE_ALIAS("platform:jornada_ts"); static struct platform_driver jornada720_ts_driver = { .probe = jornada720_ts_probe, - .remove = jornada720_ts_remove, .driver = { .name = "jornada_ts", .owner = THIS_MODULE, diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c index 00510a9836b3..8b47e1fecb25 100644 --- a/drivers/input/touchscreen/mcs5000_ts.c +++ b/drivers/input/touchscreen/mcs5000_ts.c @@ -248,8 +248,7 @@ static int mcs5000_ts_probe(struct i2c_client *client, return 0; } -#ifdef CONFIG_PM -static int mcs5000_ts_suspend(struct device *dev) +static int __maybe_unused mcs5000_ts_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); @@ -259,7 +258,7 @@ static int mcs5000_ts_suspend(struct device *dev) return 0; } -static int mcs5000_ts_resume(struct device *dev) +static int __maybe_unused mcs5000_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mcs5000_ts_data *data = i2c_get_clientdata(client); @@ -269,7 +268,6 @@ static int mcs5000_ts_resume(struct device *dev) return 0; } -#endif static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume); diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c index 19c6c0fdc94b..fc49c75317d1 100644 --- a/drivers/input/touchscreen/pixcir_i2c_ts.c +++ b/drivers/input/touchscreen/pixcir_i2c_ts.c @@ -23,22 +23,51 @@ #include <linux/slab.h> #include <linux/i2c.h> #include <linux/input.h> +#include <linux/input/mt.h> #include <linux/input/pixcir_ts.h> #include <linux/gpio.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_device.h> + +#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */ struct pixcir_i2c_ts_data { struct i2c_client *client; struct input_dev *input; - const struct pixcir_ts_platform_data *chip; + const struct pixcir_ts_platform_data *pdata; bool running; + int max_fingers; /* Max fingers supported in this instance */ +}; + +struct pixcir_touch { + int x; + int y; + int id; +}; + +struct pixcir_report_data { + int num_touches; + struct pixcir_touch touches[PIXCIR_MAX_SLOTS]; }; -static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data) +static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata, + struct pixcir_report_data *report) { - struct pixcir_i2c_ts_data *tsdata = data; - u8 rdbuf[10], wrbuf[1] = { 0 }; + u8 rdbuf[2 + PIXCIR_MAX_SLOTS * 5]; + u8 wrbuf[1] = { 0 }; + u8 *bufptr; u8 touch; - int ret; + int ret, i; + int readsize; + const struct pixcir_i2c_chip_data *chip = &tsdata->pdata->chip; + + memset(report, 0, sizeof(struct pixcir_report_data)); + + i = chip->has_hw_ids ? 
1 : 0; + readsize = 2 + tsdata->max_fingers * (4 + i); + if (readsize > sizeof(rdbuf)) + readsize = sizeof(rdbuf); ret = i2c_master_send(tsdata->client, wrbuf, sizeof(wrbuf)); if (ret != sizeof(wrbuf)) { @@ -48,7 +77,7 @@ static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data) return; } - ret = i2c_master_recv(tsdata->client, rdbuf, sizeof(rdbuf)); + ret = i2c_master_recv(tsdata->client, rdbuf, readsize); if (ret != sizeof(rdbuf)) { dev_err(&tsdata->client->dev, "%s: i2c_master_recv failed(), ret=%d\n", @@ -56,45 +85,103 @@ static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data) return; } - touch = rdbuf[0]; - if (touch) { - u16 posx1 = (rdbuf[3] << 8) | rdbuf[2]; - u16 posy1 = (rdbuf[5] << 8) | rdbuf[4]; - u16 posx2 = (rdbuf[7] << 8) | rdbuf[6]; - u16 posy2 = (rdbuf[9] << 8) | rdbuf[8]; - - input_report_key(tsdata->input, BTN_TOUCH, 1); - input_report_abs(tsdata->input, ABS_X, posx1); - input_report_abs(tsdata->input, ABS_Y, posy1); - - input_report_abs(tsdata->input, ABS_MT_POSITION_X, posx1); - input_report_abs(tsdata->input, ABS_MT_POSITION_Y, posy1); - input_mt_sync(tsdata->input); - - if (touch == 2) { - input_report_abs(tsdata->input, - ABS_MT_POSITION_X, posx2); - input_report_abs(tsdata->input, - ABS_MT_POSITION_Y, posy2); - input_mt_sync(tsdata->input); + touch = rdbuf[0] & 0x7; + if (touch > tsdata->max_fingers) + touch = tsdata->max_fingers; + + report->num_touches = touch; + bufptr = &rdbuf[2]; + + for (i = 0; i < touch; i++) { + report->touches[i].x = (bufptr[1] << 8) | bufptr[0]; + report->touches[i].y = (bufptr[3] << 8) | bufptr[2]; + + if (chip->has_hw_ids) { + report->touches[i].id = bufptr[4]; + bufptr = bufptr + 5; + } else { + bufptr = bufptr + 4; + } + } +} + +static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts, + struct pixcir_report_data *report) +{ + struct input_mt_pos pos[PIXCIR_MAX_SLOTS]; + int slots[PIXCIR_MAX_SLOTS]; + struct pixcir_touch *touch; + int n, i, slot; + struct device *dev = &ts->client->dev; + const struct pixcir_i2c_chip_data *chip = &ts->pdata->chip; + + n = report->num_touches; + if (n > PIXCIR_MAX_SLOTS) + n = PIXCIR_MAX_SLOTS; + + if (!chip->has_hw_ids) { + for (i = 0; i < n; i++) { + touch = &report->touches[i]; + pos[i].x = touch->x; + pos[i].y = touch->y; + } + + input_mt_assign_slots(ts->input, slots, pos, n); + } + + for (i = 0; i < n; i++) { + touch = &report->touches[i]; + + if (chip->has_hw_ids) { + slot = input_mt_get_slot_by_key(ts->input, touch->id); + if (slot < 0) { + dev_dbg(dev, "no free slot for id 0x%x\n", + touch->id); + continue; + } + } else { + slot = slots[i]; } - } else { - input_report_key(tsdata->input, BTN_TOUCH, 0); + + input_mt_slot(ts->input, slot); + input_mt_report_slot_state(ts->input, + MT_TOOL_FINGER, true); + + input_event(ts->input, EV_ABS, ABS_MT_POSITION_X, touch->x); + input_event(ts->input, EV_ABS, ABS_MT_POSITION_Y, touch->y); + + dev_dbg(dev, "%d: slot %d, x %d, y %d\n", + i, slot, touch->x, touch->y); } - input_sync(tsdata->input); + input_mt_sync_frame(ts->input); + input_sync(ts->input); } static irqreturn_t pixcir_ts_isr(int irq, void *dev_id) { struct pixcir_i2c_ts_data *tsdata = dev_id; - const struct pixcir_ts_platform_data *pdata = tsdata->chip; + const struct pixcir_ts_platform_data *pdata = tsdata->pdata; + struct pixcir_report_data report; while (tsdata->running) { - pixcir_ts_poscheck(tsdata); - - if (gpio_get_value(pdata->gpio_attb)) + /* parse packet */ + pixcir_ts_parse(tsdata, &report); + + /* report it */ + pixcir_ts_report(tsdata, &report); + + if 
(gpio_get_value(pdata->gpio_attb)) { + if (report.num_touches) { + /* + * Last report with no finger up? + * Do it now then. + */ + input_mt_sync_frame(tsdata->input); + input_sync(tsdata->input); + } break; + } msleep(20); } @@ -323,16 +410,69 @@ unlock: static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops, pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume); +#ifdef CONFIG_OF +static const struct of_device_id pixcir_of_match[]; + +static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev) +{ + struct pixcir_ts_platform_data *pdata; + struct device_node *np = dev->of_node; + const struct of_device_id *match; + + match = of_match_device(of_match_ptr(pixcir_of_match), dev); + if (!match) + return ERR_PTR(-EINVAL); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->chip = *(const struct pixcir_i2c_chip_data *)match->data; + + pdata->gpio_attb = of_get_named_gpio(np, "attb-gpio", 0); + /* gpio_attb validity is checked in probe */ + + if (of_property_read_u32(np, "touchscreen-size-x", &pdata->x_max)) { + dev_err(dev, "Failed to get touchscreen-size-x property\n"); + return ERR_PTR(-EINVAL); + } + pdata->x_max -= 1; + + if (of_property_read_u32(np, "touchscreen-size-y", &pdata->y_max)) { + dev_err(dev, "Failed to get touchscreen-size-y property\n"); + return ERR_PTR(-EINVAL); + } + pdata->y_max -= 1; + + dev_dbg(dev, "%s: x %d, y %d, gpio %d\n", __func__, + pdata->x_max + 1, pdata->y_max + 1, pdata->gpio_attb); + + return pdata; +} +#else +static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif + static int pixcir_i2c_ts_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct pixcir_ts_platform_data *pdata = dev_get_platdata(&client->dev); struct device *dev = &client->dev; + struct device_node *np = dev->of_node; struct pixcir_i2c_ts_data *tsdata; struct input_dev *input; int error; + if (np && !pdata) { + pdata = pixcir_parse_dt(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + if (!pdata) { dev_err(&client->dev, "platform data not defined\n"); return -EINVAL; @@ -343,6 +483,11 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client, return -EINVAL; } + if (!pdata->chip.max_fingers) { + dev_err(dev, "Invalid max_fingers in pdata\n"); + return -EINVAL; + } + tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL); if (!tsdata) return -ENOMEM; @@ -355,7 +500,7 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client, tsdata->client = client; tsdata->input = input; - tsdata->chip = pdata; + tsdata->pdata = pdata; input->name = client->name; input->id.bustype = BUS_I2C; @@ -371,6 +516,20 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client, input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0); + tsdata->max_fingers = tsdata->pdata->chip.max_fingers; + if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) { + tsdata->max_fingers = PIXCIR_MAX_SLOTS; + dev_info(dev, "Limiting maximum fingers to %d\n", + tsdata->max_fingers); + } + + error = input_mt_init_slots(input, tsdata->max_fingers, + INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); + if (error) { + dev_err(dev, "Error initializing Multi-Touch slots\n"); + return error; + } + input_set_drvdata(input, tsdata); error = devm_gpio_request_one(dev, pdata->gpio_attb, @@ -419,15 +578,36 @@ static int pixcir_i2c_ts_remove(struct i2c_client *client) static const struct i2c_device_id pixcir_i2c_ts_id[] = { { "pixcir_ts", 0 
}, + { "pixcir_tangoc", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id); +#ifdef CONFIG_OF +static const struct pixcir_i2c_chip_data pixcir_ts_data = { + .max_fingers = 2, + /* no hw id support */ +}; + +static const struct pixcir_i2c_chip_data pixcir_tangoc_data = { + .max_fingers = 5, + .has_hw_ids = true, +}; + +static const struct of_device_id pixcir_of_match[] = { + { .compatible = "pixcir,pixcir_ts", .data = &pixcir_ts_data }, + { .compatible = "pixcir,pixcir_tangoc", .data = &pixcir_tangoc_data }, + { } +}; +MODULE_DEVICE_TABLE(of, pixcir_of_match); +#endif + static struct i2c_driver pixcir_i2c_ts_driver = { .driver = { .owner = THIS_MODULE, .name = "pixcir_ts", .pm = &pixcir_dev_pm_ops, + .of_match_table = of_match_ptr(pixcir_of_match), }, .probe = pixcir_i2c_ts_probe, .remove = pixcir_i2c_ts_remove, diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c index 19cb247dbb86..5a69ded9b53c 100644 --- a/drivers/input/touchscreen/s3c2410_ts.c +++ b/drivers/input/touchscreen/s3c2410_ts.c @@ -264,7 +264,7 @@ static int s3c2410ts_probe(struct platform_device *pdev) return -ENOENT; } - clk_enable(ts.clock); + clk_prepare_enable(ts.clock); dev_dbg(dev, "got and enabled clocks\n"); ts.irq_tc = ret = platform_get_irq(pdev, 0); @@ -369,7 +369,7 @@ static int s3c2410ts_remove(struct platform_device *pdev) free_irq(ts.irq_tc, ts.input); del_timer_sync(&touch_timer); - clk_disable(ts.clock); + clk_disable_unprepare(ts.clock); clk_put(ts.clock); input_unregister_device(ts.input); diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c index 16b52115c27f..705ffa1e064a 100644 --- a/drivers/input/touchscreen/wm9712.c +++ b/drivers/input/touchscreen/wm9712.c @@ -41,7 +41,7 @@ */ static int rpu = 8; module_param(rpu, int, 0); -MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect."); +MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect."); /* * Set current used for pressure measurement. diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c index 7405353199d7..572a5a64face 100644 --- a/drivers/input/touchscreen/wm9713.c +++ b/drivers/input/touchscreen/wm9713.c @@ -41,7 +41,7 @@ */ static int rpu = 8; module_param(rpu, int, 0); -MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect."); +MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect."); /* * Set current used for pressure measurement. 
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index feea85b52fa8..8ba48f5eff7b 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -29,6 +29,8 @@ #include <linux/sysfs.h> #include <linux/input/mt.h> #include <linux/platform_data/zforce_ts.h> +#include <linux/regulator/consumer.h> +#include <linux/delay.h> #include <linux/of.h> #include <linux/of_gpio.h> @@ -117,6 +119,8 @@ struct zforce_ts { const struct zforce_ts_platdata *pdata; char phys[32]; + struct regulator *reg_vdd; + bool suspending; bool suspended; bool boot_complete; @@ -690,6 +694,11 @@ static void zforce_reset(void *data) struct zforce_ts *ts = data; gpio_set_value(ts->pdata->gpio_rst, 0); + + udelay(10); + + if (!IS_ERR(ts->reg_vdd)) + regulator_disable(ts->reg_vdd); } static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev) @@ -765,10 +774,32 @@ static int zforce_probe(struct i2c_client *client, return ret; } + ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd"); + if (IS_ERR(ts->reg_vdd)) { + ret = PTR_ERR(ts->reg_vdd); + if (ret == -EPROBE_DEFER) + return ret; + } else { + ret = regulator_enable(ts->reg_vdd); + if (ret) + return ret; + + /* + * according to datasheet add 100us grace time after regular + * regulator enable delay. + */ + udelay(100); + } + ret = devm_add_action(&client->dev, zforce_reset, ts); if (ret) { dev_err(&client->dev, "failed to register reset action, %d\n", ret); + + /* hereafter the regulator will be disabled by the action */ + if (!IS_ERR(ts->reg_vdd)) + regulator_disable(ts->reg_vdd); + return ret; } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 18405314168b..ecb0109a5360 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3149,14 +3149,16 @@ free_domains: static void cleanup_domain(struct protection_domain *domain) { - struct iommu_dev_data *dev_data, *next; + struct iommu_dev_data *entry; unsigned long flags; write_lock_irqsave(&amd_iommu_devtable_lock, flags); - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { - __detach_device(dev_data); - atomic_set(&dev_data->bind, 0); + while (!list_empty(&domain->dev_list)) { + entry = list_first_entry(&domain->dev_list, + struct iommu_dev_data, list); + __detach_device(entry); + atomic_set(&entry->bind, 0); } write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index ca18d6d42a9b..a83cc2a2a2ca 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -146,6 +146,8 @@ #define ID0_CTTW (1 << 14) #define ID0_NUMIRPT_SHIFT 16 #define ID0_NUMIRPT_MASK 0xff +#define ID0_NUMSIDB_SHIFT 9 +#define ID0_NUMSIDB_MASK 0xf #define ID0_NUMSMRG_SHIFT 0 #define ID0_NUMSMRG_MASK 0xff @@ -524,9 +526,18 @@ static int register_smmu_master(struct arm_smmu_device *smmu, master->of_node = masterspec->np; master->cfg.num_streamids = masterspec->args_count; - for (i = 0; i < master->cfg.num_streamids; ++i) - master->cfg.streamids[i] = masterspec->args[i]; + for (i = 0; i < master->cfg.num_streamids; ++i) { + u16 streamid = masterspec->args[i]; + if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && + (streamid >= smmu->num_mapping_groups)) { + dev_err(dev, + "stream ID for master device %s greater than maximum allowed (%d)\n", + masterspec->np->name, smmu->num_mapping_groups); + return -ERANGE; + } + master->cfg.streamids[i] = streamid; + } return insert_smmu_master(smmu, master); } @@ -623,7 +634,7 @@ static 
irqreturn_t arm_smmu_context_fault(int irq, void *dev) if (fsr & FSR_IGN) dev_err_ratelimited(smmu->dev, - "Unexpected context fault (fsr 0x%u)\n", + "Unexpected context fault (fsr 0x%x)\n", fsr); fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); @@ -752,6 +763,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); break; case 39: + case 40: reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); break; case 42: @@ -773,6 +785,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); break; case 39: + case 40: reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); break; case 42: @@ -843,8 +856,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) reg |= TTBCR_EAE | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | - (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) | - (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); + (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); + + if (!stage1) + reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); /* MAIR0 (stage-1 only) */ @@ -868,10 +884,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) static int arm_smmu_init_domain_context(struct iommu_domain *domain, struct arm_smmu_device *smmu) { - int irq, ret, start; + int irq, start, ret = 0; + unsigned long flags; struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + spin_lock_irqsave(&smmu_domain->lock, flags); + if (smmu_domain->smmu) + goto out_unlock; + if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { /* * We will likely want to change this if/when KVM gets @@ -890,7 +911,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); if (IS_ERR_VALUE(ret)) - return ret; + goto out_unlock; cfg->cbndx = ret; if (smmu->version == 1) { @@ -900,6 +921,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, cfg->irptndx = cfg->cbndx; } + ACCESS_ONCE(smmu_domain->smmu) = smmu; + arm_smmu_init_context_bank(smmu_domain); + spin_unlock_irqrestore(&smmu_domain->lock, flags); + irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, "arm-smmu-context-fault", domain); @@ -907,15 +932,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", cfg->irptndx, irq); cfg->irptndx = INVALID_IRPTNDX; - goto out_free_context; } - smmu_domain->smmu = smmu; - arm_smmu_init_context_bank(smmu_domain); return 0; -out_free_context: - __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); +out_unlock: + spin_unlock_irqrestore(&smmu_domain->lock, flags); return ret; } @@ -975,7 +997,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd) { pgtable_t table = pmd_pgtable(*pmd); - pgtable_page_dtor(table); __free_page(table); } @@ -1108,6 +1129,9 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, void __iomem *gr0_base = ARM_SMMU_GR0(smmu); struct arm_smmu_smr *smrs = cfg->smrs; + if (!smrs) + return; + /* Invalidate the SMRs before freeing back to the allocator */ for (i = 0; i < cfg->num_streamids; ++i) { u8 idx = smrs[i].idx; @@ -1120,20 +1144,6 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, kfree(smrs); } -static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu, - 
struct arm_smmu_master_cfg *cfg) -{ - int i; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); - - for (i = 0; i < cfg->num_streamids; ++i) { - u16 sid = cfg->streamids[i]; - - writel_relaxed(S2CR_TYPE_BYPASS, - gr0_base + ARM_SMMU_GR0_S2CR(sid)); - } -} - static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master_cfg *cfg) { @@ -1160,23 +1170,30 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master_cfg *cfg) { + int i; struct arm_smmu_device *smmu = smmu_domain->smmu; + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); /* * We *must* clear the S2CR first, because freeing the SMR means * that it can be re-allocated immediately. */ - arm_smmu_bypass_stream_mapping(smmu, cfg); + for (i = 0; i < cfg->num_streamids; ++i) { + u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; + + writel_relaxed(S2CR_TYPE_BYPASS, + gr0_base + ARM_SMMU_GR0_S2CR(idx)); + } + arm_smmu_master_free_smrs(smmu, cfg); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { - int ret = -EINVAL; + int ret; struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *smmu; + struct arm_smmu_device *smmu, *dom_smmu; struct arm_smmu_master_cfg *cfg; - unsigned long flags; smmu = dev_get_master_dev(dev)->archdata.iommu; if (!smmu) { @@ -1188,20 +1205,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * Sanity check the domain. We don't support domains across * different SMMUs. */ - spin_lock_irqsave(&smmu_domain->lock, flags); - if (!smmu_domain->smmu) { + dom_smmu = ACCESS_ONCE(smmu_domain->smmu); + if (!dom_smmu) { /* Now that we have a master, we can finalise the domain */ ret = arm_smmu_init_domain_context(domain, smmu); if (IS_ERR_VALUE(ret)) - goto err_unlock; - } else if (smmu_domain->smmu != smmu) { + return ret; + + dom_smmu = smmu_domain->smmu; + } + + if (dom_smmu != smmu) { dev_err(dev, "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->smmu->dev), - dev_name(smmu->dev)); - goto err_unlock; + dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); + return -EINVAL; } - spin_unlock_irqrestore(&smmu_domain->lock, flags); /* Looks ok, so add the device to the domain */ cfg = find_smmu_master_cfg(smmu_domain->smmu, dev); @@ -1209,10 +1228,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return -ENODEV; return arm_smmu_domain_add_master(smmu_domain, cfg); - -err_unlock: - spin_unlock_irqrestore(&smmu_domain->lock, flags); - return ret; } static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) @@ -1247,10 +1262,6 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, return -ENOMEM; arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); - if (!pgtable_page_ctor(table)) { - __free_page(table); - return -ENOMEM; - } pmd_populate(NULL, pmd, table); arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); } @@ -1626,7 +1637,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) /* Mark all SMRn as invalid and all S2CRn as bypass */ for (i = 0; i < smmu->num_mapping_groups; ++i) { - writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); + writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i)); } @@ -1761,6 +1772,9 @@ static int arm_smmu_device_cfg_probe(struct 
arm_smmu_device *smmu) dev_notice(smmu->dev, "\tstream matching with %u register groups, mask 0x%x", smmu->num_mapping_groups, mask); + } else { + smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & + ID0_NUMSIDB_MASK; } /* ID1 */ @@ -1794,11 +1808,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) * Stage-1 output limited by stage-2 input size due to pgd * allocation (PTRS_PER_PGD). */ + if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { #ifdef CONFIG_64BIT - smmu->s1_output_size = min_t(unsigned long, VA_BITS, size); + smmu->s1_output_size = min_t(unsigned long, VA_BITS, size); #else - smmu->s1_output_size = min(32UL, size); + smmu->s1_output_size = min(32UL, size); #endif + } else { + smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, + size); + } /* The stage-2 output mask is also applied for bypass */ size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); @@ -1889,6 +1908,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) smmu->irqs[i] = irq; } + err = arm_smmu_device_cfg_probe(smmu); + if (err) + return err; + i = 0; smmu->masters = RB_ROOT; while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", @@ -1905,10 +1928,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) } dev_notice(dev, "registered %d master devices\n", i); - err = arm_smmu_device_cfg_probe(smmu); - if (err) - goto out_put_masters; - parse_driver_options(smmu); if (smmu->version > 1 && diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 60ab474bfff3..06d268abe951 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -678,8 +678,7 @@ static int __init dmar_acpi_dev_scope_init(void) andd->device_name); continue; } - acpi_bus_get_device(h, &adev); - if (!adev) { + if (acpi_bus_get_device(h, &adev)) { pr_err("Failed to get device for ACPI object %s\n", andd->device_name); continue; diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 61d1dafa242d..56feed7cec15 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -984,7 +984,7 @@ static int fsl_pamu_add_device(struct device *dev) struct iommu_group *group = ERR_PTR(-ENODEV); struct pci_dev *pdev; const u32 *prop; - int ret, len; + int ret = 0, len; /* * For platform devices we allocate a separate group for @@ -1007,7 +1007,13 @@ static int fsl_pamu_add_device(struct device *dev) if (IS_ERR(group)) return PTR_ERR(group); - ret = iommu_group_add_device(group, dev); + /* + * Check if device has already been added to an iommu group. + * Group could have already been created for a PCI device in + * the iommu_group_get_for_dev path. + */ + if (!dev->iommu_group) + ret = iommu_group_add_device(group, dev); iommu_group_put(group); return ret; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d1f5caad04f9..5619f264862d 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3869,6 +3869,14 @@ static int device_notifier(struct notifier_block *nb, action != BUS_NOTIFY_DEL_DEVICE) return 0; + /* + * If the device is still attached to a device driver we can't + * tear down the domain yet as DMA mappings may still be in use. + * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that. 
+ */ + if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL) + return 0; + domain = find_domain(dev); if (!domain) return 0; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 169836020208..0639b9274b11 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -678,15 +678,17 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) */ struct iommu_group *iommu_group_get_for_dev(struct device *dev) { - struct iommu_group *group = ERR_PTR(-EIO); + struct iommu_group *group; int ret; group = iommu_group_get(dev); if (group) return group; - if (dev_is_pci(dev)) - group = iommu_group_get_for_pci_dev(to_pci_dev(dev)); + if (!dev_is_pci(dev)) + return ERR_PTR(-EINVAL); + + group = iommu_group_get_for_pci_dev(to_pci_dev(dev)); if (IS_ERR(group)) return group; @@ -995,7 +997,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, size_t orig_size = size; int ret = 0; - if (unlikely(domain->ops->unmap == NULL || + if (unlikely(domain->ops->map == NULL || domain->ops->pgsize_bitmap == 0UL)) return -ENODEV; diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 792da5ea6d12..3ded3894623c 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -35,7 +35,8 @@ #include <linux/of_iommu.h> #include <linux/debugfs.h> #include <linux/seq_file.h> -#include <linux/tegra-ahb.h> + +#include <soc/tegra/ahb.h> #include <asm/page.h> #include <asm/cacheflush.h> diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index eb5df9db9adc..a31a9e40eed9 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -28,7 +28,6 @@ config ARM_VIC config ARM_VIC_NR int default 4 if ARCH_S5PV210 - default 3 if ARCH_S5PC100 default 2 depends on ARM_VIC help diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index f8636a650cf6..5945223b73fa 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> +#include <linux/interrupt.h> #include <linux/of_address.h> #include <linux/of_irq.h> diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index 85c2985d8bcb..bbbaf5de65d2 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c @@ -220,7 +220,7 @@ static int __init crossbar_of_init(struct device_node *node) of_property_read_u32_index(node, "ti,irqs-reserved", i, &entry); - if (entry > max) { + if (entry >= max) { pr_err("Invalid reserved entry\n"); ret = -EINVAL; goto err_irq_map; @@ -238,7 +238,7 @@ static int __init crossbar_of_init(struct device_node *node) of_property_read_u32_index(node, "ti,irqs-skip", i, &entry); - if (entry > max) { + if (entry >= max) { pr_err("Invalid skip entry\n"); ret = -EINVAL; goto err_irq_map; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 57eaa5a0b1e3..a0698b4f0303 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -36,7 +36,7 @@ struct gic_chip_data { void __iomem *dist_base; void __iomem **redist_base; - void __percpu __iomem **rdist; + void __iomem * __percpu *rdist; struct irq_domain *domain; u64 redist_stride; u32 redist_regions; @@ -104,7 +104,7 @@ static void gic_redist_wait_for_rwp(void) } /* Low level accessors */ -static u64 gic_read_iar(void) +static u64 __maybe_unused gic_read_iar(void) { u64 irqstat; @@ -112,24 +112,24 @@ static u64 gic_read_iar(void) return irqstat; } -static void 
gic_write_pmr(u64 val) +static void __maybe_unused gic_write_pmr(u64 val) { asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val)); } -static void gic_write_ctlr(u64 val) +static void __maybe_unused gic_write_ctlr(u64 val) { asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val)); isb(); } -static void gic_write_grpen1(u64 val) +static void __maybe_unused gic_write_grpen1(u64 val) { asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val)); isb(); } -static void gic_write_sgi1r(u64 val) +static void __maybe_unused gic_write_sgi1r(u64 val) { asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); } @@ -200,19 +200,6 @@ static void gic_poke_irq(struct irq_data *d, u32 offset) rwp_wait(); } -static int gic_peek_irq(struct irq_data *d, u32 offset) -{ - u32 mask = 1 << (gic_irq(d) % 32); - void __iomem *base; - - if (gic_irq_in_rdist(d)) - base = gic_data_rdist_sgi_base(); - else - base = gic_data.dist_base; - - return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); -} - static void gic_mask_irq(struct irq_data *d) { gic_poke_irq(d, GICD_ICENABLER); @@ -401,6 +388,19 @@ static void gic_cpu_init(void) } #ifdef CONFIG_SMP +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + u32 mask = 1 << (gic_irq(d) % 32); + void __iomem *base; + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else + base = gic_data.dist_base; + + return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); +} + static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4b959e606fe8..dda6dbc23565 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -867,7 +867,7 @@ static int gic_routable_irq_domain_xlate(struct irq_domain *d, return 0; } -const struct irq_domain_ops gic_default_routable_irq_domain_ops = { +static const struct irq_domain_ops gic_default_routable_irq_domain_ops = { .map = gic_routable_irq_domain_map, .unmap = gic_routable_irq_domain_unmap, .xlate = gic_routable_irq_domain_xlate, diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 3ae2bb8d9cf2..ccf58548b161 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -14,6 +14,8 @@ #include <asm/exception.h> #include <asm/mach/irq.h> +#include "irqchip.h" + #define IRQ_STATUS 0x00 #define IRQ_RAW_STATUS 0x04 #define IRQ_ENABLE_SET 0x08 @@ -26,6 +28,8 @@ #define FIQ_ENABLE_SET 0x28 #define FIQ_ENABLE_CLEAR 0x2C +#define PIC_ENABLES 0x20 /* set interrupt pass through bits */ + /** * struct fpga_irq_data - irq data container for the FPGA IRQ controller * @base: memory offset in virtual memory @@ -201,14 +205,26 @@ int __init fpga_irq_of_init(struct device_node *node, /* Some chips are cascaded from a parent IRQ */ parent_irq = irq_of_parse_and_map(node, 0); - if (!parent_irq) + if (!parent_irq) { + set_handle_irq(fpga_handle_irq); parent_irq = -1; + } fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node); writel(clear_mask, base + IRQ_ENABLE_CLEAR); writel(clear_mask, base + FIQ_ENABLE_CLEAR); + /* + * On Versatile AB/PB, some secondary interrupts have a direct + * pass-thru to the primary controller for IRQs 20 and 22-31 which need + * to be enabled. See section 3.10 of the Versatile AB user guide. 
+ */ + if (of_device_is_compatible(node, "arm,versatile-sic")) + writel(0xffd00000, base + PIC_ENABLES); + return 0; } +IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init); +IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init); #endif diff --git a/drivers/isdn/hardware/eicon/xdi_msg.h b/drivers/isdn/hardware/eicon/xdi_msg.h index 58368f7b5cba..2498c349a32e 100644 --- a/drivers/isdn/hardware/eicon/xdi_msg.h +++ b/drivers/isdn/hardware/eicon/xdi_msg.h @@ -1,6 +1,6 @@ /* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */ -#ifndef __DIVA_XDI_UM_CFG_MESSSGE_H__ +#ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__ #define __DIVA_XDI_UM_CFG_MESSAGE_H__ /* diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index a1b044e7eaad..8c96e2ddf43b 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -32,14 +32,6 @@ config LEDS_88PM860X This option enables support for on-chip LED drivers found on Marvell Semiconductor 88PM8606 PMIC. -config LEDS_ATMEL_PWM - tristate "LED Support using Atmel PWM outputs" - depends on LEDS_CLASS - depends on ATMEL_PWM - help - This option enables support for LEDs driven using outputs - of the dedicated PWM controller found on newer Atmel SOCs. - config LEDS_LM3530 tristate "LCD Backlight driver for LM3530" depends on LEDS_CLASS @@ -143,6 +135,13 @@ config LEDS_SUNFIRE This option enables support for the Left, Middle, and Right LEDs on the I/O and CPU boards of SunFire UltraSPARC servers. +config LEDS_IPAQ_MICRO + tristate "LED Support for the Compaq iPAQ h3xxx" + depends on MFD_IPAQ_MICRO + help + Choose this option if you want to use the notification LED on + Compaq/HP iPAQ h3100 and h3600. + config LEDS_HP6XX tristate "LED Support for the HP Jornada 6xx" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 79c5155199a7..d8cc5f2777de 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o # LED Platform Drivers obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o -obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o @@ -31,6 +30,7 @@ obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o +obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o obj-$(CONFIG_LEDS_OT200) += leds-ot200.o obj-$(CONFIG_LEDS_FSG) += leds-fsg.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index f37d63cf726b..aa29198fca3e 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -210,8 +210,9 @@ static const struct dev_pm_ops leds_class_dev_pm_ops = { */ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) { - led_cdev->dev = device_create(leds_class, parent, 0, led_cdev, - "%s", led_cdev->name); + led_cdev->dev = device_create_with_groups(leds_class, parent, 0, + led_cdev, led_cdev->groups, + "%s", led_cdev->name); if (IS_ERR(led_cdev->dev)) return PTR_ERR(led_cdev->dev); diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c deleted file mode 100644 index 56cec8d6a2ac..000000000000 --- a/drivers/leds/leds-atmel-pwm.c +++ /dev/null @@ -1,149 +0,0 @@ -#include <linux/kernel.h> -#include <linux/platform_device.h> -#include <linux/leds.h> -#include <linux/io.h> -#include <linux/atmel_pwm.h> -#include 
<linux/slab.h> -#include <linux/module.h> - - -struct pwmled { - struct led_classdev cdev; - struct pwm_channel pwmc; - struct gpio_led *desc; - u32 mult; - u8 active_low; -}; - - -/* - * For simplicity, we use "brightness" as if it were a linear function - * of PWM duty cycle. However, a logarithmic function of duty cycle is - * probably a better match for perceived brightness: two is half as bright - * as four, four is half as bright as eight, etc - */ -static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b) -{ - struct pwmled *led; - - /* update the duty cycle for the *next* period */ - led = container_of(cdev, struct pwmled, cdev); - pwm_channel_writel(&led->pwmc, PWM_CUPD, led->mult * (unsigned) b); -} - -/* - * NOTE: we reuse the platform_data structure of GPIO leds, - * but repurpose its "gpio" number as a PWM channel number. - */ -static int pwmled_probe(struct platform_device *pdev) -{ - const struct gpio_led_platform_data *pdata; - struct pwmled *leds; - int i; - int status; - - pdata = dev_get_platdata(&pdev->dev); - if (!pdata || pdata->num_leds < 1) - return -ENODEV; - - leds = devm_kzalloc(&pdev->dev, pdata->num_leds * sizeof(*leds), - GFP_KERNEL); - if (!leds) - return -ENOMEM; - - for (i = 0; i < pdata->num_leds; i++) { - struct pwmled *led = leds + i; - const struct gpio_led *dat = pdata->leds + i; - u32 tmp; - - led->cdev.name = dat->name; - led->cdev.brightness = LED_OFF; - led->cdev.brightness_set = pwmled_brightness; - led->cdev.default_trigger = dat->default_trigger; - - led->active_low = dat->active_low; - - status = pwm_channel_alloc(dat->gpio, &led->pwmc); - if (status < 0) - goto err; - - /* - * Prescale clock by 2^x, so PWM counts in low MHz. - * Start each cycle with the LED active, so increasing - * the duty cycle gives us more time on (== brighter). - */ - tmp = 5; - if (!led->active_low) - tmp |= PWM_CPR_CPOL; - pwm_channel_writel(&led->pwmc, PWM_CMR, tmp); - - /* - * Pick a period so PWM cycles at 100+ Hz; and a multiplier - * for scaling duty cycle: brightness * mult. 
- */ - tmp = (led->pwmc.mck / (1 << 5)) / 100; - tmp /= 255; - led->mult = tmp; - pwm_channel_writel(&led->pwmc, PWM_CDTY, - led->cdev.brightness * 255); - pwm_channel_writel(&led->pwmc, PWM_CPRD, - LED_FULL * tmp); - - pwm_channel_enable(&led->pwmc); - - /* Hand it over to the LED framework */ - status = led_classdev_register(&pdev->dev, &led->cdev); - if (status < 0) { - pwm_channel_free(&led->pwmc); - goto err; - } - } - - platform_set_drvdata(pdev, leds); - return 0; - -err: - if (i > 0) { - for (i = i - 1; i >= 0; i--) { - led_classdev_unregister(&leds[i].cdev); - pwm_channel_free(&leds[i].pwmc); - } - } - - return status; -} - -static int pwmled_remove(struct platform_device *pdev) -{ - const struct gpio_led_platform_data *pdata; - struct pwmled *leds; - unsigned i; - - pdata = dev_get_platdata(&pdev->dev); - leds = platform_get_drvdata(pdev); - - for (i = 0; i < pdata->num_leds; i++) { - struct pwmled *led = leds + i; - - led_classdev_unregister(&led->cdev); - pwm_channel_free(&led->pwmc); - } - - return 0; -} - -static struct platform_driver pwmled_driver = { - .driver = { - .name = "leds-atmel-pwm", - .owner = THIS_MODULE, - }, - /* REVISIT add suspend() and resume() methods */ - .probe = pwmled_probe, - .remove = pwmled_remove, -}; - -module_platform_driver(pwmled_driver); - -MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:leds-atmel-pwm"); diff --git a/drivers/leds/leds-ipaq-micro.c b/drivers/leds/leds-ipaq-micro.c new file mode 100644 index 000000000000..3776f516cd88 --- /dev/null +++ b/drivers/leds/leds-ipaq-micro.c @@ -0,0 +1,141 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * h3xxx atmel micro companion support, notification LED subdevice + * + * Author : Linus Walleij <linus.walleij@linaro.org> + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/mfd/ipaq-micro.h> +#include <linux/leds.h> + +#define LED_YELLOW 0x00 +#define LED_GREEN 0x01 + +#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */ +#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop set 0:disable, 1:enable */ +#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */ + +static void micro_leds_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct ipaq_micro *micro = dev_get_drvdata(led_cdev->dev->parent->parent); + /* + * In this message: + * Byte 0 = LED color: 0 = yellow, 1 = green + * yellow LED is always ~30 blinks per minute + * Byte 1 = duration (flags?) 
appears to be ignored + * Byte 2 = green ontime in 1/10 sec (deciseconds) + * 1 = 1/10 second + * 0 = 256/10 second + * Byte 3 = green offtime in 1/10 sec (deciseconds) + * 1 = 1/10 second + * 0 = 256/10 seconds + */ + struct ipaq_micro_msg msg = { + .id = MSG_NOTIFY_LED, + .tx_len = 4, + }; + + msg.tx_data[0] = LED_GREEN; + if (*delay_on > IPAQ_LED_MAX_DUTY || + *delay_off > IPAQ_LED_MAX_DUTY) + return -EINVAL; + + if (*delay_on == 0 && *delay_off == 0) { + *delay_on = 100; + *delay_off = 100; + } + + msg.tx_data[1] = 0; + if (*delay_on >= IPAQ_LED_MAX_DUTY) + msg.tx_data[2] = 0; + else + msg.tx_data[2] = (u8) DIV_ROUND_CLOSEST(*delay_on, 100); + if (*delay_off >= IPAQ_LED_MAX_DUTY) + msg.tx_data[3] = 0; + else + msg.tx_data[3] = (u8) DIV_ROUND_CLOSEST(*delay_off, 100); + return ipaq_micro_tx_msg_sync(micro, &msg); +} + +static struct led_classdev micro_led = { + .name = "led-ipaq-micro", + .brightness_set = micro_leds_brightness_set, + .blink_set = micro_leds_blink_set, + .flags = LED_CORE_SUSPENDRESUME, +}; + +static int micro_leds_probe(struct platform_device *pdev) +{ + int ret; + + ret = led_classdev_register(&pdev->dev, &micro_led); + if (ret) { + dev_err(&pdev->dev, "registering led failed: %d\n", ret); + return ret; + } + dev_info(&pdev->dev, "iPAQ micro notification LED driver\n"); + + return 0; +} + +static int micro_leds_remove(struct platform_device *pdev) +{ + led_classdev_unregister(&micro_led); + return 0; +} + +static struct platform_driver micro_leds_device_driver = { + .driver = { + .name = "ipaq-micro-leds", + }, + .probe = micro_leds_probe, + .remove = micro_leds_remove, +}; +module_platform_driver(micro_leds_device_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("driver for iPAQ Atmel micro leds"); +MODULE_ALIAS("platform:ipaq-micro-leds"); diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c index 652368c2ea9a..91325de3cd33 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c @@ -400,6 +400,12 @@ static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute } static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set); +static struct attribute *lm3530_attrs[] = { + &dev_attr_mode.attr, + NULL +}; +ATTRIBUTE_GROUPS(lm3530); + static int lm3530_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -436,6 +442,7 @@ static int lm3530_probe(struct i2c_client *client, drvdata->led_dev.name = LM3530_LED_DEV; drvdata->led_dev.brightness_set = lm3530_brightness_set; drvdata->led_dev.max_brightness = MAX_BRIGHTNESS; + drvdata->led_dev.groups
= lm3530_groups; i2c_set_clientdata(client, drvdata); @@ -461,26 +468,13 @@ static int lm3530_probe(struct i2c_client *client, return err; } - err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode); - if (err < 0) { - dev_err(&client->dev, "File device creation failed: %d\n", err); - err = -ENODEV; - goto err_create_file; - } - return 0; - -err_create_file: - led_classdev_unregister(&drvdata->led_dev); - return err; } static int lm3530_remove(struct i2c_client *client) { struct lm3530_data *drvdata = i2c_get_clientdata(client); - device_remove_file(drvdata->led_dev.dev, &dev_attr_mode); - lm3530_led_disable(drvdata); led_classdev_unregister(&drvdata->led_dev); return 0; diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index e2c642c1169b..cbf61a40137d 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -645,6 +645,11 @@ static struct attribute_group lm3533_led_attribute_group = { .attrs = lm3533_led_attributes }; +static const struct attribute_group *lm3533_led_attribute_groups[] = { + &lm3533_led_attribute_group, + NULL +}; + static int lm3533_led_setup(struct lm3533_led *led, struct lm3533_led_platform_data *pdata) { @@ -692,6 +697,7 @@ static int lm3533_led_probe(struct platform_device *pdev) led->cdev.brightness_get = lm3533_led_get; led->cdev.blink_set = lm3533_led_blink_set; led->cdev.brightness = LED_OFF; + led->cdev.groups = lm3533_led_attribute_groups, led->id = pdev->id; mutex_init(&led->mutex); @@ -715,25 +721,16 @@ static int lm3533_led_probe(struct platform_device *pdev) led->cb.dev = led->cdev.dev; - ret = sysfs_create_group(&led->cdev.dev->kobj, - &lm3533_led_attribute_group); - if (ret < 0) { - dev_err(&pdev->dev, "failed to create sysfs attributes\n"); - goto err_unregister; - } - ret = lm3533_led_setup(led, pdata); if (ret) - goto err_sysfs_remove; + goto err_unregister; ret = lm3533_ctrlbank_enable(&led->cb); if (ret) - goto err_sysfs_remove; + goto err_unregister; return 0; -err_sysfs_remove: - sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group); err_unregister: led_classdev_unregister(&led->cdev); flush_work(&led->work); @@ -748,7 +745,6 @@ static int lm3533_led_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&led->cb); - sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group); led_classdev_unregister(&led->cdev); flush_work(&led->work); diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c index 591eb5e58ae3..f5112cb2d991 100644 --- a/drivers/leds/leds-lm355x.c +++ b/drivers/leds/leds-lm355x.c @@ -413,6 +413,12 @@ out: static DEVICE_ATTR(pattern, S_IWUSR, NULL, lm3556_indicator_pattern_store); +static struct attribute *lm355x_indicator_attrs[] = { + &dev_attr_pattern.attr, + NULL +}; +ATTRIBUTE_GROUPS(lm355x_indicator); + static const struct regmap_config lm355x_regmap = { .reg_bits = 8, .val_bits = 8, @@ -501,25 +507,18 @@ static int lm355x_probe(struct i2c_client *client, else chip->cdev_indicator.max_brightness = 8; chip->cdev_indicator.brightness_set = lm355x_indicator_brightness_set; + /* indicator pattern control only for LM3556 */ + if (id->driver_data == CHIP_LM3556) + chip->cdev_indicator.groups = lm355x_indicator_groups; err = led_classdev_register((struct device *) &client->dev, &chip->cdev_indicator); if (err < 0) goto err_create_indicator_file; - /* indicator pattern control only for LM3554 */ - if (id->driver_data == CHIP_LM3556) { - err = - device_create_file(chip->cdev_indicator.dev, - &dev_attr_pattern); 
- if (err < 0) - goto err_create_pattern_file; - } dev_info(&client->dev, "%s is initialized\n", lm355x_name[id->driver_data]); return 0; -err_create_pattern_file: - led_classdev_unregister(&chip->cdev_indicator); err_create_indicator_file: led_classdev_unregister(&chip->cdev_torch); err_create_torch_file: @@ -534,8 +533,6 @@ static int lm355x_remove(struct i2c_client *client) struct lm355x_reg_data *preg = chip->regs; regmap_write(chip->regmap, preg[REG_OPMODE].regno, 0); - if (chip->type == CHIP_LM3556) - device_remove_file(chip->cdev_indicator.dev, &dev_attr_pattern); led_classdev_unregister(&chip->cdev_indicator); flush_work(&chip->work_indicator); led_classdev_unregister(&chip->cdev_torch); diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c index ceb6b3cde6fe..d3dec0132769 100644 --- a/drivers/leds/leds-lm3642.c +++ b/drivers/leds/leds-lm3642.c @@ -313,6 +313,18 @@ static const struct regmap_config lm3642_regmap = { .max_register = REG_MAX, }; +static struct attribute *lm3642_flash_attrs[] = { + &dev_attr_strobe_pin.attr, + NULL +}; +ATTRIBUTE_GROUPS(lm3642_flash); + +static struct attribute *lm3642_torch_attrs[] = { + &dev_attr_torch_pin.attr, + NULL +}; +ATTRIBUTE_GROUPS(lm3642_torch); + static int lm3642_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -364,17 +376,13 @@ static int lm3642_probe(struct i2c_client *client, chip->cdev_flash.max_brightness = 16; chip->cdev_flash.brightness_set = lm3642_strobe_brightness_set; chip->cdev_flash.default_trigger = "flash"; + chip->cdev_flash.groups = lm3642_flash_groups, err = led_classdev_register((struct device *) &client->dev, &chip->cdev_flash); if (err < 0) { dev_err(chip->dev, "failed to register flash\n"); goto err_out; } - err = device_create_file(chip->cdev_flash.dev, &dev_attr_strobe_pin); - if (err < 0) { - dev_err(chip->dev, "failed to create strobe-pin file\n"); - goto err_create_flash_pin_file; - } /* torch */ INIT_WORK(&chip->work_torch, lm3642_deferred_torch_brightness_set); @@ -382,17 +390,13 @@ static int lm3642_probe(struct i2c_client *client, chip->cdev_torch.max_brightness = 8; chip->cdev_torch.brightness_set = lm3642_torch_brightness_set; chip->cdev_torch.default_trigger = "torch"; + chip->cdev_torch.groups = lm3642_torch_groups, err = led_classdev_register((struct device *) &client->dev, &chip->cdev_torch); if (err < 0) { dev_err(chip->dev, "failed to register torch\n"); goto err_create_torch_file; } - err = device_create_file(chip->cdev_torch.dev, &dev_attr_torch_pin); - if (err < 0) { - dev_err(chip->dev, "failed to create torch-pin file\n"); - goto err_create_torch_pin_file; - } /* indicator */ INIT_WORK(&chip->work_indicator, @@ -411,12 +415,8 @@ static int lm3642_probe(struct i2c_client *client, return 0; err_create_indicator_file: - device_remove_file(chip->cdev_torch.dev, &dev_attr_torch_pin); -err_create_torch_pin_file: led_classdev_unregister(&chip->cdev_torch); err_create_torch_file: - device_remove_file(chip->cdev_flash.dev, &dev_attr_strobe_pin); -err_create_flash_pin_file: led_classdev_unregister(&chip->cdev_flash); err_out: return err; @@ -428,10 +428,8 @@ static int lm3642_remove(struct i2c_client *client) led_classdev_unregister(&chip->cdev_indicator); flush_work(&chip->work_indicator); - device_remove_file(chip->cdev_torch.dev, &dev_attr_torch_pin); led_classdev_unregister(&chip->cdev_torch); flush_work(&chip->work_torch); - device_remove_file(chip->cdev_flash.dev, &dev_attr_strobe_pin); led_classdev_unregister(&chip->cdev_flash); flush_work(&chip->work_flash); 
regmap_write(chip->regmap, REG_ENABLE, 0); diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 88317b4f7bf3..77c26bc32eed 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -127,15 +127,12 @@ static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, lp55xx_show_current, lp55xx_store_current); static DEVICE_ATTR(max_current, S_IRUGO , lp55xx_show_max_current, NULL); -static struct attribute *lp55xx_led_attributes[] = { +static struct attribute *lp55xx_led_attrs[] = { &dev_attr_led_current.attr, &dev_attr_max_current.attr, NULL, }; - -static struct attribute_group lp55xx_led_attr_group = { - .attrs = lp55xx_led_attributes -}; +ATTRIBUTE_GROUPS(lp55xx_led); static void lp55xx_set_brightness(struct led_classdev *cdev, enum led_brightness brightness) @@ -176,6 +173,7 @@ static int lp55xx_init_led(struct lp55xx_led *led, } led->cdev.brightness_set = lp55xx_set_brightness; + led->cdev.groups = lp55xx_led_groups; if (pdata->led_config[chan].name) { led->cdev.name = pdata->led_config[chan].name; @@ -185,24 +183,12 @@ static int lp55xx_init_led(struct lp55xx_led *led, led->cdev.name = name; } - /* - * register led class device for each channel and - * add device attributes - */ - ret = led_classdev_register(dev, &led->cdev); if (ret) { dev_err(dev, "led register err: %d\n", ret); return ret; } - ret = sysfs_create_group(&led->cdev.dev->kobj, &lp55xx_led_attr_group); - if (ret) { - dev_err(dev, "led sysfs err: %d\n", ret); - led_classdev_unregister(&led->cdev); - return ret; - } - return 0; } diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c index f449a8bdddc7..607bc2755aba 100644 --- a/drivers/leds/leds-max8997.c +++ b/drivers/leds/leds-max8997.c @@ -229,6 +229,12 @@ static ssize_t max8997_led_store_mode(struct device *dev, static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode); +static struct attribute *max8997_attrs[] = { + &dev_attr_mode.attr, + NULL +}; +ATTRIBUTE_GROUPS(max8997); + static int max8997_led_probe(struct platform_device *pdev) { struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent); @@ -253,6 +259,7 @@ static int max8997_led_probe(struct platform_device *pdev) led->cdev.brightness_set = max8997_led_brightness_set; led->cdev.flags |= LED_CORE_SUSPENDRESUME; led->cdev.brightness = 0; + led->cdev.groups = max8997_groups; led->iodev = iodev; /* initialize mode and brightness according to platform_data */ @@ -281,14 +288,6 @@ static int max8997_led_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = device_create_file(led->cdev.dev, &dev_attr_mode); - if (ret != 0) { - dev_err(&pdev->dev, - "failed to create file: %d\n", ret); - led_classdev_unregister(&led->cdev); - return ret; - } - return 0; } @@ -296,7 +295,6 @@ static int max8997_led_remove(struct platform_device *pdev) { struct max8997_led *led = platform_get_drvdata(pdev); - device_remove_file(led->cdev.dev, &dev_attr_mode); led_classdev_unregister(&led->cdev); return 0; diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c index e97f443a6e07..64fde485dcaa 100644 --- a/drivers/leds/leds-netxbig.c +++ b/drivers/leds/leds-netxbig.c @@ -293,10 +293,14 @@ static ssize_t netxbig_led_sata_show(struct device *dev, static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store); +static struct attribute *netxbig_led_attrs[] = { + &dev_attr_sata.attr, + NULL +}; +ATTRIBUTE_GROUPS(netxbig_led); + static void delete_netxbig_led(struct netxbig_led_data *led_dat) { - if 
(led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) - device_remove_file(led_dat->cdev.dev, &dev_attr_sata); led_classdev_unregister(&led_dat->cdev); } @@ -306,7 +310,6 @@ create_netxbig_led(struct platform_device *pdev, const struct netxbig_led *template) { struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev); - int ret; spin_lock_init(&led_dat->lock); led_dat->gpio_ext = pdata->gpio_ext; @@ -327,6 +330,12 @@ create_netxbig_led(struct platform_device *pdev, led_dat->sata = 0; led_dat->cdev.brightness = LED_OFF; led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; + /* + * If available, expose the SATA activity blink capability through + * a "sata" sysfs attribute. + */ + if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) + led_dat->cdev.groups = netxbig_led_groups; led_dat->mode_addr = template->mode_addr; led_dat->mode_val = template->mode_val; led_dat->bright_addr = template->bright_addr; @@ -334,21 +343,7 @@ create_netxbig_led(struct platform_device *pdev, led_dat->timer = pdata->timer; led_dat->num_timer = pdata->num_timer; - ret = led_classdev_register(&pdev->dev, &led_dat->cdev); - if (ret < 0) - return ret; - - /* - * If available, expose the SATA activity blink capability through - * a "sata" sysfs attribute. - */ - if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) { - ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata); - if (ret) - led_classdev_unregister(&led_dat->cdev); - } - - return ret; + return led_classdev_register(&pdev->dev, &led_dat->cdev); } static int netxbig_led_probe(struct platform_device *pdev) diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index efa625883c83..231993d1fe21 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -185,6 +185,12 @@ static ssize_t ns2_led_sata_show(struct device *dev, static DEVICE_ATTR(sata, 0644, ns2_led_sata_show, ns2_led_sata_store); +static struct attribute *ns2_led_attrs[] = { + &dev_attr_sata.attr, + NULL +}; +ATTRIBUTE_GROUPS(ns2_led); + static int create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, const struct ns2_led *template) @@ -219,6 +225,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, led_dat->cdev.blink_set = NULL; led_dat->cdev.brightness_set = ns2_led_set; led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; + led_dat->cdev.groups = ns2_led_groups; led_dat->cmd = template->cmd; led_dat->slow = template->slow; @@ -235,20 +242,11 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, if (ret < 0) return ret; - ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata); - if (ret < 0) - goto err_free_cdev; - return 0; - -err_free_cdev: - led_classdev_unregister(&led_dat->cdev); - return ret; } static void delete_ns2_led(struct ns2_led_data *led_dat) { - device_remove_file(led_dat->cdev.dev, &dev_attr_sata); led_classdev_unregister(&led_dat->cdev); } diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c index 82589c0a5689..f110b4c456ba 100644 --- a/drivers/leds/leds-pca963x.c +++ b/drivers/leds/leds-pca963x.c @@ -12,7 +12,7 @@ * directory of this archive for more details. * * LED driver for the PCA9633 I2C LED driver (7-bit slave address 0x62) - * LED driver for the PCA9634 I2C LED driver (7-bit slave address set by hw.) + * LED driver for the PCA9634/5 I2C LED driver (7-bit slave address set by hw.) 
* * Note that hardware blinking violates the leds infrastructure driver * interface since the hardware only supports blinking all LEDs with the @@ -52,6 +52,7 @@ enum pca963x_type { pca9633, pca9634, + pca9635, }; struct pca963x_chipdef { @@ -74,6 +75,12 @@ static struct pca963x_chipdef pca963x_chipdefs[] = { .ledout_base = 0xc, .n_leds = 8, }, + [pca9635] = { + .grppwm = 0x12, + .grpfreq = 0x13, + .ledout_base = 0x14, + .n_leds = 16, + }, }; /* Total blink period in milliseconds */ @@ -84,6 +91,7 @@ static const struct i2c_device_id pca963x_id[] = { { "pca9632", pca9633 }, { "pca9633", pca9633 }, { "pca9634", pca9634 }, + { "pca9635", pca9635 }, { } }; MODULE_DEVICE_TABLE(i2c, pca963x_id); @@ -107,7 +115,7 @@ struct pca963x_led { struct work_struct work; enum led_brightness brightness; struct led_classdev led_cdev; - int led_num; /* 0 .. 7 potentially */ + int led_num; /* 0 .. 15 potentially */ enum pca963x_cmd cmd; char name[32]; u8 gdc; @@ -321,6 +329,7 @@ static const struct of_device_id of_pca963x_match[] = { { .compatible = "nxp,pca9632", }, { .compatible = "nxp,pca9633", }, { .compatible = "nxp,pca9634", }, + { .compatible = "nxp,pca9635", }, {}, }; #else @@ -375,9 +384,8 @@ static int pca963x_probe(struct i2c_client *client, pca963x_chip->leds = pca963x; /* Turn off LEDs by default*/ - i2c_smbus_write_byte_data(client, chip->ledout_base, 0x00); - if (chip->n_leds > 4) - i2c_smbus_write_byte_data(client, chip->ledout_base + 1, 0x00); + for (i = 0; i < chip->n_leds / 4; i++) + i2c_smbus_write_byte_data(client, chip->ledout_base + i, 0x00); for (i = 0; i < chip->n_leds; i++) { pca963x[i].led_num = i; @@ -415,9 +423,13 @@ static int pca963x_probe(struct i2c_client *client, /* Disable LED all-call address and set normal mode */ i2c_smbus_write_byte_data(client, PCA963X_MODE1, 0x00); - /* Configure output: open-drain or totem pole (push-pull) */ - if (pdata && pdata->outdrv == PCA963X_OPEN_DRAIN) - i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x01); + if (pdata) { + /* Configure output: open-drain or totem pole (push-pull) */ + if (pdata->outdrv == PCA963X_OPEN_DRAIN) + i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x01); + else + i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x05); + } return 0; diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 2eb3ef62962b..046cb7008745 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -469,6 +469,12 @@ static ssize_t nas_led_blink_store(struct device *dev, static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store); +static struct attribute *nasgpio_led_attrs[] = { + &dev_attr_blink.attr, + NULL +}; +ATTRIBUTE_GROUPS(nasgpio_led); + static int register_nasgpio_led(int led_nr) { int ret; @@ -481,20 +487,18 @@ static int register_nasgpio_led(int led_nr) led->brightness = LED_FULL; led->brightness_set = nasgpio_led_set_brightness; led->blink_set = nasgpio_led_set_blink; + led->groups = nasgpio_led_groups; ret = led_classdev_register(&nas_gpio_pci_dev->dev, led); if (ret) return ret; - ret = device_create_file(led->dev, &dev_attr_blink); - if (ret) - led_classdev_unregister(led); - return ret; + + return 0; } static void unregister_nasgpio_led(int led_nr) { struct led_classdev *led = get_classdev_for_led_nr(led_nr); led_classdev_unregister(led); - device_remove_file(led->dev, &dev_attr_blink); } /* * module load/initialization diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c index e72c974142d0..1b71e0701002 100644 --- 
a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -219,6 +219,12 @@ static ssize_t wm831x_status_src_store(struct device *dev, static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store); +static struct attribute *wm831x_status_attrs[] = { + &dev_attr_src.attr, + NULL +}; +ATTRIBUTE_GROUPS(wm831x_status); + static int wm831x_status_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); @@ -232,8 +238,7 @@ static int wm831x_status_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_REG, 0); if (res == NULL) { dev_err(&pdev->dev, "No register resource\n"); - ret = -EINVAL; - goto err; + return -EINVAL; } drvdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_status), @@ -284,31 +289,21 @@ static int wm831x_status_probe(struct platform_device *pdev) drvdata->cdev.default_trigger = pdata.default_trigger; drvdata->cdev.brightness_set = wm831x_status_set; drvdata->cdev.blink_set = wm831x_status_blink_set; + drvdata->cdev.groups = wm831x_status_groups; ret = led_classdev_register(wm831x->dev, &drvdata->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); - goto err_led; + return ret; } - ret = device_create_file(drvdata->cdev.dev, &dev_attr_src); - if (ret != 0) - dev_err(&pdev->dev, - "No source control for LED: %d\n", ret); - return 0; - -err_led: - led_classdev_unregister(&drvdata->cdev); -err: - return ret; } static int wm831x_status_remove(struct platform_device *pdev) { struct wm831x_status *drvdata = platform_get_drvdata(pdev); - device_remove_file(drvdata->cdev.dev, &dev_attr_src); led_classdev_unregister(&drvdata->cdev); return 0; diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 0bf1e4edf04d..6590558d1d31 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -42,7 +42,6 @@ DEFINE_MUTEX(lguest_lock); static __init int map_switcher(void) { int i, err; - struct page **pagep; /* * Map the Switcher in to high memory. @@ -110,11 +109,9 @@ static __init int map_switcher(void) * This code actually sets up the pages we've allocated to appear at * switcher_addr. map_vm_area() takes the vma we allocated above, the * kind of pages we're mapping (kernel pages), and a pointer to our - * array of struct pages. It increments that pointer, but we don't - * care. + * array of struct pages. */ - pagep = lg_switcher_pages; - err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); + err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages); if (err) { printk("lguest: map_vm_area failed: %i\n", err); goto free_vma; diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index b1d91170ded0..6f68537c93ce 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c @@ -110,13 +110,7 @@ static int pmu_backlight_update_status(struct backlight_device *bd) } -static int pmu_backlight_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static const struct backlight_ops pmu_backlight_data = { - .get_brightness = pmu_backlight_get_brightness, .update_status = pmu_backlight_update_status, }; diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index c8b5c13bcd05..9fd9c6717e0c 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -16,26 +16,9 @@ config PL320_MBOX Management Engine, primarily for cpufreq. Say Y here if you want to use the PL320 IPCM support. 
-config OMAP_MBOX - tristate - help - This option is selected by any OMAP architecture specific mailbox - driver such as CONFIG_OMAP1_MBOX or CONFIG_OMAP2PLUS_MBOX. This - enables the common OMAP mailbox framework code. - -config OMAP1_MBOX - tristate "OMAP1 Mailbox framework support" - depends on ARCH_OMAP1 - select OMAP_MBOX - help - Mailbox implementation for OMAP chips with hardware for - interprocessor communication involving DSP in OMAP1. Say Y here - if you want to use OMAP1 Mailbox framework support. - config OMAP2PLUS_MBOX tristate "OMAP2+ Mailbox framework support" depends on ARCH_OMAP2PLUS - select OMAP_MBOX help Mailbox implementation for OMAP family chips with hardware for interprocessor communication involving DSP, IVA1.0 and IVA2 in @@ -44,7 +27,7 @@ config OMAP2PLUS_MBOX config OMAP_MBOX_KFIFO_SIZE int "Mailbox kfifo default buffer size (bytes)" - depends on OMAP2PLUS_MBOX || OMAP1_MBOX + depends on OMAP2PLUS_MBOX default 256 help Specify the default size of mailbox's kfifo buffers (bytes). diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index e0facb34084a..6d184dbcaca8 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -1,7 +1,3 @@ obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o -obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o -obj-$(CONFIG_OMAP1_MBOX) += mailbox_omap1.o -mailbox_omap1-objs := mailbox-omap1.o -obj-$(CONFIG_OMAP2PLUS_MBOX) += mailbox_omap2.o -mailbox_omap2-objs := mailbox-omap2.o +obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o diff --git a/drivers/mailbox/mailbox-omap1.c b/drivers/mailbox/mailbox-omap1.c deleted file mode 100644 index 9001b7633f10..000000000000 --- a/drivers/mailbox/mailbox-omap1.c +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Mailbox reservation modules for OMAP1 - * - * Copyright (C) 2006-2009 Nokia Corporation - * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/io.h> - -#include "omap-mbox.h" - -#define MAILBOX_ARM2DSP1 0x00 -#define MAILBOX_ARM2DSP1b 0x04 -#define MAILBOX_DSP2ARM1 0x08 -#define MAILBOX_DSP2ARM1b 0x0c -#define MAILBOX_DSP2ARM2 0x10 -#define MAILBOX_DSP2ARM2b 0x14 -#define MAILBOX_ARM2DSP1_Flag 0x18 -#define MAILBOX_DSP2ARM1_Flag 0x1c -#define MAILBOX_DSP2ARM2_Flag 0x20 - -static void __iomem *mbox_base; - -struct omap_mbox1_fifo { - unsigned long cmd; - unsigned long data; - unsigned long flag; -}; - -struct omap_mbox1_priv { - struct omap_mbox1_fifo tx_fifo; - struct omap_mbox1_fifo rx_fifo; -}; - -static inline int mbox_read_reg(size_t ofs) -{ - return __raw_readw(mbox_base + ofs); -} - -static inline void mbox_write_reg(u32 val, size_t ofs) -{ - __raw_writew(val, mbox_base + ofs); -} - -/* msg */ -static mbox_msg_t omap1_mbox_fifo_read(struct omap_mbox *mbox) -{ - struct omap_mbox1_fifo *fifo = - &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo; - mbox_msg_t msg; - - msg = mbox_read_reg(fifo->data); - msg |= ((mbox_msg_t) mbox_read_reg(fifo->cmd)) << 16; - - return msg; -} - -static void -omap1_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) -{ - struct omap_mbox1_fifo *fifo = - &((struct omap_mbox1_priv *)mbox->priv)->tx_fifo; - - mbox_write_reg(msg & 0xffff, fifo->data); - mbox_write_reg(msg >> 16, fifo->cmd); -} - -static int omap1_mbox_fifo_empty(struct omap_mbox *mbox) -{ - return 0; -} - -static int omap1_mbox_fifo_full(struct omap_mbox *mbox) -{ - struct omap_mbox1_fifo *fifo = - &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo; - - return mbox_read_reg(fifo->flag); -} - -/* irq */ -static void -omap1_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) -{ - if (irq == IRQ_RX) - enable_irq(mbox->irq); -} - -static void -omap1_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) -{ - if (irq == IRQ_RX) - disable_irq(mbox->irq); -} - -static int -omap1_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) -{ - if (irq == IRQ_TX) - return 0; - return 1; -} - -static struct omap_mbox_ops omap1_mbox_ops = { - .type = OMAP_MBOX_TYPE1, - .fifo_read = omap1_mbox_fifo_read, - .fifo_write = omap1_mbox_fifo_write, - .fifo_empty = omap1_mbox_fifo_empty, - .fifo_full = omap1_mbox_fifo_full, - .enable_irq = omap1_mbox_enable_irq, - .disable_irq = omap1_mbox_disable_irq, - .is_irq = omap1_mbox_is_irq, -}; - -/* FIXME: the following struct should be created automatically by the user id */ - -/* DSP */ -static struct omap_mbox1_priv omap1_mbox_dsp_priv = { - .tx_fifo = { - .cmd = MAILBOX_ARM2DSP1b, - .data = MAILBOX_ARM2DSP1, - .flag = MAILBOX_ARM2DSP1_Flag, - }, - .rx_fifo = { - .cmd = MAILBOX_DSP2ARM1b, - .data = MAILBOX_DSP2ARM1, - .flag = MAILBOX_DSP2ARM1_Flag, - }, -}; - -static struct omap_mbox mbox_dsp_info = { - .name = "dsp", - .ops = &omap1_mbox_ops, - .priv = &omap1_mbox_dsp_priv, -}; - -static struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL }; - -static int omap1_mbox_probe(struct platform_device *pdev) -{ - struct resource *mem; - int ret; - struct omap_mbox **list; - - list = omap1_mboxes; - list[0]->irq = platform_get_irq_byname(pdev, "dsp"); - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) - return -ENOENT; - - mbox_base = ioremap(mem->start, resource_size(mem)); - if (!mbox_base) - return -ENOMEM; - - ret = omap_mbox_register(&pdev->dev, list); - if (ret) { - iounmap(mbox_base); - return ret; - } - - return 0; -} - -static int 
omap1_mbox_remove(struct platform_device *pdev) -{ - omap_mbox_unregister(); - iounmap(mbox_base); - return 0; -} - -static struct platform_driver omap1_mbox_driver = { - .probe = omap1_mbox_probe, - .remove = omap1_mbox_remove, - .driver = { - .name = "omap-mailbox", - }, -}; - -static int __init omap1_mbox_init(void) -{ - return platform_driver_register(&omap1_mbox_driver); -} - -static void __exit omap1_mbox_exit(void) -{ - platform_driver_unregister(&omap1_mbox_driver); -} - -module_init(omap1_mbox_init); -module_exit(omap1_mbox_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions"); -MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); -MODULE_ALIAS("platform:omap1-mailbox"); diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c deleted file mode 100644 index 42d2b893ea67..000000000000 --- a/drivers/mailbox/mailbox-omap2.c +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Mailbox reservation modules for OMAP2/3 - * - * Copyright (C) 2006-2009 Nokia Corporation - * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> - * and Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/pm_runtime.h> -#include <linux/platform_data/mailbox-omap.h> - -#include "omap-mbox.h" - -#define MAILBOX_REVISION 0x000 -#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m)) -#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m)) -#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m)) -#define MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u)) -#define MAILBOX_IRQENABLE(u) (0x104 + 8 * (u)) - -#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u)) -#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u)) -#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u)) - -#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m))) -#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1)) - -#define MBOX_REG_SIZE 0x120 - -#define OMAP4_MBOX_REG_SIZE 0x130 - -#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32)) -#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32)) - -static void __iomem *mbox_base; - -struct omap_mbox2_fifo { - unsigned long msg; - unsigned long fifo_stat; - unsigned long msg_stat; -}; - -struct omap_mbox2_priv { - struct omap_mbox2_fifo tx_fifo; - struct omap_mbox2_fifo rx_fifo; - unsigned long irqenable; - unsigned long irqstatus; - u32 newmsg_bit; - u32 notfull_bit; - u32 ctx[OMAP4_MBOX_NR_REGS]; - unsigned long irqdisable; - u32 intr_type; -}; - -static inline unsigned int mbox_read_reg(size_t ofs) -{ - return __raw_readl(mbox_base + ofs); -} - -static inline void mbox_write_reg(u32 val, size_t ofs) -{ - __raw_writel(val, mbox_base + ofs); -} - -/* Mailbox H/W preparations */ -static int omap2_mbox_startup(struct omap_mbox *mbox) -{ - u32 l; - - pm_runtime_enable(mbox->dev->parent); - pm_runtime_get_sync(mbox->dev->parent); - - l = mbox_read_reg(MAILBOX_REVISION); - pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f)); - - return 0; -} - -static void omap2_mbox_shutdown(struct omap_mbox *mbox) -{ - pm_runtime_put_sync(mbox->dev->parent); - pm_runtime_disable(mbox->dev->parent); -} - -/* Mailbox FIFO handle functions */ -static mbox_msg_t omap2_mbox_fifo_read(struct omap_mbox *mbox) -{ - struct omap_mbox2_fifo *fifo = - &((struct omap_mbox2_priv 
*)mbox->priv)->rx_fifo; - return (mbox_msg_t) mbox_read_reg(fifo->msg); -} - -static void omap2_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) -{ - struct omap_mbox2_fifo *fifo = - &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo; - mbox_write_reg(msg, fifo->msg); -} - -static int omap2_mbox_fifo_empty(struct omap_mbox *mbox) -{ - struct omap_mbox2_fifo *fifo = - &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo; - return (mbox_read_reg(fifo->msg_stat) == 0); -} - -static int omap2_mbox_fifo_full(struct omap_mbox *mbox) -{ - struct omap_mbox2_fifo *fifo = - &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo; - return mbox_read_reg(fifo->fifo_stat); -} - -/* Mailbox IRQ handle functions */ -static void omap2_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) -{ - struct omap_mbox2_priv *p = mbox->priv; - u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; - - l = mbox_read_reg(p->irqenable); - l |= bit; - mbox_write_reg(l, p->irqenable); -} - -static void omap2_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) -{ - struct omap_mbox2_priv *p = mbox->priv; - u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; - - /* - * Read and update the interrupt configuration register for pre-OMAP4. - * OMAP4 and later SoCs have a dedicated interrupt disabling register. - */ - if (!p->intr_type) - bit = mbox_read_reg(p->irqdisable) & ~bit; - - mbox_write_reg(bit, p->irqdisable); -} - -static void omap2_mbox_ack_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) -{ - struct omap_mbox2_priv *p = mbox->priv; - u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; - - mbox_write_reg(bit, p->irqstatus); - - /* Flush posted write for irq status to avoid spurious interrupts */ - mbox_read_reg(p->irqstatus); -} - -static int omap2_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) -{ - struct omap_mbox2_priv *p = mbox->priv; - u32 bit = (irq == IRQ_TX) ? 
p->notfull_bit : p->newmsg_bit; - u32 enable = mbox_read_reg(p->irqenable); - u32 status = mbox_read_reg(p->irqstatus); - - return (int)(enable & status & bit); -} - -static void omap2_mbox_save_ctx(struct omap_mbox *mbox) -{ - int i; - struct omap_mbox2_priv *p = mbox->priv; - int nr_regs; - - if (p->intr_type) - nr_regs = OMAP4_MBOX_NR_REGS; - else - nr_regs = MBOX_NR_REGS; - for (i = 0; i < nr_regs; i++) { - p->ctx[i] = mbox_read_reg(i * sizeof(u32)); - - dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__, - i, p->ctx[i]); - } -} - -static void omap2_mbox_restore_ctx(struct omap_mbox *mbox) -{ - int i; - struct omap_mbox2_priv *p = mbox->priv; - int nr_regs; - - if (p->intr_type) - nr_regs = OMAP4_MBOX_NR_REGS; - else - nr_regs = MBOX_NR_REGS; - for (i = 0; i < nr_regs; i++) { - mbox_write_reg(p->ctx[i], i * sizeof(u32)); - - dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__, - i, p->ctx[i]); - } -} - -static struct omap_mbox_ops omap2_mbox_ops = { - .type = OMAP_MBOX_TYPE2, - .startup = omap2_mbox_startup, - .shutdown = omap2_mbox_shutdown, - .fifo_read = omap2_mbox_fifo_read, - .fifo_write = omap2_mbox_fifo_write, - .fifo_empty = omap2_mbox_fifo_empty, - .fifo_full = omap2_mbox_fifo_full, - .enable_irq = omap2_mbox_enable_irq, - .disable_irq = omap2_mbox_disable_irq, - .ack_irq = omap2_mbox_ack_irq, - .is_irq = omap2_mbox_is_irq, - .save_ctx = omap2_mbox_save_ctx, - .restore_ctx = omap2_mbox_restore_ctx, -}; - -static int omap2_mbox_probe(struct platform_device *pdev) -{ - struct resource *mem; - int ret; - struct omap_mbox **list, *mbox, *mboxblk; - struct omap_mbox2_priv *priv, *privblk; - struct omap_mbox_pdata *pdata = pdev->dev.platform_data; - struct omap_mbox_dev_info *info; - int i; - - if (!pdata || !pdata->info_cnt || !pdata->info) { - pr_err("%s: platform not supported\n", __func__); - return -ENODEV; - } - - /* allocate one extra for marking end of list */ - list = kzalloc((pdata->info_cnt + 1) * sizeof(*list), GFP_KERNEL); - if (!list) - return -ENOMEM; - - mboxblk = mbox = kzalloc(pdata->info_cnt * sizeof(*mbox), GFP_KERNEL); - if (!mboxblk) { - ret = -ENOMEM; - goto free_list; - } - - privblk = priv = kzalloc(pdata->info_cnt * sizeof(*priv), GFP_KERNEL); - if (!privblk) { - ret = -ENOMEM; - goto free_mboxblk; - } - - info = pdata->info; - for (i = 0; i < pdata->info_cnt; i++, info++, priv++) { - priv->tx_fifo.msg = MAILBOX_MESSAGE(info->tx_id); - priv->tx_fifo.fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id); - priv->rx_fifo.msg = MAILBOX_MESSAGE(info->rx_id); - priv->rx_fifo.msg_stat = MAILBOX_MSGSTATUS(info->rx_id); - priv->notfull_bit = MAILBOX_IRQ_NOTFULL(info->tx_id); - priv->newmsg_bit = MAILBOX_IRQ_NEWMSG(info->rx_id); - if (pdata->intr_type) { - priv->irqenable = OMAP4_MAILBOX_IRQENABLE(info->usr_id); - priv->irqstatus = OMAP4_MAILBOX_IRQSTATUS(info->usr_id); - priv->irqdisable = - OMAP4_MAILBOX_IRQENABLE_CLR(info->usr_id); - } else { - priv->irqenable = MAILBOX_IRQENABLE(info->usr_id); - priv->irqstatus = MAILBOX_IRQSTATUS(info->usr_id); - priv->irqdisable = MAILBOX_IRQENABLE(info->usr_id); - } - priv->intr_type = pdata->intr_type; - - mbox->priv = priv; - mbox->name = info->name; - mbox->ops = &omap2_mbox_ops; - mbox->irq = platform_get_irq(pdev, info->irq_id); - if (mbox->irq < 0) { - ret = mbox->irq; - goto free_privblk; - } - list[i] = mbox++; - } - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - ret = -ENOENT; - goto free_privblk; - } - - mbox_base = ioremap(mem->start, resource_size(mem)); - if (!mbox_base) { - ret = -ENOMEM; - goto 
free_privblk; - } - - ret = omap_mbox_register(&pdev->dev, list); - if (ret) - goto unmap_mbox; - platform_set_drvdata(pdev, list); - - return 0; - -unmap_mbox: - iounmap(mbox_base); -free_privblk: - kfree(privblk); -free_mboxblk: - kfree(mboxblk); -free_list: - kfree(list); - return ret; -} - -static int omap2_mbox_remove(struct platform_device *pdev) -{ - struct omap_mbox2_priv *privblk; - struct omap_mbox **list = platform_get_drvdata(pdev); - struct omap_mbox *mboxblk = list[0]; - - privblk = mboxblk->priv; - omap_mbox_unregister(); - iounmap(mbox_base); - kfree(privblk); - kfree(mboxblk); - kfree(list); - - return 0; -} - -static struct platform_driver omap2_mbox_driver = { - .probe = omap2_mbox_probe, - .remove = omap2_mbox_remove, - .driver = { - .name = "omap-mailbox", - }, -}; - -static int __init omap2_mbox_init(void) -{ - return platform_driver_register(&omap2_mbox_driver); -} - -static void __exit omap2_mbox_exit(void) -{ - platform_driver_unregister(&omap2_mbox_driver); -} - -module_init(omap2_mbox_init); -module_exit(omap2_mbox_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("omap mailbox: omap2/3/4 architecture specific functions"); -MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); -MODULE_AUTHOR("Paul Mundt"); -MODULE_ALIAS("platform:omap2-mailbox"); diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c index d79a646b9042..a27e00e63a8a 100644 --- a/drivers/mailbox/omap-mailbox.c +++ b/drivers/mailbox/omap-mailbox.c @@ -2,8 +2,10 @@ * OMAP mailbox driver * * Copyright (C) 2006-2009 Nokia Corporation. All rights reserved. + * Copyright (C) 2013-2014 Texas Instruments Inc. * * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> + * Suman Anna <s-anna@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -24,70 +26,164 @@ #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/mutex.h> -#include <linux/delay.h> #include <linux/slab.h> #include <linux/kfifo.h> #include <linux/err.h> #include <linux/notifier.h> #include <linux/module.h> - -#include "omap-mbox.h" - -static struct omap_mbox **mboxes; - -static int mbox_configured; -static DEFINE_MUTEX(mbox_configured_lock); +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/platform_data/mailbox-omap.h> +#include <linux/omap-mailbox.h> + +#define MAILBOX_REVISION 0x000 +#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m)) +#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m)) +#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m)) + +#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u)) +#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u)) + +#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u)) +#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u)) +#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u)) + +#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \ + OMAP2_MAILBOX_IRQSTATUS(u)) +#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \ + OMAP2_MAILBOX_IRQENABLE(u)) +#define MAILBOX_IRQDISABLE(type, u) (type ? 
OMAP4_MAILBOX_IRQENABLE_CLR(u) \ + : OMAP2_MAILBOX_IRQENABLE(u)) + +#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m))) +#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1)) + +#define MBOX_REG_SIZE 0x120 + +#define OMAP4_MBOX_REG_SIZE 0x130 + +#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32)) +#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32)) + +struct omap_mbox_fifo { + unsigned long msg; + unsigned long fifo_stat; + unsigned long msg_stat; + unsigned long irqenable; + unsigned long irqstatus; + unsigned long irqdisable; + u32 intr_bit; +}; + +struct omap_mbox_queue { + spinlock_t lock; + struct kfifo fifo; + struct work_struct work; + struct tasklet_struct tasklet; + struct omap_mbox *mbox; + bool full; +}; + +struct omap_mbox_device { + struct device *dev; + struct mutex cfg_lock; + void __iomem *mbox_base; + u32 num_users; + u32 num_fifos; + struct omap_mbox **mboxes; + struct list_head elem; +}; + +struct omap_mbox { + const char *name; + int irq; + struct omap_mbox_queue *txq, *rxq; + struct device *dev; + struct omap_mbox_device *parent; + struct omap_mbox_fifo tx_fifo; + struct omap_mbox_fifo rx_fifo; + u32 ctx[OMAP4_MBOX_NR_REGS]; + u32 intr_type; + int use_count; + struct blocking_notifier_head notifier; +}; + +/* global variables for the mailbox devices */ +static DEFINE_MUTEX(omap_mbox_devices_lock); +static LIST_HEAD(omap_mbox_devices); static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE; module_param(mbox_kfifo_size, uint, S_IRUGO); MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)"); +static inline +unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs) +{ + return __raw_readl(mdev->mbox_base + ofs); +} + +static inline +void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs) +{ + __raw_writel(val, mdev->mbox_base + ofs); +} + /* Mailbox FIFO handle functions */ -static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox) +static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox) { - return mbox->ops->fifo_read(mbox); + struct omap_mbox_fifo *fifo = &mbox->rx_fifo; + return (mbox_msg_t) mbox_read_reg(mbox->parent, fifo->msg); } -static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) + +static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg) { - mbox->ops->fifo_write(mbox, msg); + struct omap_mbox_fifo *fifo = &mbox->tx_fifo; + mbox_write_reg(mbox->parent, msg, fifo->msg); } -static inline int mbox_fifo_empty(struct omap_mbox *mbox) + +static int mbox_fifo_empty(struct omap_mbox *mbox) { - return mbox->ops->fifo_empty(mbox); + struct omap_mbox_fifo *fifo = &mbox->rx_fifo; + return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0); } -static inline int mbox_fifo_full(struct omap_mbox *mbox) + +static int mbox_fifo_full(struct omap_mbox *mbox) { - return mbox->ops->fifo_full(mbox); + struct omap_mbox_fifo *fifo = &mbox->tx_fifo; + return mbox_read_reg(mbox->parent, fifo->fifo_stat); } /* Mailbox IRQ handle functions */ -static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) +static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) { - if (mbox->ops->ack_irq) - mbox->ops->ack_irq(mbox, irq); + struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? 
+ &mbox->tx_fifo : &mbox->rx_fifo; + u32 bit = fifo->intr_bit; + u32 irqstatus = fifo->irqstatus; + + mbox_write_reg(mbox->parent, bit, irqstatus); + + /* Flush posted write for irq status to avoid spurious interrupts */ + mbox_read_reg(mbox->parent, irqstatus); } -static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) + +static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) { - return mbox->ops->is_irq(mbox, irq); + struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? + &mbox->tx_fifo : &mbox->rx_fifo; + u32 bit = fifo->intr_bit; + u32 irqenable = fifo->irqenable; + u32 irqstatus = fifo->irqstatus; + + u32 enable = mbox_read_reg(mbox->parent, irqenable); + u32 status = mbox_read_reg(mbox->parent, irqstatus); + + return (int)(enable & status & bit); } /* * message sender */ -static int __mbox_poll_for_space(struct omap_mbox *mbox) -{ - int ret = 0, i = 1000; - - while (mbox_fifo_full(mbox)) { - if (mbox->ops->type == OMAP_MBOX_TYPE2) - return -1; - if (--i == 0) - return -1; - udelay(1); - } - return ret; -} - int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg) { struct omap_mbox_queue *mq = mbox->txq; @@ -100,7 +196,7 @@ int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg) goto out; } - if (kfifo_is_empty(&mq->fifo) && !__mbox_poll_for_space(mbox)) { + if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) { mbox_fifo_write(mbox, msg); goto out; } @@ -118,35 +214,69 @@ EXPORT_SYMBOL(omap_mbox_msg_send); void omap_mbox_save_ctx(struct omap_mbox *mbox) { - if (!mbox->ops->save_ctx) { - dev_err(mbox->dev, "%s:\tno save\n", __func__); - return; - } + int i; + int nr_regs; + + if (mbox->intr_type) + nr_regs = OMAP4_MBOX_NR_REGS; + else + nr_regs = MBOX_NR_REGS; + for (i = 0; i < nr_regs; i++) { + mbox->ctx[i] = mbox_read_reg(mbox->parent, i * sizeof(u32)); - mbox->ops->save_ctx(mbox); + dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__, + i, mbox->ctx[i]); + } } EXPORT_SYMBOL(omap_mbox_save_ctx); void omap_mbox_restore_ctx(struct omap_mbox *mbox) { - if (!mbox->ops->restore_ctx) { - dev_err(mbox->dev, "%s:\tno restore\n", __func__); - return; - } + int i; + int nr_regs; - mbox->ops->restore_ctx(mbox); + if (mbox->intr_type) + nr_regs = OMAP4_MBOX_NR_REGS; + else + nr_regs = MBOX_NR_REGS; + for (i = 0; i < nr_regs; i++) { + mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32)); + + dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__, + i, mbox->ctx[i]); + } } EXPORT_SYMBOL(omap_mbox_restore_ctx); void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) { - mbox->ops->enable_irq(mbox, irq); + u32 l; + struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? + &mbox->tx_fifo : &mbox->rx_fifo; + u32 bit = fifo->intr_bit; + u32 irqenable = fifo->irqenable; + + l = mbox_read_reg(mbox->parent, irqenable); + l |= bit; + mbox_write_reg(mbox->parent, l, irqenable); } EXPORT_SYMBOL(omap_mbox_enable_irq); void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq) { - mbox->ops->disable_irq(mbox, irq); + struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ? + &mbox->tx_fifo : &mbox->rx_fifo; + u32 bit = fifo->intr_bit; + u32 irqdisable = fifo->irqdisable; + + /* + * Read and update the interrupt configuration register for pre-OMAP4. + * OMAP4 and later SoCs have a dedicated interrupt disabling register. 
+ */ + if (!mbox->intr_type) + bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit; + + mbox_write_reg(mbox->parent, bit, irqdisable); } EXPORT_SYMBOL(omap_mbox_disable_irq); @@ -158,7 +288,7 @@ static void mbox_tx_tasklet(unsigned long tx_data) int ret; while (kfifo_len(&mq->fifo)) { - if (__mbox_poll_for_space(mbox)) { + if (mbox_fifo_full(mbox)) { omap_mbox_enable_irq(mbox, IRQ_TX); break; } @@ -223,9 +353,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox) len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg)); WARN_ON(len != sizeof(msg)); - - if (mbox->ops->type == OMAP_MBOX_TYPE1) - break; } /* no more messages in the fifo. clear IRQ source. */ @@ -283,16 +410,12 @@ static int omap_mbox_startup(struct omap_mbox *mbox) { int ret = 0; struct omap_mbox_queue *mq; + struct omap_mbox_device *mdev = mbox->parent; - mutex_lock(&mbox_configured_lock); - if (!mbox_configured++) { - if (likely(mbox->ops->startup)) { - ret = mbox->ops->startup(mbox); - if (unlikely(ret)) - goto fail_startup; - } else - goto fail_startup; - } + mutex_lock(&mdev->cfg_lock); + ret = pm_runtime_get_sync(mdev->dev); + if (unlikely(ret < 0)) + goto fail_startup; if (!mbox->use_count++) { mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet); @@ -319,7 +442,7 @@ static int omap_mbox_startup(struct omap_mbox *mbox) omap_mbox_enable_irq(mbox, IRQ_RX); } - mutex_unlock(&mbox_configured_lock); + mutex_unlock(&mdev->cfg_lock); return 0; fail_request_irq: @@ -327,18 +450,18 @@ fail_request_irq: fail_alloc_rxq: mbox_queue_free(mbox->txq); fail_alloc_txq: - if (mbox->ops->shutdown) - mbox->ops->shutdown(mbox); + pm_runtime_put_sync(mdev->dev); mbox->use_count--; fail_startup: - mbox_configured--; - mutex_unlock(&mbox_configured_lock); + mutex_unlock(&mdev->cfg_lock); return ret; } static void omap_mbox_fini(struct omap_mbox *mbox) { - mutex_lock(&mbox_configured_lock); + struct omap_mbox_device *mdev = mbox->parent; + + mutex_lock(&mdev->cfg_lock); if (!--mbox->use_count) { omap_mbox_disable_irq(mbox, IRQ_RX); @@ -349,28 +472,43 @@ static void omap_mbox_fini(struct omap_mbox *mbox) mbox_queue_free(mbox->rxq); } - if (likely(mbox->ops->shutdown)) { - if (!--mbox_configured) - mbox->ops->shutdown(mbox); - } + pm_runtime_put_sync(mdev->dev); - mutex_unlock(&mbox_configured_lock); + mutex_unlock(&mdev->cfg_lock); } -struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) +static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev, + const char *mbox_name) { struct omap_mbox *_mbox, *mbox = NULL; - int i, ret; + struct omap_mbox **mboxes = mdev->mboxes; + int i; if (!mboxes) - return ERR_PTR(-EINVAL); + return NULL; for (i = 0; (_mbox = mboxes[i]); i++) { - if (!strcmp(_mbox->name, name)) { + if (!strcmp(_mbox->name, mbox_name)) { mbox = _mbox; break; } } + return mbox; +} + +struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) +{ + struct omap_mbox *mbox = NULL; + struct omap_mbox_device *mdev; + int ret; + + mutex_lock(&omap_mbox_devices_lock); + list_for_each_entry(mdev, &omap_mbox_devices, elem) { + mbox = omap_mbox_device_find(mdev, name); + if (mbox) + break; + } + mutex_unlock(&omap_mbox_devices_lock); if (!mbox) return ERR_PTR(-ENOENT); @@ -397,19 +535,20 @@ EXPORT_SYMBOL(omap_mbox_put); static struct class omap_mbox_class = { .name = "mbox", }; -int omap_mbox_register(struct device *parent, struct omap_mbox **list) +static int omap_mbox_register(struct omap_mbox_device *mdev) { int ret; int i; + struct omap_mbox **mboxes; - mboxes = list; - if 
(!mboxes) + if (!mdev || !mdev->mboxes) return -EINVAL; + mboxes = mdev->mboxes; for (i = 0; mboxes[i]; i++) { struct omap_mbox *mbox = mboxes[i]; mbox->dev = device_create(&omap_mbox_class, - parent, 0, mbox, "%s", mbox->name); + mdev->dev, 0, mbox, "%s", mbox->name); if (IS_ERR(mbox->dev)) { ret = PTR_ERR(mbox->dev); goto err_out; @@ -417,6 +556,11 @@ int omap_mbox_register(struct device *parent, struct omap_mbox **list) BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier); } + + mutex_lock(&omap_mbox_devices_lock); + list_add(&mdev->elem, &omap_mbox_devices); + mutex_unlock(&omap_mbox_devices_lock); + return 0; err_out: @@ -424,21 +568,148 @@ err_out: device_unregister(mboxes[i]->dev); return ret; } -EXPORT_SYMBOL(omap_mbox_register); -int omap_mbox_unregister(void) +static int omap_mbox_unregister(struct omap_mbox_device *mdev) { int i; + struct omap_mbox **mboxes; - if (!mboxes) + if (!mdev || !mdev->mboxes) return -EINVAL; + mutex_lock(&omap_mbox_devices_lock); + list_del(&mdev->elem); + mutex_unlock(&omap_mbox_devices_lock); + + mboxes = mdev->mboxes; for (i = 0; mboxes[i]; i++) device_unregister(mboxes[i]->dev); - mboxes = NULL; return 0; } -EXPORT_SYMBOL(omap_mbox_unregister); + +static int omap_mbox_probe(struct platform_device *pdev) +{ + struct resource *mem; + int ret; + struct omap_mbox **list, *mbox, *mboxblk; + struct omap_mbox_pdata *pdata = pdev->dev.platform_data; + struct omap_mbox_dev_info *info; + struct omap_mbox_device *mdev; + struct omap_mbox_fifo *fifo; + u32 intr_type; + u32 l; + int i; + + if (!pdata || !pdata->info_cnt || !pdata->info) { + pr_err("%s: platform not supported\n", __func__); + return -ENODEV; + } + + mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return -ENOMEM; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(mdev->mbox_base)) + return PTR_ERR(mdev->mbox_base); + + /* allocate one extra for marking end of list */ + list = devm_kzalloc(&pdev->dev, (pdata->info_cnt + 1) * sizeof(*list), + GFP_KERNEL); + if (!list) + return -ENOMEM; + + mboxblk = devm_kzalloc(&pdev->dev, pdata->info_cnt * sizeof(*mbox), + GFP_KERNEL); + if (!mboxblk) + return -ENOMEM; + + info = pdata->info; + intr_type = pdata->intr_type; + mbox = mboxblk; + for (i = 0; i < pdata->info_cnt; i++, info++) { + fifo = &mbox->tx_fifo; + fifo->msg = MAILBOX_MESSAGE(info->tx_id); + fifo->fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id); + fifo->intr_bit = MAILBOX_IRQ_NOTFULL(info->tx_id); + fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id); + fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id); + fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id); + + fifo = &mbox->rx_fifo; + fifo->msg = MAILBOX_MESSAGE(info->rx_id); + fifo->msg_stat = MAILBOX_MSGSTATUS(info->rx_id); + fifo->intr_bit = MAILBOX_IRQ_NEWMSG(info->rx_id); + fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id); + fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id); + fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id); + + mbox->intr_type = intr_type; + + mbox->parent = mdev; + mbox->name = info->name; + mbox->irq = platform_get_irq(pdev, info->irq_id); + if (mbox->irq < 0) + return mbox->irq; + list[i] = mbox++; + } + + mutex_init(&mdev->cfg_lock); + mdev->dev = &pdev->dev; + mdev->num_users = pdata->num_users; + mdev->num_fifos = pdata->num_fifos; + mdev->mboxes = list; + ret = omap_mbox_register(mdev); + if (ret) + return ret; + + platform_set_drvdata(pdev, mdev); + 
pm_runtime_enable(mdev->dev); + + ret = pm_runtime_get_sync(mdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(mdev->dev); + goto unregister; + } + + /* + * just print the raw revision register, the format is not + * uniform across all SoCs + */ + l = mbox_read_reg(mdev, MAILBOX_REVISION); + dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l); + + ret = pm_runtime_put_sync(mdev->dev); + if (ret < 0) + goto unregister; + + return 0; + +unregister: + pm_runtime_disable(mdev->dev); + omap_mbox_unregister(mdev); + return ret; +} + +static int omap_mbox_remove(struct platform_device *pdev) +{ + struct omap_mbox_device *mdev = platform_get_drvdata(pdev); + + pm_runtime_disable(mdev->dev); + omap_mbox_unregister(mdev); + + return 0; +} + +static struct platform_driver omap_mbox_driver = { + .probe = omap_mbox_probe, + .remove = omap_mbox_remove, + .driver = { + .name = "omap-mailbox", + .owner = THIS_MODULE, + }, +}; static int __init omap_mbox_init(void) { @@ -453,12 +724,13 @@ static int __init omap_mbox_init(void) mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(mbox_msg_t)); - return 0; + return platform_driver_register(&omap_mbox_driver); } subsys_initcall(omap_mbox_init); static void __exit omap_mbox_exit(void) { + platform_driver_unregister(&omap_mbox_driver); class_unregister(&omap_mbox_class); } module_exit(omap_mbox_exit); diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h deleted file mode 100644 index 86d7518cd13b..000000000000 --- a/drivers/mailbox/omap-mbox.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * omap-mbox.h: OMAP mailbox internal definitions - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef OMAP_MBOX_H -#define OMAP_MBOX_H - -#include <linux/device.h> -#include <linux/interrupt.h> -#include <linux/kfifo.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> -#include <linux/omap-mailbox.h> - -typedef int __bitwise omap_mbox_type_t; -#define OMAP_MBOX_TYPE1 ((__force omap_mbox_type_t) 1) -#define OMAP_MBOX_TYPE2 ((__force omap_mbox_type_t) 2) - -struct omap_mbox_ops { - omap_mbox_type_t type; - int (*startup)(struct omap_mbox *mbox); - void (*shutdown)(struct omap_mbox *mbox); - /* fifo */ - mbox_msg_t (*fifo_read)(struct omap_mbox *mbox); - void (*fifo_write)(struct omap_mbox *mbox, mbox_msg_t msg); - int (*fifo_empty)(struct omap_mbox *mbox); - int (*fifo_full)(struct omap_mbox *mbox); - /* irq */ - void (*enable_irq)(struct omap_mbox *mbox, - omap_mbox_irq_t irq); - void (*disable_irq)(struct omap_mbox *mbox, - omap_mbox_irq_t irq); - void (*ack_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq); - int (*is_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq); - /* ctx */ - void (*save_ctx)(struct omap_mbox *mbox); - void (*restore_ctx)(struct omap_mbox *mbox); -}; - -struct omap_mbox_queue { - spinlock_t lock; - struct kfifo fifo; - struct work_struct work; - struct tasklet_struct tasklet; - struct omap_mbox *mbox; - bool full; -}; - -struct omap_mbox { - const char *name; - int irq; - struct omap_mbox_queue *txq, *rxq; - struct omap_mbox_ops *ops; - struct device *dev; - void *priv; - int use_count; - struct blocking_notifier_head notifier; -}; - -int omap_mbox_register(struct device *parent, struct omap_mbox **); -int omap_mbox_unregister(void); - -#endif /* OMAP_MBOX_H */ diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 443d03fbac47..8eeab72b93e2 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -331,7 +331,7 @@ static int bch_allocator_thread(void *arg) mutex_unlock(&ca->set->bucket_lock); blkdev_issue_discard(ca->bdev, bucket_to_sector(ca->set, bucket), - ca->sb.block_size, GFP_KERNEL, 0); + ca->sb.bucket_size, GFP_KERNEL, 0); mutex_lock(&ca->set->bucket_lock); } diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index d2ebcf323094..04f7bc28ef83 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -477,9 +477,13 @@ struct gc_stat { * CACHE_SET_STOPPING always gets set first when we're closing down a cache set; * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e. * flushing dirty data). + * + * CACHE_SET_RUNNING means all cache devices have been registered and journal + * replay is complete. 
*/ #define CACHE_SET_UNREGISTERING 0 #define CACHE_SET_STOPPING 1 +#define CACHE_SET_RUNNING 2 struct cache_set { struct closure cl; diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 545416415305..646fe85261c1 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -1182,7 +1182,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, { uint64_t start_time; bool used_mempool = false; - struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO, + struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order); if (!out) { struct page *outp; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index 5f6728d5d4dd..ae964624efb2 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -453,7 +453,7 @@ static inline bool bch_bkey_equal_header(const struct bkey *l, { return (KEY_DIRTY(l) == KEY_DIRTY(r) && KEY_PTRS(l) == KEY_PTRS(r) && - KEY_CSUM(l) == KEY_CSUM(l)); + KEY_CSUM(l) == KEY_CSUM(r)); } /* Keylists */ diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 7347b6100961..00cde40db572 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -117,9 +117,9 @@ ({ \ int _r, l = (b)->level - 1; \ bool _w = l <= (op)->lock; \ - struct btree *_child = bch_btree_node_get((b)->c, op, key, l, _w);\ + struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ + _w, b); \ if (!IS_ERR(_child)) { \ - _child->parent = (b); \ _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ rw_unlock(_w, _child); \ } else \ @@ -142,7 +142,6 @@ rw_lock(_w, _b, _b->level); \ if (_b == (c)->root && \ _w == insert_lock(op, _b)) { \ - _b->parent = NULL; \ _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ } \ rw_unlock(_w, _b); \ @@ -202,7 +201,7 @@ void bch_btree_node_read_done(struct btree *b) struct bset *i = btree_bset_first(b); struct btree_iter *iter; - iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT); + iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); iter->size = b->c->sb.bucket_size / b->c->sb.block_size; iter->used = 0; @@ -421,7 +420,7 @@ static void do_btree_node_write(struct btree *b) SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_sector_offset(&b->keys, i)); - if (!bio_alloc_pages(b->bio, GFP_NOIO)) { + if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) { int j; struct bio_vec *bv; void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); @@ -967,7 +966,8 @@ err: * level and op->lock. 
*/ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, - struct bkey *k, int level, bool write) + struct bkey *k, int level, bool write, + struct btree *parent) { int i = 0; struct btree *b; @@ -1002,6 +1002,7 @@ retry: BUG_ON(b->level != level); } + b->parent = parent; b->accessed = 1; for (; i <= b->keys.nsets && b->keys.set[i].size; i++) { @@ -1022,15 +1023,16 @@ retry: return b; } -static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level) +static void btree_node_prefetch(struct btree *parent, struct bkey *k) { struct btree *b; - mutex_lock(&c->bucket_lock); - b = mca_alloc(c, NULL, k, level); - mutex_unlock(&c->bucket_lock); + mutex_lock(&parent->c->bucket_lock); + b = mca_alloc(parent->c, NULL, k, parent->level - 1); + mutex_unlock(&parent->c->bucket_lock); if (!IS_ERR_OR_NULL(b)) { + b->parent = parent; bch_btree_node_read(b); rw_unlock(true, b); } @@ -1060,15 +1062,16 @@ static void btree_node_free(struct btree *b) mutex_unlock(&b->c->bucket_lock); } -struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, - int level) +struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, + int level, bool wait, + struct btree *parent) { BKEY_PADDED(key) k; struct btree *b = ERR_PTR(-EAGAIN); mutex_lock(&c->bucket_lock); retry: - if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, op != NULL)) + if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) goto err; bkey_put(c, &k.key); @@ -1085,6 +1088,7 @@ retry: } b->accessed = 1; + b->parent = parent; bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb)); mutex_unlock(&c->bucket_lock); @@ -1096,14 +1100,21 @@ err_free: err: mutex_unlock(&c->bucket_lock); - trace_bcache_btree_node_alloc_fail(b); + trace_bcache_btree_node_alloc_fail(c); return b; } +static struct btree *bch_btree_node_alloc(struct cache_set *c, + struct btree_op *op, int level, + struct btree *parent) +{ + return __bch_btree_node_alloc(c, op, level, op != NULL, parent); +} + static struct btree *btree_node_alloc_replacement(struct btree *b, struct btree_op *op) { - struct btree *n = bch_btree_node_alloc(b->c, op, b->level); + struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); if (!IS_ERR_OR_NULL(n)) { mutex_lock(&n->write_lock); bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); @@ -1403,6 +1414,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, BUG_ON(btree_bset_first(new_nodes[0])->keys); btree_node_free(new_nodes[0]); rw_unlock(true, new_nodes[0]); + new_nodes[0] = NULL; for (i = 0; i < nodes; i++) { if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) @@ -1516,7 +1528,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); if (k) { r->b = bch_btree_node_get(b->c, op, k, b->level - 1, - true); + true, b); if (IS_ERR(r->b)) { ret = PTR_ERR(r->b); break; @@ -1811,7 +1823,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); if (k) - btree_node_prefetch(b->c, k, b->level - 1); + btree_node_prefetch(b, k); if (p) ret = btree(check_recurse, p, b, op); @@ -1976,12 +1988,12 @@ static int btree_split(struct btree *b, struct btree_op *op, trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); - n2 = bch_btree_node_alloc(b->c, op, b->level); + n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); if (IS_ERR(n2)) goto err_free1; if (!b->parent) { - n3 = 
bch_btree_node_alloc(b->c, op, b->level + 1); + n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); if (IS_ERR(n3)) goto err_free2; } diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 91dfa5e69685..5c391fa01bed 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -242,9 +242,10 @@ void __bch_btree_node_write(struct btree *, struct closure *); void bch_btree_node_write(struct btree *, struct closure *); void bch_btree_set_root(struct btree *); -struct btree *bch_btree_node_alloc(struct cache_set *, struct btree_op *, int); +struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *, + int, bool, struct btree *); struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *, - struct bkey *, int, bool); + struct bkey *, int, bool, struct btree *); int bch_btree_insert_check_key(struct btree *, struct btree_op *, struct bkey *); diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 3a0de4cf9771..243de0bf15cd 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -474,9 +474,8 @@ out: return false; } -static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) +bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) { - struct btree *b = container_of(bk, struct btree, keys); char buf[80]; if (!KEY_SIZE(k)) @@ -485,16 +484,22 @@ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) if (KEY_SIZE(k) > KEY_OFFSET(k)) goto bad; - if (__ptr_invalid(b->c, k)) + if (__ptr_invalid(c, k)) goto bad; return false; bad: bch_extent_to_text(buf, sizeof(buf), k); - cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k)); + cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k)); return true; } +static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) +{ + struct btree *b = container_of(bk, struct btree, keys); + return __bch_extent_invalid(b->c, k); +} + static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, unsigned ptr) { diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h index e4e23409782d..e2ed54054e7a 100644 --- a/drivers/md/bcache/extents.h +++ b/drivers/md/bcache/extents.h @@ -9,5 +9,6 @@ struct cache_set; void bch_extent_to_text(char *, size_t, const struct bkey *); bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *); +bool __bch_extent_invalid(struct cache_set *, const struct bkey *); #endif /* _BCACHE_EXTENTS_H */ diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 59e82021b5bb..fe080ad0e558 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -7,6 +7,7 @@ #include "bcache.h" #include "btree.h" #include "debug.h" +#include "extents.h" #include <trace/events/bcache.h> @@ -189,11 +190,15 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) if (read_bucket(l)) goto bsearch; - if (list_empty(list)) + /* no journal entries on this device? 
*/ + if (l == ca->sb.njournal_buckets) continue; bsearch: + BUG_ON(list_empty(list)); + /* Binary search */ - m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); + m = l; + r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); pr_debug("starting binary search, l %u r %u", l, r); while (l + 1 < r) { @@ -291,15 +296,16 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) for (k = i->j.start; k < bset_bkey_last(&i->j); - k = bkey_next(k)) { - unsigned j; + k = bkey_next(k)) + if (!__bch_extent_invalid(c, k)) { + unsigned j; - for (j = 0; j < KEY_PTRS(k); j++) - if (ptr_available(c, k, j)) - atomic_inc(&PTR_BUCKET(c, k, j)->pin); + for (j = 0; j < KEY_PTRS(k); j++) + if (ptr_available(c, k, j)) + atomic_inc(&PTR_BUCKET(c, k, j)->pin); - bch_initial_mark_key(c, 0, k); - } + bch_initial_mark_key(c, 0, k); + } } } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 15fff4f68a7c..62e6e98186b5 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -311,7 +311,8 @@ void bch_data_insert(struct closure *cl) { struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); - trace_bcache_write(op->bio, op->writeback, op->bypass); + trace_bcache_write(op->c, op->inode, op->bio, + op->writeback, op->bypass); bch_keylist_init(&op->insert_keys); bio_get(op->bio); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 926ded8ccbf5..d4713d098a39 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -733,8 +733,6 @@ static void bcache_device_detach(struct bcache_device *d) static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, unsigned id) { - BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags)); - d->id = id; d->c = c; c->devices[id] = d; @@ -927,6 +925,7 @@ static void cached_dev_detach_finish(struct work_struct *w) list_move(&dc->list, &uncached_devices); clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); + clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); mutex_unlock(&bch_register_lock); @@ -1041,6 +1040,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) */ atomic_set(&dc->count, 1); + if (bch_cached_dev_writeback_start(dc)) + return -ENOMEM; + if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { bch_sectors_dirty_init(dc); atomic_set(&dc->has_dirty, 1); @@ -1070,7 +1072,8 @@ static void cached_dev_free(struct closure *cl) struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); cancel_delayed_work_sync(&dc->writeback_rate_update); - kthread_stop(dc->writeback_thread); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) + kthread_stop(dc->writeback_thread); mutex_lock(&bch_register_lock); @@ -1081,12 +1084,8 @@ static void cached_dev_free(struct closure *cl) mutex_unlock(&bch_register_lock); - if (!IS_ERR_OR_NULL(dc->bdev)) { - if (dc->bdev->bd_disk) - blk_sync_queue(bdev_get_queue(dc->bdev)); - + if (!IS_ERR_OR_NULL(dc->bdev)) blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); - } wake_up(&unregister_wait); @@ -1213,7 +1212,9 @@ void bch_flash_dev_release(struct kobject *kobj) static void flash_dev_free(struct closure *cl) { struct bcache_device *d = container_of(cl, struct bcache_device, cl); + mutex_lock(&bch_register_lock); bcache_device_free(d); + mutex_unlock(&bch_register_lock); kobject_put(&d->kobj); } @@ -1221,7 +1222,9 @@ static void flash_dev_flush(struct closure *cl) { struct bcache_device *d = container_of(cl, struct bcache_device, cl); + mutex_lock(&bch_register_lock); bcache_device_unlink(d); + 
mutex_unlock(&bch_register_lock); kobject_del(&d->kobj); continue_at(cl, flash_dev_free, system_wq); } @@ -1277,6 +1280,9 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size) if (test_bit(CACHE_SET_STOPPING, &c->flags)) return -EINTR; + if (!test_bit(CACHE_SET_RUNNING, &c->flags)) + return -EPERM; + u = uuid_find_empty(c); if (!u) { pr_err("Can't create volume, no room for UUID"); @@ -1346,8 +1352,11 @@ static void cache_set_free(struct closure *cl) bch_journal_free(c); for_each_cache(ca, c, i) - if (ca) + if (ca) { + ca->set = NULL; + c->cache[ca->sb.nr_this_dev] = NULL; kobject_put(&ca->kobj); + } bch_bset_sort_state_free(&c->sort); free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); @@ -1405,9 +1414,11 @@ static void cache_set_flush(struct closure *cl) if (ca->alloc_thread) kthread_stop(ca->alloc_thread); - cancel_delayed_work_sync(&c->journal.work); - /* flush last journal entry if needed */ - c->journal.work.work.func(&c->journal.work.work); + if (c->journal.cur) { + cancel_delayed_work_sync(&c->journal.work); + /* flush last journal entry if needed */ + c->journal.work.work.func(&c->journal.work.work); + } closure_return(cl); } @@ -1586,7 +1597,7 @@ static void run_cache_set(struct cache_set *c) goto err; err = "error reading btree root"; - c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true); + c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); if (IS_ERR_OR_NULL(c->root)) goto err; @@ -1661,7 +1672,7 @@ static void run_cache_set(struct cache_set *c) goto err; err = "cannot allocate new btree root"; - c->root = bch_btree_node_alloc(c, NULL, 0); + c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); if (IS_ERR_OR_NULL(c->root)) goto err; @@ -1697,6 +1708,7 @@ static void run_cache_set(struct cache_set *c) flash_devs_run(c); + set_bit(CACHE_SET_RUNNING, &c->flags); return; err: closure_sync(&cl); @@ -1760,6 +1772,7 @@ found: pr_debug("set version = %llu", c->sb.version); } + kobject_get(&ca->kobj); ca->set = c; ca->set->cache[ca->sb.nr_this_dev] = ca; c->cache_by_alloc[c->caches_loaded++] = ca; @@ -1780,8 +1793,10 @@ void bch_cache_release(struct kobject *kobj) struct cache *ca = container_of(kobj, struct cache, kobj); unsigned i; - if (ca->set) + if (ca->set) { + BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); ca->set->cache[ca->sb.nr_this_dev] = NULL; + } bio_split_pool_free(&ca->bio_split_hook); @@ -1798,10 +1813,8 @@ void bch_cache_release(struct kobject *kobj) if (ca->sb_bio.bi_inline_vecs[0].bv_page) put_page(ca->sb_bio.bi_io_vec[0].bv_page); - if (!IS_ERR_OR_NULL(ca->bdev)) { - blk_sync_queue(bdev_get_queue(ca->bdev)); + if (!IS_ERR_OR_NULL(ca->bdev)) blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); - } kfree(ca); module_put(THIS_MODULE); @@ -1844,7 +1857,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) } static void register_cache(struct cache_sb *sb, struct page *sb_page, - struct block_device *bdev, struct cache *ca) + struct block_device *bdev, struct cache *ca) { char name[BDEVNAME_SIZE]; const char *err = "cannot allocate memory"; @@ -1877,10 +1890,12 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page, goto err; pr_info("registered cache device %s", bdevname(bdev, name)); +out: + kobject_put(&ca->kobj); return; err: pr_notice("error opening %s: %s", bdevname(bdev, name), err); - kobject_put(&ca->kobj); + goto out; } /* Global interfaces/init */ @@ -1945,10 +1960,12 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (IS_ERR(bdev)) { if 
(bdev == ERR_PTR(-EBUSY)) { bdev = lookup_bdev(strim(path)); + mutex_lock(&bch_register_lock); if (!IS_ERR(bdev) && bch_is_open(bdev)) err = "device already registered"; else err = "device busy"; + mutex_unlock(&bch_register_lock); } goto err; } diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index ac7d0d1f70d7..98df7572b5f7 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -416,8 +416,8 @@ do { \ average_frequency, frequency_units); \ __print_time_stat(stats, name, \ average_duration, duration_units); \ - __print_time_stat(stats, name, \ - max_duration, duration_units); \ + sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \ + div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\ \ sysfs_print(name ## _last_ ## frequency_units, (stats)->last \ ? div_s64(local_clock() - (stats)->last, \ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index f4300e4c0114..f1986bcd1bf0 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -239,7 +239,7 @@ static void read_dirty(struct cached_dev *dc) if (KEY_START(&w->key) != dc->last_read || jiffies_to_msecs(delay) > 50) while (!kthread_should_stop() && delay) - delay = schedule_timeout_uninterruptible(delay); + delay = schedule_timeout_interruptible(delay); dc->last_read = KEY_OFFSET(&w->key); @@ -436,7 +436,7 @@ static int bch_writeback_thread(void *arg) while (delay && !kthread_should_stop() && !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) - delay = schedule_timeout_uninterruptible(delay); + delay = schedule_timeout_interruptible(delay); } } @@ -478,7 +478,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc) dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); } -int bch_cached_dev_writeback_init(struct cached_dev *dc) +void bch_cached_dev_writeback_init(struct cached_dev *dc) { sema_init(&dc->in_flight, 64); init_rwsem(&dc->writeback_lock); @@ -494,14 +494,20 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate_d_term = 30; dc->writeback_rate_p_term_inverse = 6000; + INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); +} + +int bch_cached_dev_writeback_start(struct cached_dev *dc) +{ dc->writeback_thread = kthread_create(bch_writeback_thread, dc, "bcache_writeback"); if (IS_ERR(dc->writeback_thread)) return PTR_ERR(dc->writeback_thread); - INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); + bch_writeback_queue(dc); + return 0; } diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index e2f8598937ac..0a9dab187b79 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -85,6 +85,7 @@ static inline void bch_writeback_add(struct cached_dev *dc) void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); void bch_sectors_dirty_init(struct cached_dev *dc); -int bch_cached_dev_writeback_init(struct cached_dev *); +void bch_cached_dev_writeback_init(struct cached_dev *); +int bch_cached_dev_writeback_start(struct cached_dev *); #endif diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index d2899e7eb3aa..06709257adde 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -330,7 +330,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) disk_super->discard_root = cpu_to_le64(cmd->discard_root); disk_super->discard_block_size = 
cpu_to_le64(cmd->discard_block_size); disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); - disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); + disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE); disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); disk_super->cache_blocks = cpu_to_le32(0); @@ -478,7 +478,7 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, bool may_format_device) { int r; - cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE, + cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, CACHE_METADATA_CACHE_SIZE, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index cd70a78623a3..7383c90ccdb8 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -9,19 +9,17 @@ #include "dm-cache-block-types.h" #include "dm-cache-policy-internal.h" +#include "persistent-data/dm-space-map-metadata.h" /*----------------------------------------------------------------*/ -#define DM_CACHE_METADATA_BLOCK_SIZE 4096 +#define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE /* FIXME: remove this restriction */ /* * The metadata device is currently limited in size. - * - * We have one block of index, which can hold 255 index entries. Each - * index entry contains allocation info about 16k metadata blocks. */ -#define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT))) +#define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS /* * A metadata device larger than 16GB triggers a warning. diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 2c63326638b6..7130505c2425 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -718,6 +718,22 @@ static int bio_triggers_commit(struct cache *cache, struct bio *bio) return bio->bi_rw & (REQ_FLUSH | REQ_FUA); } +/* + * You must increment the deferred set whilst the prison cell is held. To + * encourage this, we ask for 'cell' to be passed in. 
+ */ +static void inc_ds(struct cache *cache, struct bio *bio, + struct dm_bio_prison_cell *cell) +{ + size_t pb_data_size = get_per_bio_data_size(cache); + struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); + + BUG_ON(!cell); + BUG_ON(pb->all_io_entry); + + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); +} + static void issue(struct cache *cache, struct bio *bio) { unsigned long flags; @@ -737,6 +753,12 @@ static void issue(struct cache *cache, struct bio *bio) spin_unlock_irqrestore(&cache->lock, flags); } +static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) +{ + inc_ds(cache, bio, cell); + issue(cache, bio); +} + static void defer_writethrough_bio(struct cache *cache, struct bio *bio) { unsigned long flags; @@ -873,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) struct cache *cache = mg->cache; if (mg->writeback) { - cell_defer(cache, mg->old_ocell, false); clear_dirty(cache, mg->old_oblock, mg->cblock); + cell_defer(cache, mg->old_ocell, false); cleanup_migration(mg); return; @@ -929,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) } } else { + clear_dirty(cache, mg->new_oblock, mg->cblock); if (mg->requeue_holder) cell_defer(cache, mg->new_ocell, true); else { bio_endio(mg->new_ocell->holder, 0); cell_defer(cache, mg->new_ocell, false); } - clear_dirty(cache, mg->new_oblock, mg->cblock); cleanup_migration(mg); } } @@ -1015,6 +1037,11 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); + + /* + * No need to inc_ds() here, since the cell will be held for the + * duration of the io. + */ generic_make_request(bio); } @@ -1115,8 +1142,7 @@ static void check_for_quiesced_migrations(struct cache *cache, return; INIT_LIST_HEAD(&work); - if (pb->all_io_entry) - dm_deferred_entry_dec(pb->all_io_entry, &work); + dm_deferred_entry_dec(pb->all_io_entry, &work); if (!list_empty(&work)) queue_quiesced_migrations(cache, &work); @@ -1252,6 +1278,11 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) else remap_to_cache(cache, bio, 0); + /* + * REQ_FLUSH is not directed at any particular block so we don't + * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH + * by dm-core. 
+ */ issue(cache, bio); } @@ -1301,15 +1332,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio) &cache->stats.read_miss : &cache->stats.write_miss); } -static void issue_cache_bio(struct cache *cache, struct bio *bio, - struct per_bio_data *pb, - dm_oblock_t oblock, dm_cblock_t cblock) -{ - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - remap_to_cache_dirty(cache, bio, oblock, cblock); - issue(cache, bio); -} - static void process_bio(struct cache *cache, struct prealloc *structs, struct bio *bio) { @@ -1318,8 +1340,6 @@ static void process_bio(struct cache *cache, struct prealloc *structs, dm_oblock_t block = get_bio_block(cache, bio); struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; struct policy_result lookup_result; - size_t pb_data_size = get_per_bio_data_size(cache); - struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); bool discarded_block = is_discarded_oblock(cache, block); bool passthrough = passthrough_mode(&cache->features); bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache)); @@ -1359,9 +1379,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, } else { /* FIXME: factor out issue_origin() */ - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); remap_to_origin_clear_discard(cache, bio, block); - issue(cache, bio); + inc_and_issue(cache, bio, new_ocell); } } else { inc_hit_counter(cache, bio); @@ -1369,20 +1388,21 @@ static void process_bio(struct cache *cache, struct prealloc *structs, if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && !is_dirty(cache, lookup_result.cblock)) { - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); - issue(cache, bio); - } else - issue_cache_bio(cache, bio, pb, block, lookup_result.cblock); + inc_and_issue(cache, bio, new_ocell); + + } else { + remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); + inc_and_issue(cache, bio, new_ocell); + } } break; case POLICY_MISS: inc_miss_counter(cache, bio); - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); remap_to_origin_clear_discard(cache, bio, block); - issue(cache, bio); + inc_and_issue(cache, bio, new_ocell); break; case POLICY_NEW: @@ -1501,6 +1521,9 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) bio_list_init(&cache->deferred_flush_bios); spin_unlock_irqrestore(&cache->lock, flags); + /* + * These bios have already been through inc_ds() + */ while ((bio = bio_list_pop(&bios))) submit_bios ? 
generic_make_request(bio) : bio_io_error(bio); } @@ -1518,6 +1541,9 @@ static void process_deferred_writethrough_bios(struct cache *cache) bio_list_init(&cache->deferred_writethrough_bios); spin_unlock_irqrestore(&cache->lock, flags); + /* + * These bios have already been through inc_ds() + */ while ((bio = bio_list_pop(&bios))) generic_make_request(bio); } @@ -1694,6 +1720,7 @@ static void do_worker(struct work_struct *ws) if (commit_if_needed(cache)) { process_deferred_flush_bios(cache, false); + process_migrations(cache, &cache->need_commit_migrations, migration_failure); /* * FIXME: rollback metadata or just go into a @@ -2406,16 +2433,13 @@ out: return r; } -static int cache_map(struct dm_target *ti, struct bio *bio) +static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell) { - struct cache *cache = ti->private; - int r; dm_oblock_t block = get_bio_block(cache, bio); size_t pb_data_size = get_per_bio_data_size(cache); bool can_migrate = false; bool discarded_block; - struct dm_bio_prison_cell *cell; struct policy_result lookup_result; struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); @@ -2437,15 +2461,15 @@ static int cache_map(struct dm_target *ti, struct bio *bio) /* * Check to see if that block is currently migrating. */ - cell = alloc_prison_cell(cache); - if (!cell) { + *cell = alloc_prison_cell(cache); + if (!*cell) { defer_bio(cache, bio); return DM_MAPIO_SUBMITTED; } - r = bio_detain(cache, block, bio, cell, + r = bio_detain(cache, block, bio, *cell, (cell_free_fn) free_prison_cell, - cache, &cell); + cache, cell); if (r) { if (r < 0) defer_bio(cache, bio); @@ -2458,11 +2482,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio) r = policy_map(cache->policy, block, false, can_migrate, discarded_block, bio, &lookup_result); if (r == -EWOULDBLOCK) { - cell_defer(cache, cell, true); + cell_defer(cache, *cell, true); return DM_MAPIO_SUBMITTED; } else if (r) { DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); + cell_defer(cache, *cell, false); bio_io_error(bio); return DM_MAPIO_SUBMITTED; } @@ -2476,52 +2501,44 @@ static int cache_map(struct dm_target *ti, struct bio *bio) * We need to invalidate this block, so * defer for the worker thread. */ - cell_defer(cache, cell, true); + cell_defer(cache, *cell, true); r = DM_MAPIO_SUBMITTED; } else { - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); inc_miss_counter(cache, bio); remap_to_origin_clear_discard(cache, bio, block); - - cell_defer(cache, cell, false); } } else { inc_hit_counter(cache, bio); - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && !is_dirty(cache, lookup_result.cblock)) remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); else remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); - - cell_defer(cache, cell, false); } break; case POLICY_MISS: inc_miss_counter(cache, bio); - pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); - if (pb->req_nr != 0) { /* * This is a duplicate writethrough io that is no * longer needed because the block has been demoted. 
*/ bio_endio(bio, 0); - cell_defer(cache, cell, false); - return DM_MAPIO_SUBMITTED; - } else { + cell_defer(cache, *cell, false); + r = DM_MAPIO_SUBMITTED; + + } else remap_to_origin_clear_discard(cache, bio, block); - cell_defer(cache, cell, false); - } + break; default: DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, (unsigned) lookup_result.op); + cell_defer(cache, *cell, false); bio_io_error(bio); r = DM_MAPIO_SUBMITTED; } @@ -2529,6 +2546,21 @@ static int cache_map(struct dm_target *ti, struct bio *bio) return r; } +static int cache_map(struct dm_target *ti, struct bio *bio) +{ + int r; + struct dm_bio_prison_cell *cell; + struct cache *cache = ti->private; + + r = __cache_map(cache, bio, &cell); + if (r == DM_MAPIO_REMAPPED) { + inc_ds(cache, bio, cell); + cell_defer(cache, cell, false); + } + + return r; +} + static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) { struct cache *cache = ti->private; @@ -2808,7 +2840,7 @@ static void cache_status(struct dm_target *ti, status_type_t type, residency = policy_residency(cache->policy); DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ", - (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT), + (unsigned)DM_CACHE_METADATA_BLOCK_SIZE, (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), (unsigned long long)nr_blocks_metadata, cache->sectors_per_block, @@ -3062,7 +3094,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) */ if (io_opt_sectors < cache->sectors_per_block || do_div(io_opt_sectors, cache->sectors_per_block)) { - blk_limits_io_min(limits, 0); + blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); } set_discard_limits(cache, limits); @@ -3072,7 +3104,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 4, 0}, + .version = {1, 5, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4cba2d808afb..cd15e0801228 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -59,7 +59,7 @@ struct dm_crypt_io { int error; sector_t sector; struct dm_crypt_io *base_io; -}; +} CRYPTO_MINALIGN_ATTR; struct dm_crypt_request { struct convert_context *ctx; @@ -162,6 +162,8 @@ struct crypt_config { */ unsigned int dmreq_start; + unsigned int per_bio_data_size; + unsigned long flags; unsigned int key_size; unsigned int key_parts; /* independent parts in key buffer */ @@ -895,6 +897,15 @@ static void crypt_alloc_req(struct crypt_config *cc, kcryptd_async_done, dmreq_of_req(cc, ctx->req)); } +static void crypt_free_req(struct crypt_config *cc, + struct ablkcipher_request *req, struct bio *base_bio) +{ + struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); + + if ((struct ablkcipher_request *)(io + 1) != req) + mempool_free(req, cc->req_pool); +} + /* * Encrypt / decrypt data from one bio to another one (can be the same one) */ @@ -1008,12 +1019,9 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) } } -static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, - struct bio *bio, sector_t sector) +static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, + struct bio *bio, sector_t sector) { - struct dm_crypt_io *io; - - io = mempool_alloc(cc->io_pool, GFP_NOIO); io->cc = cc; io->base_bio = bio; io->sector = sector; 
@@ -1021,8 +1029,6 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, io->base_io = NULL; io->ctx.req = NULL; atomic_set(&io->io_pending, 0); - - return io; } static void crypt_inc_pending(struct dm_crypt_io *io) @@ -1046,8 +1052,9 @@ static void crypt_dec_pending(struct dm_crypt_io *io) return; if (io->ctx.req) - mempool_free(io->ctx.req, cc->req_pool); - mempool_free(io, cc->io_pool); + crypt_free_req(cc, io->ctx.req, base_bio); + if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size)) + mempool_free(io, cc->io_pool); if (likely(!base_io)) bio_endio(base_bio, error); @@ -1255,8 +1262,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) * between fragments, so switch to a new dm_crypt_io structure. */ if (unlikely(!crypt_finished && remaining)) { - new_io = crypt_io_alloc(io->cc, io->base_bio, - sector); + new_io = mempool_alloc(cc->io_pool, GFP_NOIO); + crypt_io_init(new_io, io->cc, io->base_bio, sector); crypt_inc_pending(new_io); crypt_convert_init(cc, &new_io->ctx, NULL, io->base_bio, sector); @@ -1325,7 +1332,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, if (error < 0) io->error = -EIO; - mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); + crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); if (!atomic_dec_and_test(&ctx->cc_pending)) return; @@ -1681,6 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) unsigned int key_size, opt_params; unsigned long long tmpll; int ret; + size_t iv_size_padding; struct dm_arg_set as; const char *opt_string; char dummy; @@ -1717,17 +1725,33 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->dmreq_start = sizeof(struct ablkcipher_request); cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); - cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); - cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) & - ~(crypto_tfm_ctx_alignment() - 1); + cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); + + if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { + /* Allocate the padding exactly */ + iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) + & crypto_ablkcipher_alignmask(any_tfm(cc)); + } else { + /* + * If the cipher requires greater alignment than kmalloc + * alignment, we don't know the exact position of the + * initialization vector. We must assume worst case. 
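The exact-padding branch above uses the usual trick for rounding an offset up to a power-of-two alignment: with mask = align - 1, the value (-offset) & mask is exactly the number of bytes missing to the next aligned boundary. A standalone sketch of the arithmetic; the 16-byte alignment and the structure sizes below are made-up numbers, chosen only to show the idiom:

#include <stddef.h>
#include <stdio.h>

/* Bytes needed to round 'offset' up to 'align' (align must be a power of two). */
static size_t pad_to(size_t offset, size_t align)
{
	size_t mask = align - 1;

	return -offset & mask;
}

int main(void)
{
	size_t dmreq_start = 200;	/* hypothetical start of the request area */
	size_t req_size = 52;		/* hypothetical sizeof(struct dm_crypt_request) */
	size_t pad = pad_to(dmreq_start + req_size, 16);

	printf("offset %zu needs %zu padding bytes -> IV lands at %zu\n",
	       dmreq_start + req_size, pad, dmreq_start + req_size + pad);
	return 0;
}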
+ */ + iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); + } cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + - sizeof(struct dm_crypt_request) + cc->iv_size); + sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); if (!cc->req_pool) { ti->error = "Cannot allocate crypt request mempool"; goto bad; } + cc->per_bio_data_size = ti->per_bio_data_size = + ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + + sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, + ARCH_KMALLOC_MINALIGN); + cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); if (!cc->page_pool) { ti->error = "Cannot allocate page mempool"; @@ -1824,7 +1848,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } - io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); + io = dm_per_bio_data(bio, cc->per_bio_data_size); + crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); + io->ctx.req = (struct ablkcipher_request *)(io + 1); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index db404a0f7e2c..c09359db3a90 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -33,7 +33,6 @@ struct dm_io_client { struct io { unsigned long error_bits; atomic_t count; - struct completion *wait; struct dm_io_client *client; io_notify_fn callback; void *context; @@ -112,28 +111,27 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, * We need an io object to keep track of the number of bios that * have been dispatched for a particular io. *---------------------------------------------------------------*/ -static void dec_count(struct io *io, unsigned int region, int error) +static void complete_io(struct io *io) { - if (error) - set_bit(region, &io->error_bits); + unsigned long error_bits = io->error_bits; + io_notify_fn fn = io->callback; + void *context = io->context; - if (atomic_dec_and_test(&io->count)) { - if (io->vma_invalidate_size) - invalidate_kernel_vmap_range(io->vma_invalidate_address, - io->vma_invalidate_size); + if (io->vma_invalidate_size) + invalidate_kernel_vmap_range(io->vma_invalidate_address, + io->vma_invalidate_size); - if (io->wait) - complete(io->wait); + mempool_free(io, io->client->pool); + fn(error_bits, context); +} - else { - unsigned long r = io->error_bits; - io_notify_fn fn = io->callback; - void *context = io->context; +static void dec_count(struct io *io, unsigned int region, int error) +{ + if (error) + set_bit(region, &io->error_bits); - mempool_free(io, io->client->pool); - fn(r, context); - } - } + if (atomic_dec_and_test(&io->count)) + complete_io(io); } static void endio(struct bio *bio, int error) @@ -376,41 +374,51 @@ static void dispatch_io(int rw, unsigned int num_regions, dec_count(io, 0, 0); } +struct sync_io { + unsigned long error_bits; + struct completion wait; +}; + +static void sync_io_complete(unsigned long error, void *context) +{ + struct sync_io *sio = context; + + sio->error_bits = error; + complete(&sio->wait); +} + static int sync_io(struct dm_io_client *client, unsigned int num_regions, struct dm_io_region *where, int rw, struct dpages *dp, unsigned long *error_bits) { - /* - * gcc <= 4.3 can't do the alignment for stack variables, so we must - * align it on our own. - * volatile prevents the optimizer from removing or reusing - * "io_" field from the stack frame (allowed in ANSI C). 
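With the on-stack, hand-aligned struct io removed, sync_io() in dm-io becomes the asynchronous path plus a wait: the struct io is taken from the client's mempool, sync_io_complete() records the error bits and completes, and the caller sleeps on the completion. A rough single-threaded userspace analogue of that shape, with a plain flag standing in for the kernel's struct completion; the names here are illustrative only:

#include <stdio.h>

typedef void (*io_notify_fn)(unsigned long error, void *context);

/* Stand-in for the asynchronous submission path; it "completes" immediately. */
static void async_op(io_notify_fn fn, void *context)
{
	fn(0, context);		/* 0: no error bits */
}

struct sync_ctx {
	unsigned long error_bits;
	int done;		/* the kernel uses a struct completion here */
};

static void sync_complete(unsigned long error, void *context)
{
	struct sync_ctx *ctx = context;

	ctx->error_bits = error;
	ctx->done = 1;		/* complete(&sio->wait) in the kernel */
}

/* Synchronous wrapper built entirely on top of the asynchronous path. */
static int sync_op(void)
{
	struct sync_ctx ctx = { 0, 0 };

	async_op(sync_complete, &ctx);
	while (!ctx.done)
		;		/* wait_for_completion_io(&sio.wait) in the kernel */

	return ctx.error_bits ? -1 : 0;
}

int main(void)
{
	printf("sync_op() returned %d\n", sync_op());
	return 0;
}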
- */ - volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; - struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); - DECLARE_COMPLETION_ONSTACK(wait); + struct io *io; + struct sync_io sio; if (num_regions > 1 && (rw & RW_MASK) != WRITE) { WARN_ON(1); return -EIO; } + init_completion(&sio.wait); + + io = mempool_alloc(client->pool, GFP_NOIO); io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->wait = &wait; io->client = client; + io->callback = sync_io_complete; + io->context = &sio; io->vma_invalidate_address = dp->vma_invalidate_address; io->vma_invalidate_size = dp->vma_invalidate_size; dispatch_io(rw, num_regions, where, dp, io, 1); - wait_for_completion_io(&wait); + wait_for_completion_io(&sio.wait); if (error_bits) - *error_bits = io->error_bits; + *error_bits = sio.error_bits; - return io->error_bits ? -EIO : 0; + return sio.error_bits ? -EIO : 0; } static int async_io(struct dm_io_client *client, unsigned int num_regions, @@ -428,7 +436,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io = mempool_alloc(client->pool, GFP_NOIO); io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->wait = NULL; io->client = client; io->callback = fn; io->context = context; @@ -481,9 +488,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, * New collapsed (a)synchronous interface. * * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug - * the queue with blk_unplug() some time later or set REQ_SYNC in -io_req->bi_rw. If you fail to do one of these, the IO will be submitted to - * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c. + * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw. + * If you fail to do one of these, the IO will be submitted to the disk after + * q->unplug_delay, which defaults to 3ms in blk-settings.c. */ int dm_io(struct dm_io_request *io_req, unsigned num_regions, struct dm_io_region *where, unsigned long *sync_error_bits) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index f4167b013d99..833d7e752f06 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -373,8 +373,6 @@ static int __must_push_back(struct multipath *m) dm_noflush_suspending(m->ti))); } -#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required) - /* * Map cloned requests */ @@ -402,11 +400,11 @@ static int multipath_map(struct dm_target *ti, struct request *clone, if (!__must_push_back(m)) r = -EIO; /* Failed */ goto out_unlock; - } - if (!pg_ready(m)) { + } else if (m->queue_io || m->pg_init_required) { __pg_init_all_paths(m); goto out_unlock; } + if (set_mapinfo(m, map_context) < 0) /* ENOMEM, requeue */ goto out_unlock; diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 09a688b3d48c..50fca469cafd 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -137,13 +137,23 @@ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr *bit *= sctx->region_table_entry_bits; } +static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr) +{ + unsigned long region_index; + unsigned bit; + + switch_get_position(sctx, region_nr, ®ion_index, &bit); + + return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & + ((1 << sctx->region_table_entry_bits) - 1); +} + /* * Find which path to use at given offset. 
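switch_region_table_read(), factored out above, is a plain packed-array lookup: region numbers index fixed-width entries packed into machine words, and the wanted entry is shifted down and masked out. A simplified standalone sketch, assuming 4-bit entries in 64-bit words with no entry straddling a word; the real driver picks the entry width and slot layout at constructor time:

#include <stdint.h>
#include <stdio.h>

#define ENTRY_BITS 4				/* width of one path number */
#define ENTRIES_PER_WORD (64 / ENTRY_BITS)

static unsigned table_read(const uint64_t *table, unsigned long nr)
{
	unsigned long word = nr / ENTRIES_PER_WORD;
	unsigned bit = (nr % ENTRIES_PER_WORD) * ENTRY_BITS;

	return (table[word] >> bit) & ((1u << ENTRY_BITS) - 1);
}

static void table_write(uint64_t *table, unsigned long nr, unsigned value)
{
	unsigned long word = nr / ENTRIES_PER_WORD;
	unsigned bit = (nr % ENTRIES_PER_WORD) * ENTRY_BITS;
	uint64_t mask = (uint64_t)((1u << ENTRY_BITS) - 1) << bit;

	table[word] = (table[word] & ~mask) | ((uint64_t)value << bit);
}

int main(void)
{
	uint64_t table[2] = { 0, 0 };

	table_write(table, 17, 9);	/* region 17 -> path 9 */
	printf("region 17 -> path %u\n", table_read(table, 17));
	return 0;
}

The new 'R<cycle_length>,<num_write>' argument handled in process_set_region_mappings() then amounts to num_write such writes, each copying the entry found cycle_length regions earlier.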
*/ static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset) { - unsigned long region_index; - unsigned bit, path_nr; + unsigned path_nr; sector_t p; p = offset; @@ -152,9 +162,7 @@ static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset) else sector_div(p, sctx->region_size); - switch_get_position(sctx, p, ®ion_index, &bit); - path_nr = (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & - ((1 << sctx->region_table_entry_bits) - 1); + path_nr = switch_region_table_read(sctx, p); /* This can only happen if the processor uses non-atomic stores. */ if (unlikely(path_nr >= sctx->nr_paths)) @@ -363,7 +371,7 @@ static __always_inline unsigned long parse_hex(const char **string) } static int process_set_region_mappings(struct switch_ctx *sctx, - unsigned argc, char **argv) + unsigned argc, char **argv) { unsigned i; unsigned long region_index = 0; @@ -372,6 +380,51 @@ static int process_set_region_mappings(struct switch_ctx *sctx, unsigned long path_nr; const char *string = argv[i]; + if ((*string & 0xdf) == 'R') { + unsigned long cycle_length, num_write; + + string++; + if (unlikely(*string == ',')) { + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); + return -EINVAL; + } + cycle_length = parse_hex(&string); + if (unlikely(*string != ',')) { + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); + return -EINVAL; + } + string++; + if (unlikely(!*string)) { + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); + return -EINVAL; + } + num_write = parse_hex(&string); + if (unlikely(*string)) { + DMWARN("invalid set_region_mappings argument: '%s'", argv[i]); + return -EINVAL; + } + + if (unlikely(!cycle_length) || unlikely(cycle_length - 1 > region_index)) { + DMWARN("invalid set_region_mappings cycle length: %lu > %lu", + cycle_length - 1, region_index); + return -EINVAL; + } + if (unlikely(region_index + num_write < region_index) || + unlikely(region_index + num_write >= sctx->nr_regions)) { + DMWARN("invalid set_region_mappings region number: %lu + %lu >= %lu", + region_index, num_write, sctx->nr_regions); + return -EINVAL; + } + + while (num_write--) { + region_index++; + path_nr = switch_region_table_read(sctx, region_index - cycle_length); + switch_region_table_write(sctx, region_index, path_nr); + } + + continue; + } + if (*string == ':') region_index++; else { @@ -500,7 +553,7 @@ static int switch_iterate_devices(struct dm_target *ti, static struct target_type switch_target = { .name = "switch", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = switch_ctr, .dtr = switch_dtr, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5f59f1e3e5b1..f9c6cb8dbcf8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1386,6 +1386,14 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, return q && !blk_queue_add_random(q); } +static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); +} + static bool dm_table_all_devices_attribute(struct dm_table *t, iterate_devices_callout_fn func) { @@ -1430,6 +1438,43 @@ static bool dm_table_supports_write_same(struct dm_table *t) return true; } +static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + 
return q && blk_queue_discard(q); +} + +static bool dm_table_supports_discards(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i = 0; + + /* + * Unless any target used by the table set discards_supported, + * require at least one underlying device to support discards. + * t->devices includes internal dm devices such as mirror logs + * so we need to use iterate_devices here, which targets + * supporting discard selectively must provide. + */ + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (!ti->num_discard_bios) + continue; + + if (ti->discards_supported) + return 1; + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, device_discard_capable, NULL)) + return 1; + } + + return 0; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1464,6 +1509,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; + if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) + queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + else + queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + dm_table_set_integrity(t); /* @@ -1636,39 +1686,3 @@ void dm_table_run_md_queue_async(struct dm_table *t) } EXPORT_SYMBOL(dm_table_run_md_queue_async); -static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return q && blk_queue_discard(q); -} - -bool dm_table_supports_discards(struct dm_table *t) -{ - struct dm_target *ti; - unsigned i = 0; - - /* - * Unless any target used by the table set discards_supported, - * require at least one underlying device to support discards. - * t->devices includes internal dm devices such as mirror logs - * so we need to use iterate_devices here, which targets - * supporting discard selectively must provide. - */ - while (i < dm_table_get_num_targets(t)) { - ti = dm_table_get_target(t, i++); - - if (!ti->num_discard_bios) - continue; - - if (ti->discards_supported) - return 1; - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_discard_capable, NULL)) - return 1; - } - - return 0; -} diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index fc9c848a60c9..4843801173fe 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -227,6 +227,7 @@ struct thin_c { struct list_head list; struct dm_dev *pool_dev; struct dm_dev *origin_dev; + sector_t origin_size; dm_thin_id dev_id; struct pool *pool; @@ -554,11 +555,16 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio, struct dm_thin_new_mapping { struct list_head list; - bool quiesced:1; - bool prepared:1; bool pass_discard:1; bool definitely_not_shared:1; + /* + * Track quiescing, copying and zeroing preparation actions. When this + * counter hits zero the block is prepared and can be inserted into the + * btree. 
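The quiesced/prepared booleans are replaced by a single prepare_actions count: each outstanding preparation step (quiescing, copying or zeroing, plus the reference the submitter holds) decrements it, and whichever decrement reaches zero queues the mapping. Roughly the same countdown, sketched with C11 atomics; the kernel code uses atomic_dec_and_test() with the list manipulation done under the pool lock:

#include <stdatomic.h>
#include <stdio.h>

struct mapping {
	atomic_int prepare_actions;	/* outstanding preparation steps */
};

static void mapping_prepared(struct mapping *m)
{
	(void)m;
	printf("all steps done, mapping can be inserted into the btree\n");
}

/* One completed step; the decrement that reaches zero finalises the mapping. */
static void complete_preparation(struct mapping *m)
{
	if (atomic_fetch_sub(&m->prepare_actions, 1) == 1)
		mapping_prepared(m);	/* atomic_dec_and_test() in the kernel */
}

int main(void)
{
	struct mapping m;

	/* quiesce + copy + the reference held by the submitting function */
	atomic_init(&m.prepare_actions, 3);

	complete_preparation(&m);	/* quiesced */
	complete_preparation(&m);	/* copy finished */
	complete_preparation(&m);	/* submitter drops its reference */
	return 0;
}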
+ */ + atomic_t prepare_actions; + int err; struct thin_c *tc; dm_block_t virt_block; @@ -575,43 +581,41 @@ struct dm_thin_new_mapping { bio_end_io_t *saved_bi_end_io; }; -static void __maybe_add_mapping(struct dm_thin_new_mapping *m) +static void __complete_mapping_preparation(struct dm_thin_new_mapping *m) { struct pool *pool = m->tc->pool; - if (m->quiesced && m->prepared) { + if (atomic_dec_and_test(&m->prepare_actions)) { list_add_tail(&m->list, &pool->prepared_mappings); wake_worker(pool); } } -static void copy_complete(int read_err, unsigned long write_err, void *context) +static void complete_mapping_preparation(struct dm_thin_new_mapping *m) { unsigned long flags; - struct dm_thin_new_mapping *m = context; struct pool *pool = m->tc->pool; - m->err = read_err || write_err ? -EIO : 0; - spin_lock_irqsave(&pool->lock, flags); - m->prepared = true; - __maybe_add_mapping(m); + __complete_mapping_preparation(m); spin_unlock_irqrestore(&pool->lock, flags); } +static void copy_complete(int read_err, unsigned long write_err, void *context) +{ + struct dm_thin_new_mapping *m = context; + + m->err = read_err || write_err ? -EIO : 0; + complete_mapping_preparation(m); +} + static void overwrite_endio(struct bio *bio, int err) { - unsigned long flags; struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); struct dm_thin_new_mapping *m = h->overwrite_mapping; - struct pool *pool = m->tc->pool; m->err = err; - - spin_lock_irqsave(&pool->lock, flags); - m->prepared = true; - __maybe_add_mapping(m); - spin_unlock_irqrestore(&pool->lock, flags); + complete_mapping_preparation(m); } /*----------------------------------------------------------------*/ @@ -821,10 +825,31 @@ static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) return m; } +static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m, + sector_t begin, sector_t end) +{ + int r; + struct dm_io_region to; + + to.bdev = tc->pool_dev->bdev; + to.sector = begin; + to.count = end - begin; + + r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); + if (r < 0) { + DMERR_LIMIT("dm_kcopyd_zero() failed"); + copy_complete(1, 1, m); + } +} + +/* + * A partial copy also needs to zero the uncopied region. + */ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, struct dm_dev *origin, dm_block_t data_origin, dm_block_t data_dest, - struct dm_bio_prison_cell *cell, struct bio *bio) + struct dm_bio_prison_cell *cell, struct bio *bio, + sector_t len) { int r; struct pool *pool = tc->pool; @@ -835,8 +860,15 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, m->data_block = data_dest; m->cell = cell; + /* + * quiesce action + copy action + an extra reference held for the + * duration of this function (we may need to inc later for a + * partial zero). + */ + atomic_set(&m->prepare_actions, 3); + if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) - m->quiesced = true; + complete_mapping_preparation(m); /* already quiesced */ /* * IO to pool_dev remaps to the pool target's data_dev. 
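schedule_copy() now takes an explicit length so that an external origin shorter than a full pool block can be handled: copy only what the origin covers and, when zero_new_blocks is set, zero the tail; schedule_external_copy() below chooses between a full copy, a partial copy and a plain zero based on tc->origin_size. A small sketch of that decision; the 128-sector block size and the sizes in main() are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define SECTORS_PER_BLOCK 128	/* invented pool block size, in sectors */

/* Decide how to provision virtual block 'virt_block' from a short origin. */
static void plan_block(uint64_t virt_block, uint64_t origin_size)
{
	uint64_t begin = virt_block * SECTORS_PER_BLOCK;
	uint64_t end = begin + SECTORS_PER_BLOCK;

	if (end <= origin_size)
		printf("block %llu: copy all %u sectors\n",
		       (unsigned long long)virt_block, (unsigned)SECTORS_PER_BLOCK);
	else if (begin < origin_size)
		printf("block %llu: copy %llu sectors, zero the tail\n",
		       (unsigned long long)virt_block,
		       (unsigned long long)(origin_size - begin));
	else
		printf("block %llu: past the origin end, zero the whole block\n",
		       (unsigned long long)virt_block);
}

int main(void)
{
	plan_block(1, 300);	/* sectors 128..256 fit inside a 300-sector origin */
	plan_block(2, 300);	/* sectors 256..384 only partly covered */
	plan_block(3, 300);	/* sectors 384..512 not covered at all */
	return 0;
}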
@@ -857,20 +889,38 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, from.bdev = origin->bdev; from.sector = data_origin * pool->sectors_per_block; - from.count = pool->sectors_per_block; + from.count = len; to.bdev = tc->pool_dev->bdev; to.sector = data_dest * pool->sectors_per_block; - to.count = pool->sectors_per_block; + to.count = len; r = dm_kcopyd_copy(pool->copier, &from, 1, &to, 0, copy_complete, m); if (r < 0) { - mempool_free(m, pool->mapping_pool); DMERR_LIMIT("dm_kcopyd_copy() failed"); - cell_error(pool, cell); + copy_complete(1, 1, m); + + /* + * We allow the zero to be issued, to simplify the + * error path. Otherwise we'd need to start + * worrying about decrementing the prepare_actions + * counter. + */ + } + + /* + * Do we need to zero a tail region? + */ + if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { + atomic_inc(&m->prepare_actions); + ll_zero(tc, m, + data_dest * pool->sectors_per_block + len, + (data_dest + 1) * pool->sectors_per_block); } } + + complete_mapping_preparation(m); /* drop our ref */ } static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, @@ -878,15 +928,8 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, struct dm_bio_prison_cell *cell, struct bio *bio) { schedule_copy(tc, virt_block, tc->pool_dev, - data_origin, data_dest, cell, bio); -} - -static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, - dm_block_t data_dest, - struct dm_bio_prison_cell *cell, struct bio *bio) -{ - schedule_copy(tc, virt_block, tc->origin_dev, - virt_block, data_dest, cell, bio); + data_origin, data_dest, cell, bio, + tc->pool->sectors_per_block); } static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, @@ -896,8 +939,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, struct pool *pool = tc->pool; struct dm_thin_new_mapping *m = get_next_mapping(pool); - m->quiesced = true; - m->prepared = false; + atomic_set(&m->prepare_actions, 1); /* no need to quiesce */ m->tc = tc; m->virt_block = virt_block; m->data_block = data_block; @@ -919,21 +961,33 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); inc_all_io_entry(pool, bio); remap_and_issue(tc, bio, data_block); - } else { - int r; - struct dm_io_region to; - to.bdev = tc->pool_dev->bdev; - to.sector = data_block * pool->sectors_per_block; - to.count = pool->sectors_per_block; + } else + ll_zero(tc, m, + data_block * pool->sectors_per_block, + (data_block + 1) * pool->sectors_per_block); +} - r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m); - if (r < 0) { - mempool_free(m, pool->mapping_pool); - DMERR_LIMIT("dm_kcopyd_zero() failed"); - cell_error(pool, cell); - } - } +static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, + dm_block_t data_dest, + struct dm_bio_prison_cell *cell, struct bio *bio) +{ + struct pool *pool = tc->pool; + sector_t virt_block_begin = virt_block * pool->sectors_per_block; + sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; + + if (virt_block_end <= tc->origin_size) + schedule_copy(tc, virt_block, tc->origin_dev, + virt_block, data_dest, cell, bio, + pool->sectors_per_block); + + else if (virt_block_begin < tc->origin_size) + schedule_copy(tc, virt_block, tc->origin_dev, + virt_block, data_dest, cell, bio, + tc->origin_size - virt_block_begin); + + else + schedule_zero(tc, virt_block, data_dest, cell, bio); } /* @@ 
-1315,7 +1369,18 @@ static void process_bio(struct thin_c *tc, struct bio *bio) inc_all_io_entry(pool, bio); cell_defer_no_holder(tc, cell); - remap_to_origin_and_issue(tc, bio); + if (bio_end_sector(bio) <= tc->origin_size) + remap_to_origin_and_issue(tc, bio); + + else if (bio->bi_iter.bi_sector < tc->origin_size) { + zero_fill_bio(bio); + bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; + remap_to_origin_and_issue(tc, bio); + + } else { + zero_fill_bio(bio); + bio_endio(bio, 0); + } } else provision_block(tc, bio, block, cell); break; @@ -3112,7 +3177,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) */ if (io_opt_sectors < pool->sectors_per_block || do_div(io_opt_sectors, pool->sectors_per_block)) { - blk_limits_io_min(limits, 0); + blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); } @@ -3141,7 +3206,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 12, 0}, + .version = {1, 13, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -3361,8 +3426,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err) spin_lock_irqsave(&pool->lock, flags); list_for_each_entry_safe(m, tmp, &work, list) { list_del(&m->list); - m->quiesced = true; - __maybe_add_mapping(m); + __complete_mapping_preparation(m); } spin_unlock_irqrestore(&pool->lock, flags); } @@ -3401,6 +3465,16 @@ static void thin_postsuspend(struct dm_target *ti) noflush_work(tc, do_noflush_stop); } +static int thin_preresume(struct dm_target *ti) +{ + struct thin_c *tc = ti->private; + + if (tc->origin_dev) + tc->origin_size = get_dev_size(tc->origin_dev->bdev); + + return 0; +} + /* * <nr mapped sectors> <highest mapped sector> */ @@ -3483,12 +3557,13 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 12, 0}, + .version = {1, 13, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, .map = thin_map, .end_io = thin_endio, + .preresume = thin_preresume, .presuspend = thin_presuspend, .postsuspend = thin_postsuspend, .status = thin_status, diff --git a/drivers/md/dm.h b/drivers/md/dm.h index ed76126aac54..e81d2152fa68 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -72,7 +72,6 @@ int dm_table_any_busy_target(struct dm_table *t); unsigned dm_table_get_type(struct dm_table *t); struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); -bool dm_table_supports_discards(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); diff --git a/drivers/md/md.c b/drivers/md/md.c index 32fc19c540d4..1294238610df 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5961,7 +5961,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd) int err = 0; if (mddev->pers) { - if (!mddev->pers->quiesce) + if (!mddev->pers->quiesce || !mddev->thread) return -EBUSY; if (mddev->recovery || mddev->sync_thread) return -EBUSY; @@ -6263,7 +6263,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) rv = update_raid_disks(mddev, info->raid_disks); if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { - if (mddev->pers->quiesce == NULL) + if (mddev->pers->quiesce == NULL || mddev->thread == NULL) return 
-EINVAL; if (mddev->recovery || mddev->sync_thread) return -EBUSY; @@ -7376,7 +7376,7 @@ void md_do_sync(struct md_thread *thread) struct mddev *mddev2; unsigned int currspeed = 0, window; - sector_t max_sectors,j, io_sectors; + sector_t max_sectors,j, io_sectors, recovery_done; unsigned long mark[SYNC_MARKS]; unsigned long update_time; sector_t mark_cnt[SYNC_MARKS]; @@ -7652,7 +7652,8 @@ void md_do_sync(struct md_thread *thread) */ cond_resched(); - currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 + recovery_done = io_sectors - atomic_read(&mddev->recovery_active); + currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 /((jiffies-mddev->resync_mark)/HZ +1) +1; if (currspeed > speed_min(mddev)) { @@ -8592,7 +8593,7 @@ static int __init md_init(void) goto err_mdp; mdp_major = ret; - blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE, + blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE, md_probe, NULL, NULL); blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, md_probe, NULL, NULL); @@ -8687,7 +8688,7 @@ static __exit void md_exit(void) struct list_head *tmp; int delay = 1; - blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); + blk_unregister_region(MKDEV(MD_MAJOR,0), 512); blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); unregister_blkdev(MD_MAJOR,"md"); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 407a99e46f69..cf91f5910c7c 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -685,6 +685,12 @@ static void *raid0_takeover(struct mddev *mddev) * raid10 - assuming we have all necessary active disks * raid1 - with (N -1) mirror drives faulty */ + + if (mddev->bitmap) { + printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n", + mdname(mddev)); + return ERR_PTR(-EBUSY); + } if (mddev->level == 4) return raid0_takeover_raid45(mddev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 56e24c072b62..55de4f6f7eaf 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -540,11 +540,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect has_nonrot_disk = 0; choose_next_idle = 0; - if (conf->mddev->recovery_cp < MaxSector && - (this_sector + sectors >= conf->next_resync)) - choose_first = 1; - else - choose_first = 0; + choose_first = (conf->mddev->recovery_cp < this_sector + sectors); for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { sector_t dist; @@ -831,7 +827,7 @@ static void flush_pending_writes(struct r1conf *conf) * there is no normal IO happeing. It must arrange to call * lower_barrier when the particular background IO completes. */ -static void raise_barrier(struct r1conf *conf) +static void raise_barrier(struct r1conf *conf, sector_t sector_nr) { spin_lock_irq(&conf->resync_lock); @@ -841,6 +837,7 @@ static void raise_barrier(struct r1conf *conf) /* block any new IO from starting */ conf->barrier++; + conf->next_resync = sector_nr; /* For these conditions we must wait: * A: while the array is in frozen state @@ -849,14 +846,17 @@ static void raise_barrier(struct r1conf *conf) * C: next_resync + RESYNC_SECTORS > start_next_window, meaning * next resync will reach to the window which normal bios are * handling. + * D: while there are any active requests in the current window. 
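Condition D above is the new part of raise_barrier(): besides the existing depth and window checks, the resync side now also waits until current_window_requests drains to zero before proceeding. A rough userspace analogue of that wait using a mutex and condition variable (build with -pthread); the counters and the one-second sleep merely stand in for in-flight normal writes completing:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_barrier = PTHREAD_COND_INITIALIZER;
static int barrier;
static int current_window_requests = 2;	/* two normal writes in flight */

static void *normal_io(void *arg)
{
	(void)arg;
	sleep(1);				/* the writes take a moment */
	pthread_mutex_lock(&lock);
	current_window_requests = 0;		/* last write in the window completes */
	pthread_cond_broadcast(&wait_barrier);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, normal_io, NULL);

	pthread_mutex_lock(&lock);
	barrier++;				/* block new normal I/O */
	while (current_window_requests != 0)	/* condition D */
		pthread_cond_wait(&wait_barrier, &lock);
	pthread_mutex_unlock(&lock);

	printf("window drained (barrier=%d), resync may proceed\n", barrier);
	pthread_join(t, NULL);
	return 0;
}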
*/ wait_event_lock_irq(conf->wait_barrier, !conf->array_frozen && conf->barrier < RESYNC_DEPTH && + conf->current_window_requests == 0 && (conf->start_next_window >= conf->next_resync + RESYNC_SECTORS), conf->resync_lock); + conf->nr_pending++; spin_unlock_irq(&conf->resync_lock); } @@ -866,6 +866,7 @@ static void lower_barrier(struct r1conf *conf) BUG_ON(conf->barrier <= 0); spin_lock_irqsave(&conf->resync_lock, flags); conf->barrier--; + conf->nr_pending--; spin_unlock_irqrestore(&conf->resync_lock, flags); wake_up(&conf->wait_barrier); } @@ -877,12 +878,10 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) if (conf->array_frozen || !bio) wait = true; else if (conf->barrier && bio_data_dir(bio) == WRITE) { - if (conf->next_resync < RESYNC_WINDOW_SECTORS) - wait = true; - else if ((conf->next_resync - RESYNC_WINDOW_SECTORS - >= bio_end_sector(bio)) || - (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_iter.bi_sector)) + if ((conf->mddev->curr_resync_completed + >= bio_end_sector(bio)) || + (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_iter.bi_sector)) wait = false; else wait = true; @@ -919,8 +918,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) } if (bio && bio_data_dir(bio) == WRITE) { - if (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_iter.bi_sector) { + if (bio->bi_iter.bi_sector >= + conf->mddev->curr_resync_completed) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + @@ -1186,6 +1185,7 @@ read_again: atomic_read(&bitmap->behind_writes) == 0); } r1_bio->read_disk = rdisk; + r1_bio->start_next_window = 0; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, @@ -1501,12 +1501,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) mddev->degraded++; set_bit(Faulty, &rdev->flags); spin_unlock_irqrestore(&conf->device_lock, flags); - /* - * if recovery is running, make sure it aborts. - */ - set_bit(MD_RECOVERY_INTR, &mddev->recovery); } else set_bit(Faulty, &rdev->flags); + /* + * if recovery is running, make sure it aborts. 
+ */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_CHANGE_DEVS, &mddev->flags); printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n" @@ -1548,8 +1548,13 @@ static void close_sync(struct r1conf *conf) mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; + spin_lock_irq(&conf->resync_lock); conf->next_resync = 0; conf->start_next_window = MaxSector; + conf->current_window_requests += + conf->next_window_requests; + conf->next_window_requests = 0; + spin_unlock_irq(&conf->resync_lock); } static int raid1_spare_active(struct mddev *mddev) @@ -2150,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, d--; rdev = conf->mirrors[d].rdev; if (rdev && - test_bit(In_sync, &rdev->flags)) + !test_bit(Faulty, &rdev->flags)) r1_sync_page_io(rdev, sect, s, conf->tmppage, WRITE); } @@ -2162,7 +2167,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, d--; rdev = conf->mirrors[d].rdev; if (rdev && - test_bit(In_sync, &rdev->flags)) { + !test_bit(Faulty, &rdev->flags)) { if (r1_sync_page_io(rdev, sect, s, conf->tmppage, READ)) { atomic_add(s, &rdev->corrected_errors); @@ -2541,9 +2546,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp bitmap_cond_end_sync(mddev->bitmap, sector_nr); r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); - raise_barrier(conf); - conf->next_resync = sector_nr; + raise_barrier(conf, sector_nr); rcu_read_lock(); /* diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index cb882aae9e20..6703751d87d7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1684,13 +1684,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) spin_unlock_irqrestore(&conf->device_lock, flags); return; } - if (test_and_clear_bit(In_sync, &rdev->flags)) { + if (test_and_clear_bit(In_sync, &rdev->flags)) mddev->degraded++; - /* - * if recovery is running, make sure it aborts. - */ - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - } + /* + * If recovery is running, make sure it aborts. + */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags); @@ -2954,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, */ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { end_reshape(conf); + close_sync(conf); return 0; } @@ -3082,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, } r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + r10_bio->state = 0; raise_barrier(conf, rb2 != NULL); atomic_set(&r10_bio->remaining, 0); @@ -3270,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, if (sync_blocks < max_sync) max_sync = sync_blocks; r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + r10_bio->state = 0; r10_bio->mddev = mddev; atomic_set(&r10_bio->remaining, 0); @@ -4385,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, read_more: /* Now schedule reads for blocks from sector_nr to last */ r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); + r10_bio->state = 0; raise_barrier(conf, sectors_done != 0); atomic_set(&r10_bio->remaining, 0); r10_bio->mddev = mddev; @@ -4399,6 +4402,7 @@ read_more: * on all the target devices. 
*/ // FIXME + mempool_free(r10_bio, conf->r10buf_pool); set_bit(MD_RECOVERY_INTR, &mddev->recovery); return sectors_done; } @@ -4411,7 +4415,7 @@ read_more: read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_sync_read; read_bio->bi_rw = READ; - read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); + read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); read_bio->bi_flags |= 1 << BIO_UPTODATE; read_bio->bi_vcnt = 0; read_bio->bi_iter.bi_size = 0; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6234b2e84587..9f0fbecd1eb5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -64,6 +64,10 @@ #define cpu_to_group(cpu) cpu_to_node(cpu) #define ANY_GROUP NUMA_NO_NODE +static bool devices_handle_discard_safely = false; +module_param(devices_handle_discard_safely, bool, 0644); +MODULE_PARM_DESC(devices_handle_discard_safely, + "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); static struct workqueue_struct *raid5_wq; /* * Stripe cache @@ -2922,7 +2926,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) && !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || (sh->raid_conf->level == 6 && s->failed && s->to_write && - s->to_write < sh->raid_conf->raid_disks - 2 && + s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 && (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) { /* we would like to get this block, possibly by computing it, * otherwise read it if the backing disk is insync @@ -3817,6 +3821,8 @@ static void handle_stripe(struct stripe_head *sh) set_bit(R5_Wantwrite, &dev->flags); if (prexor) continue; + if (s.failed > 1) + continue; if (!test_bit(R5_Insync, &dev->flags) || ((i == sh->pd_idx || i == sh->qd_idx) && s.failed == 0)) @@ -6206,7 +6212,7 @@ static int run(struct mddev *mddev) mddev->queue->limits.discard_granularity = stripe; /* * unaligned part of discard request will be ignored, so can't - * guarantee discard_zerors_data + * guarantee discard_zeroes_data */ mddev->queue->limits.discard_zeroes_data = 0; @@ -6231,6 +6237,18 @@ static int run(struct mddev *mddev) !bdev_get_queue(rdev->bdev)-> limits.discard_zeroes_data) discard_supported = false; + /* Unfortunately, discard_zeroes_data is not currently + * a guarantee - just a hint. So we only allow DISCARD + * if the sysadmin has confirmed that only safe devices + * are in use by setting a module parameter. 
+ */ + if (!devices_handle_discard_safely) { + if (discard_supported) { + pr_info("md/raid456: discard support disabled due to uncertainty.\n"); + pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); + } + discard_supported = false; + } } if (discard_supported && diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index f60bad491eb6..3c89fcbc621e 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -182,7 +182,6 @@ config MEDIA_SUBDRV_AUTOSELECT depends on HAS_IOMEM select I2C select I2C_MUX - select SPI default y help By default, a media driver auto-selects all possible ancillary diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c index 103ef6bad2e2..be763150b8aa 100644 --- a/drivers/media/common/cx2341x.c +++ b/drivers/media/common/cx2341x.c @@ -1490,6 +1490,7 @@ static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, { struct v4l2_ctrl_config cfg; + memset(&cfg, 0, sizeof(cfg)); cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); cfg.ops = &cx2341x_ops; cfg.id = id; diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c index 34b0d0ddeef3..97afee672d07 100644 --- a/drivers/media/common/saa7146/saa7146_core.c +++ b/drivers/media/common/saa7146/saa7146_core.c @@ -421,23 +421,20 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent err = -ENOMEM; /* get memory for various stuff */ - dev->d_rps0.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM, - &dev->d_rps0.dma_handle); + dev->d_rps0.cpu_addr = pci_zalloc_consistent(pci, SAA7146_RPS_MEM, + &dev->d_rps0.dma_handle); if (!dev->d_rps0.cpu_addr) goto err_free_irq; - memset(dev->d_rps0.cpu_addr, 0x0, SAA7146_RPS_MEM); - dev->d_rps1.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM, - &dev->d_rps1.dma_handle); + dev->d_rps1.cpu_addr = pci_zalloc_consistent(pci, SAA7146_RPS_MEM, + &dev->d_rps1.dma_handle); if (!dev->d_rps1.cpu_addr) goto err_free_rps0; - memset(dev->d_rps1.cpu_addr, 0x0, SAA7146_RPS_MEM); - dev->d_i2c.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM, - &dev->d_i2c.dma_handle); + dev->d_i2c.cpu_addr = pci_zalloc_consistent(pci, SAA7146_RPS_MEM, + &dev->d_i2c.dma_handle); if (!dev->d_i2c.cpu_addr) goto err_free_rps1; - memset(dev->d_i2c.cpu_addr, 0x0, SAA7146_RPS_MEM); /* the rest + print status message */ diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index d9e1d6395ed9..6c47f3fe9b0f 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -520,14 +520,15 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) configuration data) */ dev->ext_vv_data = ext_vv; - vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle); + vv->d_clipping.cpu_addr = + pci_zalloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, + &vv->d_clipping.dma_handle); if( NULL == vv->d_clipping.cpu_addr ) { ERR("out of memory. 
aborting.\n"); kfree(vv); v4l2_ctrl_handler_free(hdl); return -1; } - memset(vv->d_clipping.cpu_addr, 0x0, SAA7146_CLIPPING_MEM); saa7146_video_uops.init(dev,vv); if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE) diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h index 5135a096bfa6..12ce19c98ded 100644 --- a/drivers/media/dvb-core/dvb-usb-ids.h +++ b/drivers/media/dvb-core/dvb-usb-ids.h @@ -280,6 +280,8 @@ #define USB_PID_PCTV_400E 0x020f #define USB_PID_PCTV_450E 0x0222 #define USB_PID_PCTV_452E 0x021f +#define USB_PID_PCTV_78E 0x025a +#define USB_PID_PCTV_79E 0x0262 #define USB_PID_REALTEK_RTL2831U 0x2831 #define USB_PID_REALTEK_RTL2832U 0x2832 #define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007 diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c index be4bec2a9640..5c90ea683a7e 100644 --- a/drivers/media/dvb-frontends/af9033.c +++ b/drivers/media/dvb-frontends/af9033.c @@ -314,6 +314,19 @@ static int af9033_init(struct dvb_frontend *fe) goto err; } + /* feed clock to RF tuner */ + switch (state->cfg.tuner) { + case AF9033_TUNER_IT9135_38: + case AF9033_TUNER_IT9135_51: + case AF9033_TUNER_IT9135_52: + case AF9033_TUNER_IT9135_60: + case AF9033_TUNER_IT9135_61: + case AF9033_TUNER_IT9135_62: + ret = af9033_wr_reg(state, 0x80fba8, 0x00); + if (ret < 0) + goto err; + } + /* settings for TS interface */ if (state->cfg.ts_mode == AF9033_TS_MODE_USB) { ret = af9033_wr_reg_mask(state, 0x80f9a5, 0x00, 0x01); diff --git a/drivers/media/dvb-frontends/af9033_priv.h b/drivers/media/dvb-frontends/af9033_priv.h index fc2ad581e302..ded7b67d7526 100644 --- a/drivers/media/dvb-frontends/af9033_priv.h +++ b/drivers/media/dvb-frontends/af9033_priv.h @@ -1418,7 +1418,7 @@ static const struct reg_val tuner_init_it9135_60[] = { { 0x800068, 0x0a }, { 0x80006a, 0x03 }, { 0x800070, 0x0a }, - { 0x800071, 0x05 }, + { 0x800071, 0x0a }, { 0x800072, 0x02 }, { 0x800075, 0x8c }, { 0x800076, 0x8c }, @@ -1484,7 +1484,6 @@ static const struct reg_val tuner_init_it9135_60[] = { { 0x800104, 0x02 }, { 0x800105, 0xbe }, { 0x800106, 0x00 }, - { 0x800109, 0x02 }, { 0x800115, 0x0a }, { 0x800116, 0x03 }, { 0x80011a, 0xbe }, @@ -1510,7 +1509,6 @@ static const struct reg_val tuner_init_it9135_60[] = { { 0x80014b, 0x8c }, { 0x80014d, 0xac }, { 0x80014e, 0xc6 }, - { 0x80014f, 0x03 }, { 0x800151, 0x1e }, { 0x800153, 0xbc }, { 0x800178, 0x09 }, @@ -1522,9 +1520,10 @@ static const struct reg_val tuner_init_it9135_60[] = { { 0x80018d, 0x5f }, { 0x80018f, 0xa0 }, { 0x800190, 0x5a }, - { 0x80ed02, 0xff }, - { 0x80ee42, 0xff }, - { 0x80ee82, 0xff }, + { 0x800191, 0x00 }, + { 0x80ed02, 0x40 }, + { 0x80ee42, 0x40 }, + { 0x80ee82, 0x40 }, { 0x80f000, 0x0f }, { 0x80f01f, 0x8c }, { 0x80f020, 0x00 }, @@ -1699,7 +1698,6 @@ static const struct reg_val tuner_init_it9135_61[] = { { 0x800104, 0x02 }, { 0x800105, 0xc8 }, { 0x800106, 0x00 }, - { 0x800109, 0x02 }, { 0x800115, 0x0a }, { 0x800116, 0x03 }, { 0x80011a, 0xc6 }, @@ -1725,7 +1723,6 @@ static const struct reg_val tuner_init_it9135_61[] = { { 0x80014b, 0x8c }, { 0x80014d, 0xa8 }, { 0x80014e, 0xc6 }, - { 0x80014f, 0x03 }, { 0x800151, 0x28 }, { 0x800153, 0xcc }, { 0x800178, 0x09 }, @@ -1737,9 +1734,10 @@ static const struct reg_val tuner_init_it9135_61[] = { { 0x80018d, 0x5f }, { 0x80018f, 0xfb }, { 0x800190, 0x5c }, - { 0x80ed02, 0xff }, - { 0x80ee42, 0xff }, - { 0x80ee82, 0xff }, + { 0x800191, 0x00 }, + { 0x80ed02, 0x40 }, + { 0x80ee42, 0x40 }, + { 0x80ee82, 0x40 }, { 0x80f000, 0x0f }, { 0x80f01f, 0x8c }, { 
0x80f020, 0x00 }, diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c index 72fb5838cae0..7975c6608e20 100644 --- a/drivers/media/dvb-frontends/cx24123.c +++ b/drivers/media/dvb-frontends/cx24123.c @@ -1095,6 +1095,7 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config, sizeof(state->tuner_i2c_adapter.name)); state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo; state->tuner_i2c_adapter.algo_data = NULL; + state->tuner_i2c_adapter.dev.parent = i2c->dev.parent; i2c_set_adapdata(&state->tuner_i2c_adapter, state); if (i2c_add_adapter(&state->tuner_i2c_adapter) < 0) { err("tuner i2c bus could not be initialized\n"); diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index d4fa213ba74a..de88b980a837 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2325,7 +2325,7 @@ static int adv7604_log_status(struct v4l2_subdev *sd) v4l2_info(sd, "HDCP keys read: %s%s\n", (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no", (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : ""); - if (!is_hdmi(sd)) { + if (is_hdmi(sd)) { bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01; bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01; bool audio_mute = io_read(sd, 0x65) & 0x40; diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 1eaf975d3612..62acb10630f9 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -1282,19 +1282,12 @@ static int smiapp_set_power(struct v4l2_subdev *subdev, int on) mutex_lock(&sensor->power_mutex); - /* - * If the power count is modified from 0 to != 0 or from != 0 - * to 0, update the power state. - */ - if (!sensor->power_count == !on) - goto out; - - if (on) { + if (on && !sensor->power_count) { /* Power on and perform initialisation. 
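The rewritten smiapp_set_power() check below is the usual use-count idiom: only the 0 -> 1 transition actually powers the sensor up and only the 1 -> 0 transition powers it down, while nested callers just adjust the count. A lock-free toy version of the idiom; the real function runs under power_mutex and propagates the power-on error:

#include <stdio.h>

static int power_count;

static void power_on(void)  { printf("sensor powered on\n"); }
static void power_off(void) { printf("sensor powered off\n"); }

/* Only the 0 -> 1 and 1 -> 0 transitions touch the hardware. */
static void set_power(int on)
{
	if (on && power_count == 0)
		power_on();
	else if (!on && power_count == 1)
		power_off();

	power_count += on ? 1 : -1;
}

int main(void)
{
	set_power(1);	/* first user: powers the sensor on */
	set_power(1);	/* nested user: count only */
	set_power(0);	/* one user still holds it */
	set_power(0);	/* last user gone: powers off */
	return 0;
}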
*/ ret = smiapp_power_on(sensor); if (ret < 0) goto out; - } else { + } else if (!on && sensor->power_count == 1) { smiapp_power_off(sensor); } @@ -2572,7 +2565,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev) this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; this->sd.internal_ops = &smiapp_internal_ops; - this->sd.owner = NULL; + this->sd.owner = THIS_MODULE; v4l2_set_subdevdata(&this->sd, client); rval = media_entity_init(&this->sd.entity, diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index d0c281f41a0a..11765835d7b2 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -101,28 +101,20 @@ static int bt878_mem_alloc(struct bt878 *bt) if (!bt->buf_cpu) { bt->buf_size = 128 * 1024; - bt->buf_cpu = - pci_alloc_consistent(bt->dev, bt->buf_size, - &bt->buf_dma); - + bt->buf_cpu = pci_zalloc_consistent(bt->dev, bt->buf_size, + &bt->buf_dma); if (!bt->buf_cpu) return -ENOMEM; - - memset(bt->buf_cpu, 0, bt->buf_size); } if (!bt->risc_cpu) { bt->risc_size = PAGE_SIZE; - bt->risc_cpu = - pci_alloc_consistent(bt->dev, bt->risc_size, - &bt->risc_dma); - + bt->risc_cpu = pci_zalloc_consistent(bt->dev, bt->risc_size, + &bt->risc_dma); if (!bt->risc_cpu) { bt878_mem_free(bt); return -ENOMEM; } - - memset(bt->risc_cpu, 0, bt->risc_size); } return 0; diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c index 716bdc57fac6..83f5074706f9 100644 --- a/drivers/media/pci/cx18/cx18-driver.c +++ b/drivers/media/pci/cx18/cx18-driver.c @@ -1091,6 +1091,7 @@ static int cx18_probe(struct pci_dev *pci_dev, setup.addr = ADDR_UNSET; setup.type = cx->options.tuner; setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */ + setup.config = NULL; if (cx->options.radio > 0) setup.mode_mask |= T_RADIO; setup.tuner_callback = (setup.type == TUNER_XC2028) ? diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 826228c3800e..4930b55fd5f4 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -1075,12 +1075,11 @@ static int AllocCommonBuffers(struct ngene *dev) dev->ngenetohost = dev->FWInterfaceBuffer + 256; dev->EventBuffer = dev->FWInterfaceBuffer + 512; - dev->OverflowBuffer = pci_alloc_consistent(dev->pci_dev, - OVERFLOW_BUFFER_SIZE, - &dev->PAOverflowBuffer); + dev->OverflowBuffer = pci_zalloc_consistent(dev->pci_dev, + OVERFLOW_BUFFER_SIZE, + &dev->PAOverflowBuffer); if (!dev->OverflowBuffer) return -ENOMEM; - memset(dev->OverflowBuffer, 0, OVERFLOW_BUFFER_SIZE); for (i = STREAM_VIDEOIN1; i < MAX_STREAM; i++) { int type = dev->card_info->io_type[i]; diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c index 998919e97dfe..7b35e633118d 100644 --- a/drivers/media/radio/radio-miropcm20.c +++ b/drivers/media/radio/radio-miropcm20.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/init.h> +#include <linux/io.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <linux/kthread.h> diff --git a/drivers/media/rc/keymaps/Kconfig b/drivers/media/rc/keymaps/Kconfig index 8e615fd55852..767423bbbdd0 100644 --- a/drivers/media/rc/keymaps/Kconfig +++ b/drivers/media/rc/keymaps/Kconfig @@ -12,4 +12,4 @@ config RC_MAP The ir-keytable program, available at v4l-utils package provide the tool and the same RC maps for load from userspace. 
Its available at - http://git.linuxtv.org/v4l-utils + http://git.linuxtv.org/cgit.cgi/v4l-utils.git/ diff --git a/drivers/media/tuners/tuner_it913x.c b/drivers/media/tuners/tuner_it913x.c index 6f30d7e535b8..3d83c425bccf 100644 --- a/drivers/media/tuners/tuner_it913x.c +++ b/drivers/media/tuners/tuner_it913x.c @@ -396,6 +396,7 @@ struct dvb_frontend *it913x_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c_adap, u8 i2c_addr, u8 config) { struct it913x_state *state = NULL; + int ret; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct it913x_state), GFP_KERNEL); @@ -425,6 +426,11 @@ struct dvb_frontend *it913x_attach(struct dvb_frontend *fe, state->tuner_type = config; state->firmware_ver = 1; + /* tuner RF initial */ + ret = it913x_wr_reg(state, PRO_DMOD, 0xec4c, 0x68); + if (ret < 0) + goto error; + fe->tuner_priv = state; memcpy(&fe->ops.tuner_ops, &it913x_tuner_ops, sizeof(struct dvb_tuner_ops)); diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 75ec1c659fdd..c82beac0e0cb 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -1575,6 +1575,10 @@ static const struct usb_device_id af9035_id_table[] = { &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900, &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) }, + { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E, + &af9035_props, "PCTV 78e", RC_MAP_IT913X_V1) }, + { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E, + &af9035_props, "PCTV 79e", RC_MAP_IT913X_V2) }, { } }; MODULE_DEVICE_TABLE(usb, af9035_id_table); diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index a7e24848f6c8..9da812b8a786 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = { .disconnect = em28xx_usb_disconnect, .suspend = em28xx_usb_suspend, .resume = em28xx_usb_resume, + .reset_resume = em28xx_usb_resume, .id_table = em28xx_id_table, }; diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c index 90dec2955f1c..29abc379551e 100644 --- a/drivers/media/usb/em28xx/em28xx-video.c +++ b/drivers/media/usb/em28xx/em28xx-video.c @@ -1342,7 +1342,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct em28xx *dev = video_drvdata(file); struct em28xx_v4l2 *v4l2 = dev->v4l2; - if (v4l2->streaming_users > 0) + if (vb2_is_busy(&v4l2->vb_vidq)) return -EBUSY; vidioc_try_fmt_vid_cap(file, priv, f); @@ -1883,8 +1883,9 @@ static int em28xx_v4l2_open(struct file *filp) return -EINVAL; } - em28xx_videodbg("open dev=%s type=%s\n", - video_device_node_name(vdev), v4l2_type_names[fh_type]); + em28xx_videodbg("open dev=%s type=%s users=%d\n", + video_device_node_name(vdev), v4l2_type_names[fh_type], + v4l2->users); if (mutex_lock_interruptible(&dev->lock)) return -ERESTARTSYS; @@ -1897,9 +1898,7 @@ static int em28xx_v4l2_open(struct file *filp) return ret; } - if (v4l2_fh_is_singular_file(filp)) { - em28xx_videodbg("first opened filehandle, initializing device\n"); - + if (v4l2->users == 0) { em28xx_set_mode(dev, EM28XX_ANALOG_MODE); if (vdev->vfl_type != VFL_TYPE_RADIO) @@ -1910,8 +1909,6 @@ static int em28xx_v4l2_open(struct file *filp) * of some i2c devices */ em28xx_wake_i2c(dev); - } else { - em28xx_videodbg("further filehandles are already opened\n"); } if (vdev->vfl_type == VFL_TYPE_RADIO) { @@ -1921,6 +1918,7 
@@ static int em28xx_v4l2_open(struct file *filp) kref_get(&dev->ref); kref_get(&v4l2->ref); + v4l2->users++; mutex_unlock(&dev->lock); @@ -2027,11 +2025,12 @@ static int em28xx_v4l2_close(struct file *filp) struct em28xx_v4l2 *v4l2 = dev->v4l2; int errCode; - mutex_lock(&dev->lock); + em28xx_videodbg("users=%d\n", v4l2->users); - if (v4l2_fh_is_singular_file(filp)) { - em28xx_videodbg("last opened filehandle, shutting down device\n"); + vb2_fop_release(filp); + mutex_lock(&dev->lock); + if (v4l2->users == 1) { /* No sense to try to write to the device */ if (dev->disconnected) goto exit; @@ -2050,12 +2049,10 @@ static int em28xx_v4l2_close(struct file *filp) em28xx_errdev("cannot change alternate number to " "0 (error=%i)\n", errCode); } - } else { - em28xx_videodbg("further opened filehandles left\n"); } exit: - vb2_fop_release(filp); + v4l2->users--; kref_put(&v4l2->ref, em28xx_free_v4l2); mutex_unlock(&dev->lock); kref_put(&dev->ref, em28xx_free_device); diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 84ef8efdb148..4360338e7b31 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -524,6 +524,7 @@ struct em28xx_v4l2 { int sensor_yres; int sensor_xtal; + int users; /* user count for exclusive use */ int streaming_users; /* number of actively streaming users */ u32 frequency; /* selected tuner frequency */ diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c index f166ffc9800a..cef7a00099ea 100644 --- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c +++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c @@ -803,11 +803,9 @@ static int ttusb_alloc_iso_urbs(struct ttusb *ttusb) { int i; - ttusb->iso_buffer = pci_alloc_consistent(NULL, - ISO_FRAME_SIZE * - FRAMES_PER_ISO_BUF * - ISO_BUF_COUNT, - &ttusb->iso_dma_handle); + ttusb->iso_buffer = pci_zalloc_consistent(NULL, + ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT, + &ttusb->iso_dma_handle); if (!ttusb->iso_buffer) { dprintk("%s: pci_alloc_consistent - not enough memory\n", @@ -815,9 +813,6 @@ static int ttusb_alloc_iso_urbs(struct ttusb *ttusb) return -ENOMEM; } - memset(ttusb->iso_buffer, 0, - ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT); - for (i = 0; i < ISO_BUF_COUNT; i++) { struct urb *urb; diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 29724af9b9ab..15ab584cf265 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -1151,11 +1151,9 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec) dprintk("%s\n", __func__); - dec->iso_buffer = pci_alloc_consistent(NULL, - ISO_FRAME_SIZE * - (FRAMES_PER_ISO_BUF * - ISO_BUF_COUNT), - &dec->iso_dma_handle); + dec->iso_buffer = pci_zalloc_consistent(NULL, + ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT), + &dec->iso_dma_handle); if (!dec->iso_buffer) { dprintk("%s: pci_alloc_consistent - not enough memory\n", @@ -1163,9 +1161,6 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec) return -ENOMEM; } - memset(dec->iso_buffer, 0, - ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT)); - for (i = 0; i < ISO_BUF_COUNT; i++) { struct urb *urb; diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 9ca0f8d59a14..ba7e21a73023 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -25,6 +25,15 @@ config VIDEO_FIXED_MINOR_RANGES When in doubt, say N. 
+config VIDEO_PCI_SKELETON + tristate "Skeleton PCI V4L2 driver" + depends on PCI && BUILD_DOCSRC + depends on VIDEO_V4L2 && VIDEOBUF2_CORE + depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG + ---help--- + Enable build of the skeleton PCI driver, used as a reference + when developing new drivers. + # Used by drivers that need tuner.ko config VIDEO_TUNER tristate diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index c359006074a8..25d3ae2188cb 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -971,6 +971,7 @@ static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) * to the userspace. */ req->count = allocated_buffers; + q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); return 0; } @@ -1018,6 +1019,7 @@ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); q->memory = create->memory; + q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); } num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); @@ -1130,7 +1132,7 @@ EXPORT_SYMBOL_GPL(vb2_plane_vaddr); */ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) { - if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv); @@ -1165,13 +1167,10 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) return; - if (!q->start_streaming_called) { - if (WARN_ON(state != VB2_BUF_STATE_QUEUED)) - state = VB2_BUF_STATE_QUEUED; - } else if (WARN_ON(state != VB2_BUF_STATE_DONE && - state != VB2_BUF_STATE_ERROR)) { - state = VB2_BUF_STATE_ERROR; - } + if (WARN_ON(state != VB2_BUF_STATE_DONE && + state != VB2_BUF_STATE_ERROR && + state != VB2_BUF_STATE_QUEUED)) + state = VB2_BUF_STATE_ERROR; #ifdef CONFIG_VIDEO_ADV_DEBUG /* @@ -1762,6 +1761,12 @@ static int vb2_start_streaming(struct vb2_queue *q) q->start_streaming_called = 0; dprintk(1, "driver refused to start streaming\n"); + /* + * If you see this warning, then the driver isn't cleaning up properly + * after a failed start_streaming(). See the start_streaming() + * documentation in videobuf2-core.h for more information how buffers + * should be returned to vb2 in start_streaming(). + */ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { unsigned i; @@ -1777,6 +1782,12 @@ static int vb2_start_streaming(struct vb2_queue *q) /* Must be zero now */ WARN_ON(atomic_read(&q->owned_by_drv_count)); } + /* + * If done_list is not empty, then start_streaming() didn't call + * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or + * STATE_DONE. + */ + WARN_ON(!list_empty(&q->done_list)); return ret; } @@ -1812,6 +1823,7 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) */ list_add_tail(&vb->queued_entry, &q->queued_list); q->queued_count++; + q->waiting_for_buffers = false; vb->state = VB2_BUF_STATE_QUEUED; if (V4L2_TYPE_IS_OUTPUT(q->type)) { /* @@ -2123,6 +2135,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q) if (q->start_streaming_called) call_void_qop(q, stop_streaming, q); + /* + * If you see this warning, then the driver isn't cleaning up properly + * in stop_streaming(). 
See the stop_streaming() documentation in + * videobuf2-core.h for more information how buffers should be returned + * to vb2 in stop_streaming(). + */ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { for (i = 0; i < q->num_buffers; ++i) if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) @@ -2272,6 +2290,7 @@ static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) * their normal dequeued state. */ __vb2_queue_cancel(q); + q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type); dprintk(3, "successful\n"); return 0; @@ -2590,10 +2609,17 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) } /* - * There is nothing to wait for if no buffer has been queued and the - * queue isn't streaming, or if the error flag is set. + * There is nothing to wait for if the queue isn't streaming, or if the + * error flag is set. + */ + if (!vb2_is_streaming(q) || q->error) + return res | POLLERR; + /* + * For compatibility with vb1: if QBUF hasn't been called yet, then + * return POLLERR as well. This only affects capture queues, output + * queues will always initialize waiting_for_buffers to false. */ - if ((list_empty(&q->queued_list) && !vb2_is_streaming(q)) || q->error) + if (q->waiting_for_buffers) return res | POLLERR; /* diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index adefc31bb853..9b163a440f89 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -113,7 +113,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla goto fail_pages_alloc; ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, - buf->num_pages, 0, size, gfp_flags); + buf->num_pages, 0, size, GFP_KERNEL); if (ret) goto fail_table_alloc; diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index c59e9c96e86d..fab81a143bd7 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -61,6 +61,16 @@ config TEGRA30_MC analysis, especially for IOMMU/SMMU(System Memory Management Unit) module. +config FSL_CORENET_CF + tristate "Freescale CoreNet Error Reporting" + depends on FSL_SOC_BOOKE + help + Say Y for reporting of errors from the Freescale CoreNet + Coherency Fabric. Errors reported include accesses to + physical addresses that mapped by no local access window + (LAW) or an invalid LAW, as well as bad cache state that + represents a coherency violation. + config FSL_IFC bool depends on FSL_SOC diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index 71160a2b7313..4055c47f45ab 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_OF) += of_memory.o endif obj-$(CONFIG_TI_AEMIF) += ti-aemif.o obj-$(CONFIG_TI_EMIF) += emif.o +obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o obj-$(CONFIG_FSL_IFC) += fsl_ifc.o obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c new file mode 100644 index 000000000000..c9443fc136db --- /dev/null +++ b/drivers/memory/fsl-corenet-cf.c @@ -0,0 +1,251 @@ +/* + * CoreNet Coherency Fabric error reporting + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> + +enum ccf_version { + CCF1, + CCF2, +}; + +struct ccf_info { + enum ccf_version version; + int err_reg_offs; +}; + +static const struct ccf_info ccf1_info = { + .version = CCF1, + .err_reg_offs = 0xa00, +}; + +static const struct ccf_info ccf2_info = { + .version = CCF2, + .err_reg_offs = 0xe40, +}; + +static const struct of_device_id ccf_matches[] = { + { + .compatible = "fsl,corenet1-cf", + .data = &ccf1_info, + }, + { + .compatible = "fsl,corenet2-cf", + .data = &ccf2_info, + }, + {} +}; + +struct ccf_err_regs { + u32 errdet; /* 0x00 Error Detect Register */ + /* 0x04 Error Enable (ccf1)/Disable (ccf2) Register */ + u32 errdis; + /* 0x08 Error Interrupt Enable Register (ccf2 only) */ + u32 errinten; + u32 cecar; /* 0x0c Error Capture Attribute Register */ + u32 cecaddrh; /* 0x10 Error Capture Address High */ + u32 cecaddrl; /* 0x14 Error Capture Address Low */ + u32 cecar2; /* 0x18 Error Capture Attribute Register 2 */ +}; + +/* LAE/CV also valid for errdis and errinten */ +#define ERRDET_LAE (1 << 0) /* Local Access Error */ +#define ERRDET_CV (1 << 1) /* Coherency Violation */ +#define ERRDET_CTYPE_SHIFT 26 /* Capture Type (ccf2 only) */ +#define ERRDET_CTYPE_MASK (0x1f << ERRDET_CTYPE_SHIFT) +#define ERRDET_CAP (1 << 31) /* Capture Valid (ccf2 only) */ + +#define CECAR_VAL (1 << 0) /* Valid (ccf1 only) */ +#define CECAR_UVT (1 << 15) /* Unavailable target ID (ccf1) */ +#define CECAR_SRCID_SHIFT_CCF1 24 +#define CECAR_SRCID_MASK_CCF1 (0xff << CECAR_SRCID_SHIFT_CCF1) +#define CECAR_SRCID_SHIFT_CCF2 18 +#define CECAR_SRCID_MASK_CCF2 (0xff << CECAR_SRCID_SHIFT_CCF2) + +#define CECADDRH_ADDRH 0xff + +struct ccf_private { + const struct ccf_info *info; + struct device *dev; + void __iomem *regs; + struct ccf_err_regs __iomem *err_regs; +}; + +static irqreturn_t ccf_irq(int irq, void *dev_id) +{ + struct ccf_private *ccf = dev_id; + static DEFINE_RATELIMIT_STATE(ratelimit, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + u32 errdet, cecar, cecar2; + u64 addr; + u32 src_id; + bool uvt = false; + bool cap_valid = false; + + errdet = ioread32be(&ccf->err_regs->errdet); + cecar = ioread32be(&ccf->err_regs->cecar); + cecar2 = ioread32be(&ccf->err_regs->cecar2); + addr = ioread32be(&ccf->err_regs->cecaddrl); + addr |= ((u64)(ioread32be(&ccf->err_regs->cecaddrh) & + CECADDRH_ADDRH)) << 32; + + if (!__ratelimit(&ratelimit)) + goto out; + + switch (ccf->info->version) { + case CCF1: + if (cecar & CECAR_VAL) { + if (cecar & CECAR_UVT) + uvt = true; + + src_id = (cecar & CECAR_SRCID_MASK_CCF1) >> + CECAR_SRCID_SHIFT_CCF1; + cap_valid = true; + } + + break; + case CCF2: + if (errdet & ERRDET_CAP) { + src_id = (cecar & CECAR_SRCID_MASK_CCF2) >> + CECAR_SRCID_SHIFT_CCF2; + cap_valid = true; + } + + break; + } + + dev_crit(ccf->dev, "errdet 0x%08x cecar 0x%08x cecar2 0x%08x\n", + errdet, cecar, cecar2); + + if (errdet & ERRDET_LAE) { + if (uvt) + dev_crit(ccf->dev, "LAW Unavailable Target ID\n"); + else + dev_crit(ccf->dev, "Local Access Window Error\n"); + } + + if (errdet & ERRDET_CV) + dev_crit(ccf->dev, "Coherency Violation\n"); + + if (cap_valid) { + dev_crit(ccf->dev, "address 0x%09llx, src id 0x%x\n", + addr, src_id); + } + +out: + iowrite32be(errdet, &ccf->err_regs->errdet); + return errdet ? 
IRQ_HANDLED : IRQ_NONE; +} + +static int ccf_probe(struct platform_device *pdev) +{ + struct ccf_private *ccf; + struct resource *r; + const struct of_device_id *match; + int ret, irq; + + match = of_match_device(ccf_matches, &pdev->dev); + if (WARN_ON(!match)) + return -ENODEV; + + ccf = devm_kzalloc(&pdev->dev, sizeof(*ccf), GFP_KERNEL); + if (!ccf) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "%s: no mem resource\n", __func__); + return -ENXIO; + } + + ccf->regs = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(ccf->regs)) { + dev_err(&pdev->dev, "%s: can't map mem resource\n", __func__); + return PTR_ERR(ccf->regs); + } + + ccf->dev = &pdev->dev; + ccf->info = match->data; + ccf->err_regs = ccf->regs + ccf->info->err_reg_offs; + + dev_set_drvdata(&pdev->dev, ccf); + + irq = platform_get_irq(pdev, 0); + if (!irq) { + dev_err(&pdev->dev, "%s: no irq\n", __func__); + return -ENXIO; + } + + ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf); + if (ret) { + dev_err(&pdev->dev, "%s: can't request irq\n", __func__); + return ret; + } + + switch (ccf->info->version) { + case CCF1: + /* On CCF1 this register enables rather than disables. */ + iowrite32be(ERRDET_LAE | ERRDET_CV, &ccf->err_regs->errdis); + break; + + case CCF2: + iowrite32be(0, &ccf->err_regs->errdis); + iowrite32be(ERRDET_LAE | ERRDET_CV, &ccf->err_regs->errinten); + break; + } + + return 0; +} + +static int ccf_remove(struct platform_device *pdev) +{ + struct ccf_private *ccf = dev_get_drvdata(&pdev->dev); + + switch (ccf->info->version) { + case CCF1: + iowrite32be(0, &ccf->err_regs->errdis); + break; + + case CCF2: + /* + * We clear errdis on ccf1 because that's the only way to + * disable interrupts, but on ccf2 there's no need to disable + * detection. + */ + iowrite32be(0, &ccf->err_regs->errinten); + break; + } + + return 0; +} + +static struct platform_driver ccf_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .of_match_table = ccf_matches, + }, + .probe = ccf_probe, + .remove = ccf_remove, +}; + +module_platform_driver(ccf_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Freescale Semiconductor"); +MODULE_DESCRIPTION("Freescale CoreNet Coherency Fabric error reporting"); diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig index a34a11d2fef2..63ca9841db10 100644 --- a/drivers/message/fusion/Kconfig +++ b/drivers/message/fusion/Kconfig @@ -29,7 +29,7 @@ config FUSION_SPI config FUSION_FC tristate "Fusion MPT ScsiHost drivers for FC" depends on PCI && SCSI - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS ---help--- SCSI HOST support for a Fiber Channel host adapters. diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c index 64751c2a1ace..e9d50644660c 100644 --- a/drivers/mfd/88pm805.c +++ b/drivers/mfd/88pm805.c @@ -158,7 +158,7 @@ static int device_irq_init_805(struct pm80x_chip *chip) * PM805_INT_STATUS is under 32K clock domain, so need to * add proper delay before the next I2C register access. */ - msleep(1); + usleep_range(1000, 3000); if (ret < 0) goto out; diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index bcfc9e85b4a0..3a2604580164 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -2,7 +2,8 @@ * Base driver for Marvell 88PM8607 * * Copyright (C) 2009 Marvell International Ltd. 
- * Haojian Zhuang <haojian.zhuang@marvell.com> + * + * Author: Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -140,7 +141,8 @@ static struct resource codec_resources[] = { /* Headset insertion or removal */ {PM8607_IRQ_HEADSET, PM8607_IRQ_HEADSET, "headset", IORESOURCE_IRQ,}, /* Audio short */ - {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,}, + {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", + IORESOURCE_IRQ,}, }; static struct resource battery_resources[] = { @@ -150,10 +152,14 @@ static struct resource battery_resources[] = { static struct resource charger_resources[] = { {PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,}, - {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,}, - {PM8607_IRQ_CHG_FAIL, PM8607_IRQ_CHG_FAIL, "charging timeout", IORESOURCE_IRQ,}, - {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging fault", IORESOURCE_IRQ,}, - {PM8607_IRQ_GPADC1, PM8607_IRQ_GPADC1, "battery temperature", IORESOURCE_IRQ,}, + {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", + IORESOURCE_IRQ,}, + {PM8607_IRQ_CHG_FAIL, PM8607_IRQ_CHG_FAIL, "charging timeout", + IORESOURCE_IRQ,}, + {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging fault", + IORESOURCE_IRQ,}, + {PM8607_IRQ_GPADC1, PM8607_IRQ_GPADC1, "battery temperature", + IORESOURCE_IRQ,}, {PM8607_IRQ_VBAT, PM8607_IRQ_VBAT, "battery voltage", IORESOURCE_IRQ,}, {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,}, }; @@ -568,8 +574,8 @@ static struct irq_domain_ops pm860x_irq_domain_ops = { static int device_irq_init(struct pm860x_chip *chip, struct pm860x_platform_data *pdata) { - struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \ - : chip->companion; + struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? + chip->client : chip->companion; unsigned char status_buf[INT_STATUS_NUM]; unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; int data, mask, ret = -EINVAL; @@ -631,8 +637,8 @@ static int device_irq_init(struct pm860x_chip *chip, if (!chip->core_irq) goto out; - ret = request_threaded_irq(chip->core_irq, NULL, pm860x_irq, flags | IRQF_ONESHOT, - "88pm860x", chip); + ret = request_threaded_irq(chip->core_irq, NULL, pm860x_irq, + flags | IRQF_ONESHOT, "88pm860x", chip); if (ret) { dev_err(chip->dev, "Failed to request IRQ: %d\n", ret); chip->core_irq = 0; @@ -871,7 +877,7 @@ static void device_rtc_init(struct pm860x_chip *chip, { int ret; - if ((pdata == NULL)) + if (!pdata) return; rtc_devs[0].platform_data = pdata->rtc; @@ -997,8 +1003,9 @@ static void device_8607_init(struct pm860x_chip *chip, ret); break; default: - dev_err(chip->dev, "Failed to detect Marvell 88PM8607. " - "Chip ID: %02x\n", ret); + dev_err(chip->dev, + "Failed to detect Marvell 88PM8607. 
Chip ID: %02x\n", + ret); goto out; } @@ -1120,8 +1127,8 @@ static int pm860x_dt_init(struct device_node *np, ret = of_property_read_u32(np, "marvell,88pm860x-slave-addr", &pdata->companion_addr); if (ret) { - dev_err(dev, "Not found \"marvell,88pm860x-slave-addr\" " - "property\n"); + dev_err(dev, + "Not found \"marvell,88pm860x-slave-addr\" property\n"); pdata->companion_addr = 0; } return 0; diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c index ff8f803ce833..a93b4d0134a2 100644 --- a/drivers/mfd/88pm860x-i2c.c +++ b/drivers/mfd/88pm860x-i2c.c @@ -2,7 +2,8 @@ * I2C driver for Marvell 88PM860x * * Copyright (C) 2009 Marvell International Ltd. - * Haojian Zhuang <haojian.zhuang@marvell.com> + * + * Author: Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index fb824f501197..de5abf244746 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -13,7 +13,7 @@ config MFD_CORE config MFD_CS5535 tristate "AMD CS5535 and CS5536 southbridge core functions" select MFD_CORE - depends on PCI && X86 + depends on PCI && (X86_32 || (X86 && COMPILE_TEST)) ---help--- This is the core driver for CS5535/CS5536 MFD functions. This is necessary for using the board's GPIO and MFGPT functionality. @@ -187,6 +187,7 @@ config MFD_MC13XXX tristate depends on (SPI_MASTER || I2C) select MFD_CORE + select REGMAP_IRQ help Enable support for the Freescale MC13783 and MC13892 PMICs. This driver provides common support for accessing the device, @@ -253,6 +254,18 @@ config LPC_SCH LPC bridge function of the Intel SCH provides support for System Management Bus and General Purpose I/O. +config INTEL_SOC_PMIC + bool "Support for Intel Atom SoC PMIC" + depends on I2C=y + select MFD_CORE + select REGMAP_I2C + select REGMAP_IRQ + help + Select this option to enable support for the PMIC device + on some Intel SoC systems. The PMIC provides ADC, GPIO, + thermal, charger and related power management functions + on these systems. + config MFD_INTEL_MSIC bool "Intel MSIC" depends on INTEL_SCU_IPC @@ -367,14 +380,15 @@ config MFD_MAX14577 of the device. config MFD_MAX77686 - bool "Maxim Semiconductor MAX77686 PMIC Support" + bool "Maxim Semiconductor MAX77686/802 PMIC Support" depends on I2C=y select MFD_CORE select REGMAP_I2C + select REGMAP_IRQ select IRQ_DOMAIN help - Say yes here to add support for Maxim Semiconductor MAX77686. - This is a Power Management IC with RTC on chip. + Say yes here to add support for Maxim Semiconductor MAX77686 and + MAX77802 which are Power Management IC with an RTC on chip. This driver provides common support for accessing the device; additional drivers must be enabled in order to use the functionality of the device. @@ -574,6 +588,7 @@ config MFD_SEC_CORE select MFD_CORE select REGMAP_I2C select REGMAP_IRQ + select REGULATOR help Support for the Samsung Electronics MFD series. This driver provides common support for accessing the device, @@ -1057,7 +1072,7 @@ config MFD_LM3533 config MFD_TIMBERDALE tristate "Timberdale FPGA" select MFD_CORE - depends on PCI && GPIOLIB + depends on PCI && GPIOLIB && (X86_32 || COMPILE_TEST) ---help--- This is the core driver for the timberdale FPGA. This device is a multifunction device which exposes numerous platform devices. 
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 8c6e7bba4660..f00148782d9b 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -115,7 +115,7 @@ da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o obj-$(CONFIG_MFD_DA9063) += da9063.o obj-$(CONFIG_MFD_MAX14577) += max14577.o -obj-$(CONFIG_MFD_MAX77686) += max77686.o max77686-irq.o +obj-$(CONFIG_MFD_MAX77686) += max77686.o obj-$(CONFIG_MFD_MAX77693) += max77693.o obj-$(CONFIG_MFD_MAX8907) += max8907.o max8925-objs := max8925-core.o max8925-i2c.o @@ -169,3 +169,6 @@ obj-$(CONFIG_MFD_AS3711) += as3711.o obj-$(CONFIG_MFD_AS3722) += as3722.o obj-$(CONFIG_MFD_STW481X) += stw481x.o obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o + +intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o +obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c index 14d9542a4eed..4e6e03d63e12 100644 --- a/drivers/mfd/aat2870-core.c +++ b/drivers/mfd/aat2870-core.c @@ -303,7 +303,10 @@ static ssize_t aat2870_reg_write_file(struct file *file, while (*start == ' ') start++; - addr = simple_strtoul(start, &start, 16); + ret = kstrtoul(start, 16, &addr); + if (ret) + return ret; + if (addr >= AAT2870_REG_NUM) { dev_err(aat2870->dev, "Invalid address, 0x%lx\n", addr); return -EINVAL; diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index b348ae520629..4659ac1db039 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -91,8 +91,8 @@ static int ab3100_set_register_interruptible(struct ab3100 *ab3100, err); } else if (err != 2) { dev_err(ab3100->dev, - "write error (write register) " - "%d bytes transferred (expected 2)\n", + "write error (write register)\n" + " %d bytes transferred (expected 2)\n", err); err = -EIO; } else { @@ -135,8 +135,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100, err); } else if (err != 2) { dev_err(ab3100->dev, - "write error (write test register) " - "%d bytes transferred (expected 2)\n", + "write error (write test register)\n" + " %d bytes transferred (expected 2)\n", err); err = -EIO; } else { @@ -171,8 +171,8 @@ static int ab3100_get_register_interruptible(struct ab3100 *ab3100, goto get_reg_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, - "write error (send register address) " - "%d bytes transferred (expected 1)\n", + "write error (send register address)\n" + " %d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_out_unlock; @@ -189,8 +189,8 @@ static int ab3100_get_register_interruptible(struct ab3100 *ab3100, goto get_reg_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, - "write error (read register) " - "%d bytes transferred (expected 1)\n", + "write error (read register)\n" + " %d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_out_unlock; @@ -237,8 +237,8 @@ static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, goto get_reg_page_out_unlock; } else if (err != 1) { dev_err(ab3100->dev, - "write error (send first register address) " - "%d bytes transferred (expected 1)\n", + "write error (send first register address)\n" + " %d bytes transferred (expected 1)\n", err); err = -EIO; goto get_reg_page_out_unlock; @@ -252,8 +252,8 @@ static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, goto get_reg_page_out_unlock; } else if (err != numregs) { dev_err(ab3100->dev, - "write error (read register page) " - "%d bytes transferred (expected %d)\n", + "write error (read register page)\n" + " %d bytes 
transferred (expected %d)\n", err, numregs); err = -EIO; goto get_reg_page_out_unlock; @@ -295,8 +295,8 @@ static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, goto get_maskset_unlock; } else if (err != 1) { dev_err(ab3100->dev, - "write error (maskset send address) " - "%d bytes transferred (expected 1)\n", + "write error (maskset send address)\n" + " %d bytes transferred (expected 1)\n", err); err = -EIO; goto get_maskset_unlock; @@ -310,8 +310,8 @@ static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, goto get_maskset_unlock; } else if (err != 1) { dev_err(ab3100->dev, - "write error (maskset read register) " - "%d bytes transferred (expected 1)\n", + "write error (maskset read register)\n" + " %d bytes transferred (expected 1)\n", err); err = -EIO; goto get_maskset_unlock; @@ -330,8 +330,8 @@ static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, goto get_maskset_unlock; } else if (err != 2) { dev_err(ab3100->dev, - "write error (write register) " - "%d bytes transferred (expected 2)\n", + "write error (write register)\n" + " %d bytes transferred (expected 2)\n", err); err = -EIO; goto get_maskset_unlock; @@ -371,7 +371,7 @@ EXPORT_SYMBOL(ab3100_event_register); int ab3100_event_unregister(struct ab3100 *ab3100, struct notifier_block *nb) { - return blocking_notifier_chain_unregister(&ab3100->event_subscribers, + return blocking_notifier_chain_unregister(&ab3100->event_subscribers, nb); } EXPORT_SYMBOL(ab3100_event_unregister); @@ -455,7 +455,7 @@ static int ab3100_registers_print(struct seq_file *s, void *p) u8 value; u8 reg; - seq_printf(s, "AB3100 registers:\n"); + seq_puts(s, "AB3100 registers:\n"); for (reg = 0; reg < 0xff; reg++) { ab3100_get_register_interruptible(ab3100, reg, &value); @@ -560,8 +560,8 @@ static ssize_t ab3100_get_set_reg(struct file *file, ab3100_get_register_interruptible(ab3100, user_reg, ®value); dev_info(ab3100->dev, - "debug write reg[0x%02x] with 0x%02x, " - "after readback: 0x%02x\n", + "debug write reg[0x%02x]\n" + " with 0x%02x, after readback: 0x%02x\n", user_reg, user_value, regvalue); } return buf_size; @@ -719,8 +719,7 @@ static int ab3100_setup(struct ab3100 *ab3100) */ if (ab3100->chip_id == 0xc4) { dev_warn(ab3100->dev, - "AB3100 P1E variant detected, " - "forcing chip to 32KHz\n"); + "AB3100 P1E variant detected forcing chip to 32KHz\n"); err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08); } @@ -878,8 +877,7 @@ static int ab3100_probe(struct i2c_client *client, &ab3100->chip_id); if (err) { dev_err(&client->dev, - "could not communicate with the AB3100 analog " - "baseband chip\n"); + "failed to communicate with AB3100 chip\n"); goto exit_no_detect; } @@ -902,8 +900,8 @@ static int ab3100_probe(struct i2c_client *client, if (ids[i].id == 0x0) { dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n", ab3100->chip_id); - dev_err(&client->dev, "accepting it anyway. Please update " - "the driver.\n"); + dev_err(&client->dev, + "accepting it anyway. 
Please update the driver.\n"); goto exit_no_detect; } diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index cf2e6a198c6b..bde2fc072410 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -148,8 +148,8 @@ static const int ab9540_irq_regoffset[AB9540_NUM_IRQ_REGS] = { /* AB8540 support */ static const int ab8540_irq_regoffset[AB8540_NUM_IRQ_REGS] = { - 0, 1, 2, 3, 4, -1, -1, -1, -1, 11, 18, 19, 20, 21, 12, 13, 24, 5, 22, 23, - 25, 26, 27, 28, 29, 30, 31, + 0, 1, 2, 3, 4, -1, -1, -1, -1, 11, 18, 19, 20, 21, 12, 13, 24, 5, 22, + 23, 25, 26, 27, 28, 29, 30, 31, }; static const char ab8500_version_str[][7] = { @@ -322,7 +322,7 @@ static int ab8500_mask_and_set_register(struct device *dev, struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); atomic_inc(&ab8500->transfer_ongoing); - ret= mask_and_set_register_interruptible(ab8500, bank, reg, + ret = mask_and_set_register_interruptible(ab8500, bank, reg, bitmask, bitvalues); atomic_dec(&ab8500->transfer_ongoing); return ret; @@ -415,9 +415,11 @@ static void ab8500_irq_unmask(struct irq_data *data) if (type & IRQ_TYPE_EDGE_FALLING) { if (offset >= AB8500_INT_GPIO6R && offset <= AB8500_INT_GPIO41R) ab8500->mask[index + 2] &= ~mask; - else if (offset >= AB9540_INT_GPIO50R && offset <= AB9540_INT_GPIO54R) + else if (offset >= AB9540_INT_GPIO50R && + offset <= AB9540_INT_GPIO54R) ab8500->mask[index + 1] &= ~mask; - else if (offset == AB8540_INT_GPIO43R || offset == AB8540_INT_GPIO44R) + else if (offset == AB8540_INT_GPIO43R || + offset == AB8540_INT_GPIO44R) /* Here the falling IRQ is one bit lower */ ab8500->mask[index] &= ~(mask << 1); else @@ -451,7 +453,7 @@ static void update_latch_offset(u8 *offset, int i) /* Fix inconsistent ab8540 bit mapping... */ if (unlikely(*offset == 16)) *offset = 25; - if ((i==3) && (*offset >= 24)) + if ((i == 3) && (*offset >= 24)) *offset += 2; } @@ -573,8 +575,8 @@ static int ab8500_irq_map(struct irq_domain *d, unsigned int virq, } static struct irq_domain_ops ab8500_irq_ops = { - .map = ab8500_irq_map, - .xlate = irq_domain_xlate_twocell, + .map = ab8500_irq_map, + .xlate = irq_domain_xlate_twocell, }; static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np) @@ -607,8 +609,8 @@ int ab8500_suspend(struct ab8500 *ab8500) { if (atomic_read(&ab8500->transfer_ongoing)) return -EINVAL; - else - return 0; + + return 0; } static struct resource ab8500_gpadc_resources[] = { @@ -1551,7 +1553,7 @@ static struct attribute_group ab9540_attr_group = { static int ab8500_probe(struct platform_device *pdev) { - static char *switch_off_status[] = { + static const char *switch_off_status[] = { "Swoff bit programming", "Thermal protection activation", "Vbat lower then BattOk falling threshold", @@ -1560,7 +1562,7 @@ static int ab8500_probe(struct platform_device *pdev) "Battery level lower than power on reset threshold", "Power on key 1 pressed longer than 10 seconds", "DB8500 thermal shutdown"}; - static char *turn_on_status[] = { + static const char *turn_on_status[] = { "Battery rising (Vbat)", "Power On Key 1 dbF", "Power On Key 2 dbF", @@ -1579,7 +1581,7 @@ static int ab8500_probe(struct platform_device *pdev) int i; u8 value; - ab8500 = devm_kzalloc(&pdev->dev, sizeof *ab8500, GFP_KERNEL); + ab8500 = devm_kzalloc(&pdev->dev, sizeof(*ab8500), GFP_KERNEL); if (!ab8500) return -ENOMEM; @@ -1636,7 +1638,7 @@ static int ab8500_probe(struct platform_device *pdev) ab8500->mask_size = AB8540_NUM_IRQ_REGS; ab8500->irq_reg_offset = ab8540_irq_regoffset; ab8500->it_latchhier_num = 
AB8540_IT_LATCHHIER_NUM; - }/* Configure AB8500 or AB9540 IRQ */ + } /* Configure AB8500 or AB9540 IRQ */ else if (is_ab9540(ab8500) || is_ab8505(ab8500)) { ab8500->mask_size = AB9540_NUM_IRQ_REGS; ab8500->irq_reg_offset = ab9540_irq_regoffset; @@ -1646,10 +1648,12 @@ static int ab8500_probe(struct platform_device *pdev) ab8500->irq_reg_offset = ab8500_irq_regoffset; ab8500->it_latchhier_num = AB8500_IT_LATCHHIER_NUM; } - ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL); + ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, + GFP_KERNEL); if (!ab8500->mask) return -ENOMEM; - ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL); + ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size, + GFP_KERNEL); if (!ab8500->oldmask) return -ENOMEM; @@ -1674,14 +1678,13 @@ static int ab8500_probe(struct platform_device *pdev) if (value) { for (i = 0; i < ARRAY_SIZE(switch_off_status); i++) { if (value & 1) - printk(KERN_CONT " \"%s\"", - switch_off_status[i]); + pr_cont(" \"%s\"", switch_off_status[i]); value = value >> 1; } - printk(KERN_CONT "\n"); + pr_cont("\n"); } else { - printk(KERN_CONT " None\n"); + pr_cont(" None\n"); } ret = get_register_interruptible(ab8500, AB8500_SYS_CTRL1_BLOCK, AB8500_TURN_ON_STATUS, &value); @@ -1692,12 +1695,12 @@ static int ab8500_probe(struct platform_device *pdev) if (value) { for (i = 0; i < ARRAY_SIZE(turn_on_status); i++) { if (value & 1) - printk("\"%s\" ", turn_on_status[i]); + pr_cont("\"%s\" ", turn_on_status[i]); value = value >> 1; } - printk("\n"); + pr_cont("\n"); } else { - printk("None\n"); + pr_cont("None\n"); } if (plat && plat->init) @@ -1751,7 +1754,7 @@ static int ab8500_probe(struct platform_device *pdev) if (ret) return ret; -#if CONFIG_DEBUG_FS +#ifdef CONFIG_DEBUG_FS /* Pass to debugfs */ ab8500_debug_resources[0].start = ab8500->irq; ab8500_debug_resources[0].end = ab8500->irq; diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index d1a22aae2df5..b2c7e3b1edfa 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -135,10 +135,10 @@ struct ab8500_prcmu_ranges { /* hwreg- "mask" and "shift" entries ressources */ struct hwreg_cfg { u32 bank; /* target bank */ - u32 addr; /* target address */ + unsigned long addr; /* target address */ uint fmt; /* format */ - uint mask; /* read/write mask, applied before any bit shift */ - int shift; /* bit shift (read:right shift, write:left shift */ + unsigned long mask; /* read/write mask, applied before any bit shift */ + long shift; /* bit shift (read:right shift, write:left shift */ }; /* fmt bit #0: 0=hexa, 1=dec */ #define REG_FMT_DEC(c) ((c)->fmt & 0x1) @@ -1304,16 +1304,17 @@ static int ab8500_registers_print(struct device *dev, u32 bank, } if (s) { - err = seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n", - bank, reg, value); + err = seq_printf(s, + " [0x%02X/0x%02X]: 0x%02X\n", + bank, reg, value); if (err < 0) { /* Error is not returned here since * the output is wanted in any case */ return 0; } } else { - printk(KERN_INFO" [0x%02X/0x%02X]: 0x%02X\n", - bank, reg, value); + dev_info(dev, " [0x%02X/0x%02X]: 0x%02X\n", + bank, reg, value); } } } @@ -1325,7 +1326,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p) struct device *dev = s->private; u32 bank = debug_bank; - seq_printf(s, AB8500_NAME_STRING " register values:\n"); + seq_puts(s, AB8500_NAME_STRING " register values:\n"); seq_printf(s, " bank 0x%02X:\n", bank); @@ -1350,12 +1351,11 @@ static int ab8500_print_all_banks(struct 
seq_file *s, void *p) { struct device *dev = s->private; unsigned int i; - int err; - seq_printf(s, AB8500_NAME_STRING " register values:\n"); + seq_puts(s, AB8500_NAME_STRING " register values:\n"); for (i = 0; i < AB8500_NUM_BANKS; i++) { - err = seq_printf(s, " bank 0x%02X:\n", i); + seq_printf(s, " bank 0x%02X:\n", i); ab8500_registers_print(dev, i, s); } @@ -1367,10 +1367,10 @@ void ab8500_dump_all_banks(struct device *dev) { unsigned int i; - printk(KERN_INFO"ab8500 register values:\n"); + dev_info(dev, "ab8500 register values:\n"); for (i = 1; i < AB8500_NUM_BANKS; i++) { - printk(KERN_INFO" bank 0x%02X:\n", i); + dev_info(dev, " bank 0x%02X:\n", i); ab8500_registers_print(dev, i, NULL); } } @@ -1384,8 +1384,6 @@ static struct ab8500_register_dump u8 value; } ab8500_complete_register_dump[DUMP_MAX_REGS]; -extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); - /* This shall only be called upon kernel panic! */ void ab8500_dump_all_banks_to_mem(void) { @@ -1393,8 +1391,7 @@ void ab8500_dump_all_banks_to_mem(void) u8 bank; int err = 0; - pr_info("Saving all ABB registers at \"ab8500_complete_register_dump\" " - "for crash analyze.\n"); + pr_info("Saving all ABB registers for crash analysis.\n"); for (bank = 0; bank < AB8500_NUM_BANKS; bank++) { for (i = 0; i < debug_ranges[bank].num_ranges; i++) { @@ -1564,7 +1561,7 @@ static ssize_t ab8500_val_write(struct file *file, err = abx500_set_register_interruptible(dev, (u8)debug_bank, debug_address, (u8)user_val); if (err < 0) { - printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__); + pr_err("abx500_set_reg failed %d, %d", err, __LINE__); return -EINVAL; } @@ -1596,7 +1593,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p) { int line; - seq_printf(s, "name: number: number of: wake:\n"); + seq_puts(s, "name: number: number of: wake:\n"); for (line = 0; line < num_interrupt_lines; line++) { struct irq_desc *desc = irq_to_desc(line + irq_first); @@ -1722,7 +1719,8 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p) static int ab8500_modem_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_print_modem_registers, inode->i_private); + return single_open(file, ab8500_print_modem_registers, + inode->i_private); } static const struct file_operations ab8500_modem_fops = { @@ -1751,7 +1749,8 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p) static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private); + return single_open(file, ab8500_gpadc_bat_ctrl_print, + inode->i_private); } static const struct file_operations ab8500_gpadc_bat_ctrl_fops = { @@ -1781,7 +1780,8 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p) static int ab8500_gpadc_btemp_ball_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private); + return single_open(file, ab8500_gpadc_btemp_ball_print, + inode->i_private); } static const struct file_operations ab8500_gpadc_btemp_ball_fops = { @@ -1962,7 +1962,8 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p) static int ab8500_gpadc_main_bat_v_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private); + return single_open(file, ab8500_gpadc_main_bat_v_print, + inode->i_private); } static const struct file_operations ab8500_gpadc_main_bat_v_fops = { @@ -2082,7 +2083,8 
@@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p) static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private); + return single_open(file, ab8500_gpadc_bk_bat_v_print, + inode->i_private); } static const struct file_operations ab8500_gpadc_bk_bat_v_fops = { @@ -2111,7 +2113,8 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p) static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private); + return single_open(file, ab8500_gpadc_die_temp_print, + inode->i_private); } static const struct file_operations ab8500_gpadc_die_temp_fops = { @@ -2190,8 +2193,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p) gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); vbat_true_meas_raw = ab8500_gpadc_read_raw(gpadc, VBAT_TRUE_MEAS, avg_sample, trig_edge, trig_timer, conv_type); - vbat_true_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS, - vbat_true_meas_raw); + vbat_true_meas_convert = + ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS, + vbat_true_meas_raw); return seq_printf(s, "%d,0x%X\n", vbat_true_meas_convert, vbat_true_meas_raw); @@ -2285,7 +2289,8 @@ static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = { .owner = THIS_MODULE, }; -static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s, void *p) +static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s, + void *p) { int vbat_true_meas_raw; int vbat_true_meas_convert; @@ -2314,7 +2319,8 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode, inode->i_private); } -static const struct file_operations ab8540_gpadc_vbat_true_meas_and_ibat_fops = { +static const struct file_operations +ab8540_gpadc_vbat_true_meas_and_ibat_fops = { .open = ab8540_gpadc_vbat_true_meas_and_ibat_open, .read = seq_read, .llseek = seq_lseek, @@ -2368,14 +2374,15 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p) ab8540_gpadc_get_otp(gpadc, &vmain_l, &vmain_h, &btemp_l, &btemp_h, &vbat_l, &vbat_h, &ibat_l, &ibat_h); return seq_printf(s, "VMAIN_L:0x%X\n" - "VMAIN_H:0x%X\n" - "BTEMP_L:0x%X\n" - "BTEMP_H:0x%X\n" - "VBAT_L:0x%X\n" - "VBAT_H:0x%X\n" - "IBAT_L:0x%X\n" - "IBAT_H:0x%X\n", - vmain_l, vmain_h, btemp_l, btemp_h, vbat_l, vbat_h, ibat_l, ibat_h); + "VMAIN_H:0x%X\n" + "BTEMP_L:0x%X\n" + "BTEMP_H:0x%X\n" + "VBAT_L:0x%X\n" + "VBAT_H:0x%X\n" + "IBAT_L:0x%X\n" + "IBAT_H:0x%X\n", + vmain_l, vmain_h, btemp_l, btemp_h, + vbat_l, vbat_h, ibat_l, ibat_h); } static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file) @@ -2419,8 +2426,8 @@ static ssize_t ab8500_gpadc_avg_sample_write(struct file *file, || (user_avg_sample == SAMPLE_16)) { avg_sample = (u8) user_avg_sample; } else { - dev_err(dev, "debugfs error input: " - "should be egal to 1, 4, 8 or 16\n"); + dev_err(dev, + "debugfs err input: should be egal to 1, 4, 8 or 16\n"); return -EINVAL; } @@ -2504,14 +2511,14 @@ static ssize_t ab8500_gpadc_trig_timer_write(struct file *file, if (err) return err; - if ((user_trig_timer >= 0) && (user_trig_timer <= 255)) { - trig_timer = (u8) user_trig_timer; - } else { - dev_err(dev, "debugfs error input: " - "should be beetween 0 to 255\n"); + if (user_trig_timer & ~0xFF) { + dev_err(dev, + "debugfs error input: should be beetween 0 to 255\n"); return -EINVAL; } + trig_timer = (u8) user_trig_timer; + return count; } @@ -2579,6 
+2586,7 @@ static const struct file_operations ab8500_gpadc_conv_type_fops = { static int strval_len(char *b) { char *s = b; + if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) { s += 2; for (; *s && (*s != ' ') && (*s != '\n'); s++) { @@ -2643,13 +2651,17 @@ static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg, b += (*(b+2) == ' ') ? 3 : 6; if (strval_len(b) == 0) return -EINVAL; - loc.mask = simple_strtoul(b, &b, 0); + ret = kstrtoul(b, 0, &loc.mask); + if (ret) + return ret; } else if ((!strncmp(b, "-s ", 3)) || (!strncmp(b, "-shift ", 7))) { b += (*(b+2) == ' ') ? 3 : 7; if (strval_len(b) == 0) return -EINVAL; - loc.shift = simple_strtol(b, &b, 0); + ret = kstrtol(b, 0, &loc.shift); + if (ret) + return ret; } else { return -EINVAL; } @@ -2657,29 +2669,36 @@ static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg, /* get arg BANK and ADDRESS */ if (strval_len(b) == 0) return -EINVAL; - loc.bank = simple_strtoul(b, &b, 0); + ret = kstrtouint(b, 0, &loc.bank); + if (ret) + return ret; while (*b == ' ') b++; if (strval_len(b) == 0) return -EINVAL; - loc.addr = simple_strtoul(b, &b, 0); + ret = kstrtoul(b, 0, &loc.addr); + if (ret) + return ret; if (write) { while (*b == ' ') b++; if (strval_len(b) == 0) return -EINVAL; - val = simple_strtoul(b, &b, 0); + ret = kstrtouint(b, 0, &val); + if (ret) + return ret; } /* args are ok, update target cfg (mainly for read) */ *cfg = loc; #ifdef ABB_HWREG_DEBUG - pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d" - "value=0x%X\n", (write) ? "write" : "read", - REG_FMT_DEC(cfg) ? "decimal" : "hexa", - cfg->addr, cfg->mask, cfg->shift, val); + pr_warn("HWREG request: %s, %s,\n" + " addr=0x%08X, mask=0x%X, shift=%d" "value=0x%X\n", + (write) ? "write" : "read", + REG_FMT_DEC(cfg) ? 
"decimal" : "hexa", + cfg->addr, cfg->mask, cfg->shift, val); #endif if (!write) @@ -2765,8 +2784,8 @@ static ssize_t show_irq(struct device *dev, irq_index = name - irq_first; if (irq_index >= num_irqs) return -EINVAL; - else - return sprintf(buf, "%u\n", irq_count[irq_index]); + + return sprintf(buf, "%u\n", irq_count[irq_index]); } static ssize_t ab8500_subscribe_write(struct file *file, @@ -2815,7 +2834,7 @@ static ssize_t ab8500_subscribe_write(struct file *file, dev_attr[irq_index]->attr.mode = S_IRUGO; err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr); if (err < 0) { - printk(KERN_ERR "sysfs_create_file failed %d\n", err); + pr_info("sysfs_create_file failed %d\n", err); return err; } @@ -2823,8 +2842,8 @@ static ssize_t ab8500_subscribe_write(struct file *file, IRQF_SHARED | IRQF_NO_SUSPEND, "ab8500-debug", &dev->kobj); if (err < 0) { - printk(KERN_ERR "request_threaded_irq failed %d, %lu\n", - err, user_val); + pr_info("request_threaded_irq failed %d, %lu\n", + err, user_val); sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr); return err; } @@ -2946,6 +2965,7 @@ static int ab8500_debug_probe(struct platform_device *plf) struct dentry *file; struct ab8500 *ab8500; struct resource *res; + debug_bank = AB8500_MISC; debug_address = AB8500_REV_REG & 0x00FF; @@ -2958,7 +2978,7 @@ static int ab8500_debug_probe(struct platform_device *plf) return -ENOMEM; dev_attr = devm_kzalloc(&plf->dev, - sizeof(*dev_attr)*num_irqs,GFP_KERNEL); + sizeof(*dev_attr)*num_irqs, GFP_KERNEL); if (!dev_attr) return -ENOMEM; @@ -2969,23 +2989,20 @@ static int ab8500_debug_probe(struct platform_device *plf) res = platform_get_resource_byname(plf, 0, "IRQ_AB8500"); if (!res) { - dev_err(&plf->dev, "AB8500 irq not found, err %d\n", - irq_first); - return ENXIO; + dev_err(&plf->dev, "AB8500 irq not found, err %d\n", irq_first); + return -ENXIO; } irq_ab8500 = res->start; irq_first = platform_get_irq_byname(plf, "IRQ_FIRST"); if (irq_first < 0) { - dev_err(&plf->dev, "First irq not found, err %d\n", - irq_first); + dev_err(&plf->dev, "First irq not found, err %d\n", irq_first); return irq_first; } irq_last = platform_get_irq_byname(plf, "IRQ_LAST"); if (irq_last < 0) { - dev_err(&plf->dev, "Last irq not found, err %d\n", - irq_last); + dev_err(&plf->dev, "Last irq not found, err %d\n", irq_last); return irq_last; } @@ -2994,37 +3011,41 @@ static int ab8500_debug_probe(struct platform_device *plf) goto err; ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING, - ab8500_dir); + ab8500_dir); if (!ab8500_gpadc_dir) goto err; - file = debugfs_create_file("all-bank-registers", S_IRUGO, - ab8500_dir, &plf->dev, &ab8500_registers_fops); + file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir, + &plf->dev, &ab8500_registers_fops); if (!file) goto err; - file = debugfs_create_file("all-banks", S_IRUGO, - ab8500_dir, &plf->dev, &ab8500_all_banks_fops); + file = debugfs_create_file("all-banks", S_IRUGO, ab8500_dir, + &plf->dev, &ab8500_all_banks_fops); if (!file) goto err; - file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_dir, &plf->dev, &ab8500_bank_fops); + file = debugfs_create_file("register-bank", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_dir, &plf->dev, &ab8500_bank_fops); if (!file) goto err; - file = debugfs_create_file("register-address", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_dir, &plf->dev, &ab8500_address_fops); + file = debugfs_create_file("register-address", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_dir, &plf->dev, 
&ab8500_address_fops); if (!file) goto err; - file = debugfs_create_file("register-value", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_dir, &plf->dev, &ab8500_val_fops); + file = debugfs_create_file("register-value", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_dir, &plf->dev, &ab8500_val_fops); if (!file) goto err; - file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_dir, &plf->dev, &ab8500_subscribe_fops); + file = debugfs_create_file("irq-subscribe", + (S_IRUGO | S_IWUSR | S_IWGRP), ab8500_dir, + &plf->dev, &ab8500_subscribe_fops); if (!file) goto err; @@ -3042,158 +3063,191 @@ static int ab8500_debug_probe(struct platform_device *plf) num_interrupt_lines = AB8540_NR_IRQS; } - file = debugfs_create_file("interrupts", (S_IRUGO), - ab8500_dir, &plf->dev, &ab8500_interrupts_fops); + file = debugfs_create_file("interrupts", (S_IRUGO), ab8500_dir, + &plf->dev, &ab8500_interrupts_fops); if (!file) goto err; - file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops); + file = debugfs_create_file("irq-unsubscribe", + (S_IRUGO | S_IWUSR | S_IWGRP), ab8500_dir, + &plf->dev, &ab8500_unsubscribe_fops); if (!file) goto err; file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_dir, &plf->dev, &ab8500_hwreg_fops); + ab8500_dir, &plf->dev, &ab8500_hwreg_fops); if (!file) goto err; - file = debugfs_create_file("all-modem-registers", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_dir, &plf->dev, &ab8500_modem_fops); + file = debugfs_create_file("all-modem-registers", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_dir, &plf->dev, &ab8500_modem_fops); if (!file) goto err; file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_bat_ctrl_fops); if (!file) goto err; file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops); + ab8500_gpadc_dir, + &plf->dev, &ab8500_gpadc_btemp_ball_fops); if (!file) goto err; - file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops); + file = debugfs_create_file("main_charger_v", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_main_charger_v_fops); if (!file) goto err; - file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops); + file = debugfs_create_file("acc_detect1", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_acc_detect1_fops); if (!file) goto err; - file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops); + file = debugfs_create_file("acc_detect2", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_acc_detect2_fops); if (!file) goto err; file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_aux1_fops); if (!file) goto err; file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_aux2_fops); if (!file) goto err; file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, 
&plf->dev, &ab8500_gpadc_main_bat_v_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_main_bat_v_fops); if (!file) goto err; file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_vbus_v_fops); if (!file) goto err; - file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops); + file = debugfs_create_file("main_charger_c", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_main_charger_c_fops); if (!file) goto err; - file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops); + file = debugfs_create_file("usb_charger_c", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, + &plf->dev, &ab8500_gpadc_usb_charger_c_fops); if (!file) goto err; file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_bk_bat_v_fops); if (!file) goto err; file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_die_temp_fops); if (!file) goto err; file = debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_id_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_usb_id_fops); if (!file) goto err; if (is_ab8540(ab8500)) { - file = debugfs_create_file("xtal_temp", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8540_gpadc_xtal_temp_fops); + file = debugfs_create_file("xtal_temp", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, &plf->dev, + &ab8540_gpadc_xtal_temp_fops); if (!file) goto err; - file = debugfs_create_file("vbattruemeas", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, - &ab8540_gpadc_vbat_true_meas_fops); + file = debugfs_create_file("vbattruemeas", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, &plf->dev, + &ab8540_gpadc_vbat_true_meas_fops); if (!file) goto err; file = debugfs_create_file("batctrl_and_ibat", - (S_IRUGO | S_IWUGO), ab8500_gpadc_dir, - &plf->dev, &ab8540_gpadc_bat_ctrl_and_ibat_fops); + (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, + &plf->dev, + &ab8540_gpadc_bat_ctrl_and_ibat_fops); if (!file) goto err; file = debugfs_create_file("vbatmeas_and_ibat", - (S_IRUGO | S_IWUGO), ab8500_gpadc_dir, - &plf->dev, - &ab8540_gpadc_vbat_meas_and_ibat_fops); + (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, + &ab8540_gpadc_vbat_meas_and_ibat_fops); if (!file) goto err; file = debugfs_create_file("vbattruemeas_and_ibat", - (S_IRUGO | S_IWUGO), ab8500_gpadc_dir, - &plf->dev, - &ab8540_gpadc_vbat_true_meas_and_ibat_fops); + (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, + &plf->dev, + &ab8540_gpadc_vbat_true_meas_and_ibat_fops); if (!file) goto err; file = debugfs_create_file("battemp_and_ibat", - (S_IRUGO | S_IWUGO), ab8500_gpadc_dir, + (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8540_gpadc_bat_temp_and_ibat_fops); if (!file) goto err; - file = debugfs_create_file("otp_calib", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8540_gpadc_otp_calib_fops); + file = debugfs_create_file("otp_calib", + (S_IRUGO | S_IWUSR | S_IWGRP), + ab8500_gpadc_dir, + &plf->dev, &ab8540_gpadc_otp_calib_fops); if (!file) goto err; } file = 
debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_avg_sample_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_avg_sample_fops); if (!file) goto err; file = debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_trig_edge_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_trig_edge_fops); if (!file) goto err; file = debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_trig_timer_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_trig_timer_fops); if (!file) goto err; file = debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP), - ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_conv_type_fops); + ab8500_gpadc_dir, &plf->dev, + &ab8500_gpadc_conv_type_fops); if (!file) goto err; return 0; err: - if (ab8500_dir) - debugfs_remove_recursive(ab8500_dir); + debugfs_remove_recursive(ab8500_dir); dev_err(&plf->dev, "failed to create debugfs entries.\n"); return -ENOMEM; diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index cfc191abae4a..10a0cb90619a 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -123,6 +123,8 @@ static irqreturn_t arizona_underclocked(int irq, void *data) dev_err(arizona->dev, "AIF2 underclocked\n"); if (val & ARIZONA_AIF1_UNDERCLOCKED_STS) dev_err(arizona->dev, "AIF1 underclocked\n"); + if (val & ARIZONA_ISRC3_UNDERCLOCKED_STS) + dev_err(arizona->dev, "ISRC3 underclocked\n"); if (val & ARIZONA_ISRC2_UNDERCLOCKED_STS) dev_err(arizona->dev, "ISRC2 underclocked\n"); if (val & ARIZONA_ISRC1_UNDERCLOCKED_STS) @@ -192,6 +194,8 @@ static irqreturn_t arizona_overclocked(int irq, void *data) dev_err(arizona->dev, "ASRC sync WARP overclocked\n"); if (val[1] & ARIZONA_ADSP2_1_OVERCLOCKED_STS) dev_err(arizona->dev, "DSP1 overclocked\n"); + if (val[1] & ARIZONA_ISRC3_OVERCLOCKED_STS) + dev_err(arizona->dev, "ISRC3 overclocked\n"); if (val[1] & ARIZONA_ISRC2_OVERCLOCKED_STS) dev_err(arizona->dev, "ISRC2 overclocked\n"); if (val[1] & ARIZONA_ISRC1_OVERCLOCKED_STS) @@ -497,12 +501,12 @@ const struct dev_pm_ops arizona_pm_ops = { EXPORT_SYMBOL_GPL(arizona_pm_ops); #ifdef CONFIG_OF -int arizona_of_get_type(struct device *dev) +unsigned long arizona_of_get_type(struct device *dev) { const struct of_device_id *id = of_match_device(arizona_of_match, dev); if (id) - return (int)id->data; + return (unsigned long)id->data; else return 0; } @@ -578,17 +582,21 @@ static const struct mfd_cell early_devs[] = { }; static const char *wm5102_supplies[] = { + "MICVDD", "DBVDD2", "DBVDD3", "CPVDD", "SPKVDDL", "SPKVDDR", - "MICVDD", }; static const struct mfd_cell wm5102_devs[] = { { .name = "arizona-micsupp" }, - { .name = "arizona-extcon" }, + { + .name = "arizona-extcon", + .parent_supplies = wm5102_supplies, + .num_parent_supplies = 1, /* We only need MICVDD */ + }, { .name = "arizona-gpio" }, { .name = "arizona-haptics" }, { .name = "arizona-pwm" }, @@ -601,7 +609,11 @@ static const struct mfd_cell wm5102_devs[] = { static const struct mfd_cell wm5110_devs[] = { { .name = "arizona-micsupp" }, - { .name = "arizona-extcon" }, + { + .name = "arizona-extcon", + .parent_supplies = wm5102_supplies, + .num_parent_supplies = 1, /* We only need MICVDD */ + }, { .name = "arizona-gpio" }, { .name = "arizona-haptics" }, { .name = "arizona-pwm" }, @@ -613,6 +625,7 @@ static const struct mfd_cell wm5110_devs[] = { }; static const char *wm8997_supplies[] = { + "MICVDD", "DBVDD2", "CPVDD", 
"SPKVDD", @@ -620,7 +633,11 @@ static const char *wm8997_supplies[] = { static const struct mfd_cell wm8997_devs[] = { { .name = "arizona-micsupp" }, - { .name = "arizona-extcon" }, + { + .name = "arizona-extcon", + .parent_supplies = wm8997_supplies, + .num_parent_supplies = 1, /* We only need MICVDD */ + }, { .name = "arizona-gpio" }, { .name = "arizona-haptics" }, { .name = "arizona-pwm" }, @@ -683,7 +700,13 @@ int arizona_dev_init(struct arizona *arizona) goto err_early; } - arizona->dcvdd = devm_regulator_get(arizona->dev, "DCVDD"); + /** + * Don't use devres here because the only device we have to get + * against is the MFD device and DCVDD will likely be supplied by + * one of its children. Meaning that the regulator will be + * destroyed by the time devres calls regulator put. + */ + arizona->dcvdd = regulator_get(arizona->dev, "DCVDD"); if (IS_ERR(arizona->dcvdd)) { ret = PTR_ERR(arizona->dcvdd); dev_err(dev, "Failed to request DCVDD: %d\n", ret); @@ -697,7 +720,7 @@ int arizona_dev_init(struct arizona *arizona) "arizona /RESET"); if (ret != 0) { dev_err(dev, "Failed to request /RESET: %d\n", ret); - goto err_early; + goto err_dcvdd; } } @@ -706,7 +729,7 @@ int arizona_dev_init(struct arizona *arizona) if (ret != 0) { dev_err(dev, "Failed to enable core supplies: %d\n", ret); - goto err_early; + goto err_dcvdd; } ret = regulator_enable(arizona->dcvdd); @@ -1015,6 +1038,8 @@ err_reset: err_enable: regulator_bulk_disable(arizona->num_core_supplies, arizona->core_supplies); +err_dcvdd: + regulator_put(arizona->dcvdd); err_early: mfd_remove_devices(dev); return ret; @@ -1023,16 +1048,20 @@ EXPORT_SYMBOL_GPL(arizona_dev_init); int arizona_dev_exit(struct arizona *arizona) { + pm_runtime_disable(arizona->dev); + + regulator_disable(arizona->dcvdd); + regulator_put(arizona->dcvdd); + mfd_remove_devices(arizona->dev); arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona); arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona); arizona_free_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, arizona); - pm_runtime_disable(arizona->dev); arizona_irq_exit(arizona); if (arizona->pdata.reset) gpio_set_value_cansleep(arizona->pdata.reset, 0); - regulator_disable(arizona->dcvdd); - regulator_bulk_disable(ARRAY_SIZE(arizona->core_supplies), + + regulator_bulk_disable(arizona->num_core_supplies, arizona->core_supplies); return 0; } diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c index beccb790c9ba..9d4156fb082a 100644 --- a/drivers/mfd/arizona-i2c.c +++ b/drivers/mfd/arizona-i2c.c @@ -24,11 +24,12 @@ #include "arizona.h" static int arizona_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { struct arizona *arizona; const struct regmap_config *regmap_config; - int ret, type; + unsigned long type; + int ret; if (i2c->dev.of_node) type = arizona_of_get_type(&i2c->dev); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index 17102f589100..d420dbc0e2b0 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -188,24 +188,33 @@ int arizona_irq_init(struct arizona *arizona) int flags = IRQF_ONESHOT; int ret, i; const struct regmap_irq_chip *aod, *irq; - bool ctrlif_error = true; struct irq_data *irq_data; + arizona->ctrlif_error = true; + switch (arizona->type) { #ifdef CONFIG_MFD_WM5102 case WM5102: aod = &wm5102_aod; irq = &wm5102_irq; - ctrlif_error = false; + arizona->ctrlif_error = false; break; #endif #ifdef CONFIG_MFD_WM5110 case WM5110: aod = &wm5110_aod; - irq = &wm5110_irq; - ctrlif_error = 
false; + switch (arizona->rev) { + case 0 ... 2: + irq = &wm5110_irq; + break; + default: + irq = &wm5110_revd_irq; + break; + } + + arizona->ctrlif_error = false; break; #endif #ifdef CONFIG_MFD_WM8997 @@ -213,7 +222,7 @@ int arizona_irq_init(struct arizona *arizona) aod = &wm8997_aod; irq = &wm8997_irq; - ctrlif_error = false; + arizona->ctrlif_error = false; break; #endif default: @@ -300,7 +309,7 @@ int arizona_irq_init(struct arizona *arizona) } /* Handle control interface errors in the core */ - if (ctrlif_error) { + if (arizona->ctrlif_error) { i = arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR); ret = request_threaded_irq(i, NULL, arizona_ctrlif_err, IRQF_ONESHOT, @@ -345,7 +354,9 @@ int arizona_irq_init(struct arizona *arizona) return 0; err_main_irq: - free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona); + if (arizona->ctrlif_error) + free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), + arizona); err_ctrlif: free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona); err_boot_done: @@ -361,7 +372,9 @@ err: int arizona_irq_exit(struct arizona *arizona) { - free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona); + if (arizona->ctrlif_error) + free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), + arizona); free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona); regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1), arizona->irq_chip); diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c index 1ca554b18bef..5145d78bf07e 100644 --- a/drivers/mfd/arizona-spi.c +++ b/drivers/mfd/arizona-spi.c @@ -28,7 +28,8 @@ static int arizona_spi_probe(struct spi_device *spi) const struct spi_device_id *id = spi_get_device_id(spi); struct arizona *arizona; const struct regmap_config *regmap_config; - int ret, type; + unsigned long type; + int ret; if (spi->dev.of_node) type = arizona_of_get_type(&spi->dev); diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h index b4cef777df73..fbe2843271c5 100644 --- a/drivers/mfd/arizona.h +++ b/drivers/mfd/arizona.h @@ -36,6 +36,7 @@ extern const struct regmap_irq_chip wm5102_irq; extern const struct regmap_irq_chip wm5110_aod; extern const struct regmap_irq_chip wm5110_irq; +extern const struct regmap_irq_chip wm5110_revd_irq; extern const struct regmap_irq_chip wm8997_aod; extern const struct regmap_irq_chip wm8997_irq; @@ -46,9 +47,9 @@ int arizona_irq_init(struct arizona *arizona); int arizona_irq_exit(struct arizona *arizona); #ifdef CONFIG_OF -int arizona_of_get_type(struct device *dev); +unsigned long arizona_of_get_type(struct device *dev); #else -static inline int arizona_of_get_type(struct device *dev) +static inline unsigned long arizona_of_get_type(struct device *dev) { return 0; } diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 9f6294f2a070..9fc4186d4132 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -899,13 +899,15 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, ds1wm_resources[0].end >>= asic->bus_shift; /* MMC */ - asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) + + if (mem_sdio) { + asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) + mem_sdio->start, ASIC3_SD_CONFIG_SIZE >> asic->bus_shift); - if (!asic->tmio_cnf) { - ret = -ENOMEM; - dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n"); - goto out; + if (!asic->tmio_cnf) { + ret = -ENOMEM; + dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n"); + goto out; + } } asic3_mmc_resources[0].start >>= asic->bus_shift; 
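
Illustrative aside on the arizona_of_get_type() hunks above: the helper returns whatever was stashed in of_device_id.data, which is a const void *, so round-tripping it through int would truncate on 64-bit builds and trigger pointer-to-integer cast warnings; keeping the value unsigned long preserves the full pointer width, which is why the i2c/spi probe paths switch their local "type" variable too. Below is a minimal, self-contained sketch of the same pattern in plain C (not kernel code; fake_of_device_id, fake_match and fake_get_type are made-up names for illustration only):

/*
 * Illustrative only -- not part of the patch series above.
 * Same idea as arizona_of_get_type(): an integer ID stored in a
 * pointer-sized table field and recovered via an unsigned long cast.
 */
#include <stdio.h>

struct fake_of_device_id {              /* stand-in for struct of_device_id */
	const char *compatible;
	const void *data;               /* pointer-sized slot holding an ID */
};

enum { WM5102 = 0x5102, WM5110 = 0x5110, WM8997 = 0x8997 };

static const struct fake_of_device_id fake_match[] = {
	{ "wlf,wm5102", (const void *)(unsigned long)WM5102 },
	{ "wlf,wm5110", (const void *)(unsigned long)WM5110 },
	{ "wlf,wm8997", (const void *)(unsigned long)WM8997 },
};

static unsigned long fake_get_type(const struct fake_of_device_id *id)
{
	/*
	 * unsigned long is pointer-sized on the usual 32- and 64-bit
	 * Linux ABIs, so no bits are lost; an int cast would both warn
	 * and discard the upper half of the pointer on LP64.
	 */
	return id ? (unsigned long)id->data : 0;
}

int main(void)
{
	printf("matched type: 0x%lx\n", fake_get_type(&fake_match[1])); /* 0x5110 */
	return 0;
}
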
asic3_mmc_resources[0].end >>= asic->bus_shift; diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c index 38fe9bf0d169..4873f9c50452 100644 --- a/drivers/mfd/cros_ec.c +++ b/drivers/mfd/cros_ec.c @@ -25,64 +25,42 @@ #include <linux/mfd/cros_ec_commands.h> int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, - struct cros_ec_msg *msg) + struct cros_ec_command *msg) { uint8_t *out; int csum, i; - BUG_ON(msg->out_len > EC_PROTO2_MAX_PARAM_SIZE); + BUG_ON(msg->outsize > EC_PROTO2_MAX_PARAM_SIZE); out = ec_dev->dout; out[0] = EC_CMD_VERSION0 + msg->version; - out[1] = msg->cmd; - out[2] = msg->out_len; + out[1] = msg->command; + out[2] = msg->outsize; csum = out[0] + out[1] + out[2]; - for (i = 0; i < msg->out_len; i++) - csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->out_buf[i]; - out[EC_MSG_TX_HEADER_BYTES + msg->out_len] = (uint8_t)(csum & 0xff); + for (i = 0; i < msg->outsize; i++) + csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->outdata[i]; + out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = (uint8_t)(csum & 0xff); - return EC_MSG_TX_PROTO_BYTES + msg->out_len; + return EC_MSG_TX_PROTO_BYTES + msg->outsize; } EXPORT_SYMBOL(cros_ec_prepare_tx); -static int cros_ec_command_sendrecv(struct cros_ec_device *ec_dev, - uint16_t cmd, void *out_buf, int out_len, - void *in_buf, int in_len) +int cros_ec_check_result(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg) { - struct cros_ec_msg msg; - - msg.version = cmd >> 8; - msg.cmd = cmd & 0xff; - msg.out_buf = out_buf; - msg.out_len = out_len; - msg.in_buf = in_buf; - msg.in_len = in_len; - - return ec_dev->command_xfer(ec_dev, &msg); -} - -static int cros_ec_command_recv(struct cros_ec_device *ec_dev, - uint16_t cmd, void *buf, int buf_len) -{ - return cros_ec_command_sendrecv(ec_dev, cmd, NULL, 0, buf, buf_len); -} - -static int cros_ec_command_send(struct cros_ec_device *ec_dev, - uint16_t cmd, void *buf, int buf_len) -{ - return cros_ec_command_sendrecv(ec_dev, cmd, buf, buf_len, NULL, 0); -} - -static irqreturn_t ec_irq_thread(int irq, void *data) -{ - struct cros_ec_device *ec_dev = data; - - if (device_may_wakeup(ec_dev->dev)) - pm_wakeup_event(ec_dev->dev, 0); - - blocking_notifier_call_chain(&ec_dev->event_notifier, 1, ec_dev); - - return IRQ_HANDLED; + switch (msg->result) { + case EC_RES_SUCCESS: + return 0; + case EC_RES_IN_PROGRESS: + dev_dbg(ec_dev->dev, "command 0x%02x in progress\n", + msg->command); + return -EAGAIN; + default: + dev_dbg(ec_dev->dev, "command 0x%02x returned %d\n", + msg->command, msg->result); + return 0; + } } +EXPORT_SYMBOL(cros_ec_check_result); static const struct mfd_cell cros_devs[] = { { @@ -102,12 +80,6 @@ int cros_ec_register(struct cros_ec_device *ec_dev) struct device *dev = ec_dev->dev; int err = 0; - BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier); - - ec_dev->command_send = cros_ec_command_send; - ec_dev->command_recv = cros_ec_command_recv; - ec_dev->command_sendrecv = cros_ec_command_sendrecv; - if (ec_dev->din_size) { ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL); if (!ec_dev->din) @@ -119,42 +91,23 @@ int cros_ec_register(struct cros_ec_device *ec_dev) return -ENOMEM; } - if (!ec_dev->irq) { - dev_dbg(dev, "no valid IRQ: %d\n", ec_dev->irq); - return err; - } - - err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - "chromeos-ec", ec_dev); - if (err) { - dev_err(dev, "request irq %d: error %d\n", ec_dev->irq, err); - return err; - } - err = mfd_add_devices(dev, 0, cros_devs, ARRAY_SIZE(cros_devs), NULL, ec_dev->irq, 
NULL); if (err) { dev_err(dev, "failed to add mfd devices\n"); - goto fail_mfd; + return err; } - dev_info(dev, "Chrome EC (%s)\n", ec_dev->name); + dev_info(dev, "Chrome EC device registered\n"); return 0; - -fail_mfd: - free_irq(ec_dev->irq, ec_dev); - - return err; } EXPORT_SYMBOL(cros_ec_register); int cros_ec_remove(struct cros_ec_device *ec_dev) { mfd_remove_devices(ec_dev->dev); - free_irq(ec_dev->irq, ec_dev); return 0; } diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c index 4f71be99a183..c0c30f4f946f 100644 --- a/drivers/mfd/cros_ec_i2c.c +++ b/drivers/mfd/cros_ec_i2c.c @@ -29,12 +29,13 @@ static inline struct cros_ec_device *to_ec_dev(struct device *dev) return i2c_get_clientdata(client); } -static int cros_ec_command_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_msg *msg) +static int cros_ec_cmd_xfer_i2c(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg) { struct i2c_client *client = ec_dev->priv; int ret = -ENOMEM; int i; + int len; int packet_len; u8 *out_buf = NULL; u8 *in_buf = NULL; @@ -50,7 +51,7 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev, * allocate larger packet (one byte for checksum, one byte for * length, and one for result code) */ - packet_len = msg->in_len + 3; + packet_len = msg->insize + 3; in_buf = kzalloc(packet_len, GFP_KERNEL); if (!in_buf) goto done; @@ -61,7 +62,7 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev, * allocate larger packet (one byte for checksum, one for * command code, one for length, and one for command version) */ - packet_len = msg->out_len + 4; + packet_len = msg->outsize + 4; out_buf = kzalloc(packet_len, GFP_KERNEL); if (!out_buf) goto done; @@ -69,16 +70,16 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev, i2c_msg[0].buf = (char *)out_buf; out_buf[0] = EC_CMD_VERSION0 + msg->version; - out_buf[1] = msg->cmd; - out_buf[2] = msg->out_len; + out_buf[1] = msg->command; + out_buf[2] = msg->outsize; /* copy message payload and compute checksum */ sum = out_buf[0] + out_buf[1] + out_buf[2]; - for (i = 0; i < msg->out_len; i++) { - out_buf[3 + i] = msg->out_buf[i]; + for (i = 0; i < msg->outsize; i++) { + out_buf[3 + i] = msg->outdata[i]; sum += out_buf[3 + i]; } - out_buf[3 + msg->out_len] = sum; + out_buf[3 + msg->outsize] = sum; /* send command to EC and read answer */ ret = i2c_transfer(client->adapter, i2c_msg, 2); @@ -92,28 +93,34 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev, } /* check response error code */ - if (i2c_msg[1].buf[0]) { - dev_warn(ec_dev->dev, "command 0x%02x returned an error %d\n", - msg->cmd, i2c_msg[1].buf[0]); - ret = -EINVAL; + msg->result = i2c_msg[1].buf[0]; + ret = cros_ec_check_result(ec_dev, msg); + if (ret) + goto done; + + len = in_buf[1]; + if (len > msg->insize) { + dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)", + len, msg->insize); + ret = -ENOSPC; goto done; } /* copy response packet payload and compute checksum */ sum = in_buf[0] + in_buf[1]; - for (i = 0; i < msg->in_len; i++) { - msg->in_buf[i] = in_buf[2 + i]; + for (i = 0; i < len; i++) { + msg->indata[i] = in_buf[2 + i]; sum += in_buf[2 + i]; } dev_dbg(ec_dev->dev, "packet: %*ph, sum = %02x\n", i2c_msg[1].len, in_buf, sum); - if (sum != in_buf[2 + msg->in_len]) { + if (sum != in_buf[2 + len]) { dev_err(ec_dev->dev, "bad packet checksum\n"); ret = -EBADMSG; goto done; } - ret = 0; + ret = len; done: kfree(in_buf); kfree(out_buf); @@ -132,11 +139,10 @@ static int cros_ec_i2c_probe(struct i2c_client *client, return -ENOMEM; 
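
For context on the cros_ec rework above (struct cros_ec_msg replaced by struct cros_ec_command, the per-transport command_xfer callback renamed to cmd_xfer, and EC status now reported through msg->result plus cros_ec_check_result()), here is a rough caller-side sketch. Only the names visible in these hunks are assumed to be real (version/command/outsize/outdata/insize/indata/result, ec_dev->cmd_xfer, cros_ec_check_result); the function itself, the choice of EC_CMD_GET_VERSION from cros_ec_commands.h, and the treatment of indata as a caller-supplied buffer (as the i2c/spi transports above treat it) are illustrative assumptions, not code from the series:

/*
 * Hypothetical caller sketch -- not taken from the series.
 * Assumes <linux/mfd/cros_ec.h> and <linux/mfd/cros_ec_commands.h>.
 */
static int example_read_ec_version(struct cros_ec_device *ec_dev)
{
	struct ec_response_get_version ver;
	struct cros_ec_command msg = {
		.version = 0,
		.command = EC_CMD_GET_VERSION,  /* no request payload */
		.outsize = 0,
		.indata  = (uint8_t *)&ver,     /* response lands here */
		.insize  = sizeof(ver),
	};
	int ret;

	/*
	 * cmd_xfer() (cros_ec_cmd_xfer_i2c/_spi above) returns the number
	 * of response bytes received, or a negative errno on transport
	 * failure; the EC-level status ends up in msg.result.
	 */
	ret = ec_dev->cmd_xfer(ec_dev, &msg);
	if (ret < 0)
		return ret;

	/* Map msg.result to 0 or -EAGAIN, as the transports now do. */
	ret = cros_ec_check_result(ec_dev, &msg);
	if (ret)
		return ret;

	dev_info(ec_dev->dev, "EC RO version: %s\n", ver.version_string_ro);
	return 0;
}
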
i2c_set_clientdata(client, ec_dev); - ec_dev->name = "I2C"; ec_dev->dev = dev; ec_dev->priv = client; ec_dev->irq = client->irq; - ec_dev->command_xfer = cros_ec_command_xfer; + ec_dev->cmd_xfer = cros_ec_cmd_xfer_i2c; ec_dev->ec_name = client->name; ec_dev->phys_name = client->adapter->name; ec_dev->parent = &client->dev; diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c index 8c1c7cc373f8..588c700af39c 100644 --- a/drivers/mfd/cros_ec_spi.c +++ b/drivers/mfd/cros_ec_spi.c @@ -73,7 +73,7 @@ * if no record * @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that * is sent when we want to turn off CS at the end of a transaction. - * @lock: mutex to ensure only one user of cros_ec_command_spi_xfer at a time + * @lock: mutex to ensure only one user of cros_ec_cmd_xfer_spi at a time */ struct cros_ec_spi { struct spi_device *spi; @@ -210,13 +210,13 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, } /** - * cros_ec_command_spi_xfer - Transfer a message over SPI and receive the reply + * cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply * * @ec_dev: ChromeOS EC device * @ec_msg: Message to transfer */ -static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_msg *ec_msg) +static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev, + struct cros_ec_command *ec_msg) { struct cros_ec_spi *ec_spi = ec_dev->priv; struct spi_transfer trans; @@ -258,23 +258,19 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev, /* Get the response */ if (!ret) { ret = cros_ec_spi_receive_response(ec_dev, - ec_msg->in_len + EC_MSG_TX_PROTO_BYTES); + ec_msg->insize + EC_MSG_TX_PROTO_BYTES); } else { dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); } - /* turn off CS */ + /* + * Turn off CS, possibly adding a delay to ensure the rising edge + * doesn't come too soon after the end of the data. + */ spi_message_init(&msg); - - if (ec_spi->end_of_msg_delay) { - /* - * Add delay for last transaction, to ensure the rising edge - * doesn't come too soon after the end of the data. 
- */ - memset(&trans, 0, sizeof(trans)); - trans.delay_usecs = ec_spi->end_of_msg_delay; - spi_message_add_tail(&trans, &msg); - } + memset(&trans, 0, sizeof(trans)); + trans.delay_usecs = ec_spi->end_of_msg_delay; + spi_message_add_tail(&trans, &msg); final_ret = spi_sync(ec_spi->spi, &msg); ec_spi->last_transfer_ns = ktime_get_ns(); @@ -285,20 +281,19 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev, goto exit; } - /* check response error code */ ptr = ec_dev->din; - if (ptr[0]) { - dev_warn(ec_dev->dev, "command 0x%02x returned an error %d\n", - ec_msg->cmd, ptr[0]); - debug_packet(ec_dev->dev, "in_err", ptr, len); - ret = -EINVAL; + + /* check response error code */ + ec_msg->result = ptr[0]; + ret = cros_ec_check_result(ec_dev, ec_msg); + if (ret) goto exit; - } + len = ptr[1]; sum = ptr[0] + ptr[1]; - if (len > ec_msg->in_len) { + if (len > ec_msg->insize) { dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)", - len, ec_msg->in_len); + len, ec_msg->insize); ret = -ENOSPC; goto exit; } @@ -306,8 +301,8 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev, /* copy response packet payload and compute checksum */ for (i = 0; i < len; i++) { sum += ptr[i + 2]; - if (ec_msg->in_len) - ec_msg->in_buf[i] = ptr[i + 2]; + if (ec_msg->insize) + ec_msg->indata[i] = ptr[i + 2]; } sum &= 0xff; @@ -321,7 +316,7 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev, goto exit; } - ret = 0; + ret = len; exit: mutex_unlock(&ec_spi->lock); return ret; @@ -364,11 +359,10 @@ static int cros_ec_spi_probe(struct spi_device *spi) cros_ec_spi_dt_probe(ec_spi, dev); spi_set_drvdata(spi, ec_dev); - ec_dev->name = "SPI"; ec_dev->dev = dev; ec_dev->priv = ec_spi; ec_dev->irq = spi->irq; - ec_dev->command_xfer = cros_ec_command_spi_xfer; + ec_dev->cmd_xfer = cros_ec_cmd_xfer_spi; ec_dev->ec_name = ec_spi->spi->modalias; ec_dev->phys_name = dev_name(&ec_spi->spi->dev); ec_dev->parent = &ec_spi->spi->dev; @@ -381,6 +375,8 @@ static int cros_ec_spi_probe(struct spi_device *spi) return err; } + device_init_wakeup(&spi->dev, true); + return 0; } diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c index e70ae315abc7..93db8bb8c8f0 100644 --- a/drivers/mfd/da9063-core.c +++ b/drivers/mfd/da9063-core.c @@ -153,9 +153,9 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq) "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n", model, variant_id); - if (variant_code != PMIC_DA9063_BB) { - dev_err(da9063->dev, "Unknown chip variant code: 0x%02X\n", - variant_code); + if (variant_code < PMIC_DA9063_BB && variant_code != PMIC_DA9063_AD) { + dev_err(da9063->dev, + "Cannot support variant code: 0x%02X\n", variant_code); return -ENODEV; } diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c index 8db5c805c64f..21fd8d9a217b 100644 --- a/drivers/mfd/da9063-i2c.c +++ b/drivers/mfd/da9063-i2c.c @@ -25,10 +25,10 @@ #include <linux/mfd/da9063/pdata.h> #include <linux/mfd/da9063/registers.h> -static const struct regmap_range da9063_readable_ranges[] = { +static const struct regmap_range da9063_ad_readable_ranges[] = { { .range_min = DA9063_REG_PAGE_CON, - .range_max = DA9063_REG_SECOND_D, + .range_max = DA9063_AD_REG_SECOND_D, }, { .range_min = DA9063_REG_SEQ, .range_max = DA9063_REG_ID_32_31, @@ -37,14 +37,14 @@ static const struct regmap_range da9063_readable_ranges[] = { .range_max = DA9063_REG_AUTO3_LOW, }, { .range_min = DA9063_REG_T_OFFSET, - .range_max = DA9063_REG_GP_ID_19, + .range_max = DA9063_AD_REG_GP_ID_19, }, { .range_min 
= DA9063_REG_CHIP_ID, .range_max = DA9063_REG_CHIP_VARIANT, }, }; -static const struct regmap_range da9063_writeable_ranges[] = { +static const struct regmap_range da9063_ad_writeable_ranges[] = { { .range_min = DA9063_REG_PAGE_CON, .range_max = DA9063_REG_PAGE_CON, @@ -53,7 +53,7 @@ static const struct regmap_range da9063_writeable_ranges[] = { .range_max = DA9063_REG_VSYS_MON, }, { .range_min = DA9063_REG_COUNT_S, - .range_max = DA9063_REG_ALARM_Y, + .range_max = DA9063_AD_REG_ALARM_Y, }, { .range_min = DA9063_REG_SEQ, .range_max = DA9063_REG_ID_32_31, @@ -62,14 +62,14 @@ static const struct regmap_range da9063_writeable_ranges[] = { .range_max = DA9063_REG_AUTO3_LOW, }, { .range_min = DA9063_REG_CONFIG_I, - .range_max = DA9063_REG_MON_REG_4, + .range_max = DA9063_AD_REG_MON_REG_4, }, { - .range_min = DA9063_REG_GP_ID_0, - .range_max = DA9063_REG_GP_ID_19, + .range_min = DA9063_AD_REG_GP_ID_0, + .range_max = DA9063_AD_REG_GP_ID_19, }, }; -static const struct regmap_range da9063_volatile_ranges[] = { +static const struct regmap_range da9063_ad_volatile_ranges[] = { { .range_min = DA9063_REG_STATUS_A, .range_max = DA9063_REG_EVENT_D, @@ -81,26 +81,104 @@ static const struct regmap_range da9063_volatile_ranges[] = { .range_max = DA9063_REG_ADC_MAN, }, { .range_min = DA9063_REG_ADC_RES_L, - .range_max = DA9063_REG_SECOND_D, + .range_max = DA9063_AD_REG_SECOND_D, }, { - .range_min = DA9063_REG_MON_REG_5, - .range_max = DA9063_REG_MON_REG_6, + .range_min = DA9063_AD_REG_MON_REG_5, + .range_max = DA9063_AD_REG_MON_REG_6, }, }; -static const struct regmap_access_table da9063_readable_table = { - .yes_ranges = da9063_readable_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063_readable_ranges), +static const struct regmap_access_table da9063_ad_readable_table = { + .yes_ranges = da9063_ad_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_ad_readable_ranges), }; -static const struct regmap_access_table da9063_writeable_table = { - .yes_ranges = da9063_writeable_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063_writeable_ranges), +static const struct regmap_access_table da9063_ad_writeable_table = { + .yes_ranges = da9063_ad_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_ad_writeable_ranges), }; -static const struct regmap_access_table da9063_volatile_table = { - .yes_ranges = da9063_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063_volatile_ranges), +static const struct regmap_access_table da9063_ad_volatile_table = { + .yes_ranges = da9063_ad_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_ad_volatile_ranges), +}; + +static const struct regmap_range da9063_bb_readable_ranges[] = { + { + .range_min = DA9063_REG_PAGE_CON, + .range_max = DA9063_BB_REG_SECOND_D, + }, { + .range_min = DA9063_REG_SEQ, + .range_max = DA9063_REG_ID_32_31, + }, { + .range_min = DA9063_REG_SEQ_A, + .range_max = DA9063_REG_AUTO3_LOW, + }, { + .range_min = DA9063_REG_T_OFFSET, + .range_max = DA9063_BB_REG_GP_ID_19, + }, { + .range_min = DA9063_REG_CHIP_ID, + .range_max = DA9063_REG_CHIP_VARIANT, + }, +}; + +static const struct regmap_range da9063_bb_writeable_ranges[] = { + { + .range_min = DA9063_REG_PAGE_CON, + .range_max = DA9063_REG_PAGE_CON, + }, { + .range_min = DA9063_REG_FAULT_LOG, + .range_max = DA9063_REG_VSYS_MON, + }, { + .range_min = DA9063_REG_COUNT_S, + .range_max = DA9063_BB_REG_ALARM_Y, + }, { + .range_min = DA9063_REG_SEQ, + .range_max = DA9063_REG_ID_32_31, + }, { + .range_min = DA9063_REG_SEQ_A, + .range_max = DA9063_REG_AUTO3_LOW, + }, { + .range_min = DA9063_REG_CONFIG_I, + .range_max = 
DA9063_BB_REG_MON_REG_4, + }, { + .range_min = DA9063_BB_REG_GP_ID_0, + .range_max = DA9063_BB_REG_GP_ID_19, + }, +}; + +static const struct regmap_range da9063_bb_volatile_ranges[] = { + { + .range_min = DA9063_REG_STATUS_A, + .range_max = DA9063_REG_EVENT_D, + }, { + .range_min = DA9063_REG_CONTROL_F, + .range_max = DA9063_REG_CONTROL_F, + }, { + .range_min = DA9063_REG_ADC_MAN, + .range_max = DA9063_REG_ADC_MAN, + }, { + .range_min = DA9063_REG_ADC_RES_L, + .range_max = DA9063_BB_REG_SECOND_D, + }, { + .range_min = DA9063_BB_REG_MON_REG_5, + .range_max = DA9063_BB_REG_MON_REG_6, + }, +}; + +static const struct regmap_access_table da9063_bb_readable_table = { + .yes_ranges = da9063_bb_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_bb_readable_ranges), +}; + +static const struct regmap_access_table da9063_bb_writeable_table = { + .yes_ranges = da9063_bb_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_bb_writeable_ranges), +}; + +static const struct regmap_access_table da9063_bb_volatile_table = { + .yes_ranges = da9063_bb_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges), }; static const struct regmap_range_cfg da9063_range_cfg[] = { @@ -123,10 +201,6 @@ static struct regmap_config da9063_regmap_config = { .max_register = DA9063_REG_CHIP_VARIANT, .cache_type = REGCACHE_RBTREE, - - .rd_table = &da9063_readable_table, - .wr_table = &da9063_writeable_table, - .volatile_table = &da9063_volatile_table, }; static int da9063_i2c_probe(struct i2c_client *i2c, @@ -143,6 +217,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063->dev = &i2c->dev; da9063->chip_irq = i2c->irq; + if (da9063->variant_code == PMIC_DA9063_AD) { + da9063_regmap_config.rd_table = &da9063_ad_readable_table; + da9063_regmap_config.wr_table = &da9063_ad_writeable_table; + da9063_regmap_config.volatile_table = &da9063_ad_volatile_table; + } else { + da9063_regmap_config.rd_table = &da9063_bb_readable_table; + da9063_regmap_config.wr_table = &da9063_bb_writeable_table; + da9063_regmap_config.volatile_table = &da9063_bb_volatile_table; + } + da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config); if (IS_ERR(da9063->regmap)) { ret = PTR_ERR(da9063->regmap); diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c index 7a55c0071fa8..4c826f78acd0 100644 --- a/drivers/mfd/dm355evm_msp.c +++ b/drivers/mfd/dm355evm_msp.c @@ -95,7 +95,7 @@ EXPORT_SYMBOL(dm355evm_msp_read); * Many of the msp430 pins are just used as fixed-direction GPIOs. * We could export a few more of them this way, if we wanted. 
*/ -#define MSP_GPIO(bit,reg) ((DM355EVM_MSP_ ## reg) << 3 | (bit)) +#define MSP_GPIO(bit, reg) ((DM355EVM_MSP_ ## reg) << 3 | (bit)) static const u8 msp_gpios[] = { /* eight leds */ diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 2ed774e7d342..5991faddd3c6 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -62,7 +62,7 @@ static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data) struct spi_message m; int status; - memset(&t, 0, sizeof t); + memset(&t, 0, sizeof(t)); spi_message_init(&m); t.len = sizeof(u32); spi_message_add_tail(&t, &m); @@ -211,7 +211,6 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) desc->irq_data.chip->irq_ack(&desc->irq_data); queue_work(pcap->workqueue, &pcap->isr_work); - return; } /* ADC */ diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c index d7b2a75aca3e..6bdb78c2ac77 100644 --- a/drivers/mfd/htc-i2cpld.c +++ b/drivers/mfd/htc-i2cpld.c @@ -332,18 +332,13 @@ static int htcpld_setup_chip_irq( int chip_index) { struct htcpld_data *htcpld; - struct device *dev = &pdev->dev; - struct htcpld_core_platform_data *pdata; struct htcpld_chip *chip; - struct htcpld_chip_platform_data *plat_chip_data; unsigned int irq, irq_end; int ret = 0; /* Get the platform and driver data */ - pdata = dev_get_platdata(dev); htcpld = platform_get_drvdata(pdev); chip = &htcpld->chip[chip_index]; - plat_chip_data = &pdata->chip[chip_index]; /* Setup irq handlers */ irq_end = chip->irq_start + chip->nirqs; @@ -409,7 +404,7 @@ static int htcpld_register_chip_i2c( } i2c_set_clientdata(client, chip); - snprintf(client->name, I2C_NAME_SIZE, "Chip_0x%d", client->addr); + snprintf(client->name, I2C_NAME_SIZE, "Chip_0x%x", client->addr); chip->client = client; /* Reset the chip */ diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c new file mode 100644 index 000000000000..2720922f90b4 --- /dev/null +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -0,0 +1,170 @@ +/* + * intel_soc_pmic_core.c - Intel SoC PMIC MFD Driver + * + * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Yang, Bin <bin.yang@intel.com> + * Author: Zhu, Lejun <lejun.zhu@linux.intel.com> + */ + +#include <linux/module.h> +#include <linux/mfd/core.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/gpio/consumer.h> +#include <linux/acpi.h> +#include <linux/regmap.h> +#include <linux/mfd/intel_soc_pmic.h> +#include "intel_soc_pmic_core.h" + +/* + * On some boards the PMIC interrupt may come from a GPIO line. + * Try to lookup the ACPI table and see if such connection exists. If not, + * return -ENOENT and use the IRQ provided by I2C. 
+ */ +static int intel_soc_pmic_find_gpio_irq(struct device *dev) +{ + struct gpio_desc *desc; + int irq; + + desc = devm_gpiod_get_index(dev, "intel_soc_pmic", 0); + if (IS_ERR(desc)) + return -ENOENT; + + irq = gpiod_to_irq(desc); + if (irq < 0) + dev_warn(dev, "Can't get irq: %d\n", irq); + + return irq; +} + +static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *i2c_id) +{ + struct device *dev = &i2c->dev; + const struct acpi_device_id *id; + struct intel_soc_pmic_config *config; + struct intel_soc_pmic *pmic; + int ret; + int irq; + + id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!id || !id->driver_data) + return -ENODEV; + + config = (struct intel_soc_pmic_config *)id->driver_data; + + pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL); + dev_set_drvdata(dev, pmic); + + pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config); + + irq = intel_soc_pmic_find_gpio_irq(dev); + pmic->irq = (irq < 0) ? i2c->irq : irq; + + ret = regmap_add_irq_chip(pmic->regmap, pmic->irq, + config->irq_flags | IRQF_ONESHOT, + 0, config->irq_chip, + &pmic->irq_chip_data); + if (ret) + return ret; + + ret = enable_irq_wake(pmic->irq); + if (ret) + dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret); + + ret = mfd_add_devices(dev, -1, config->cell_dev, + config->n_cell_devs, NULL, 0, + regmap_irq_get_domain(pmic->irq_chip_data)); + if (ret) + goto err_del_irq_chip; + + return 0; + +err_del_irq_chip: + regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); + return ret; +} + +static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev); + + regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data); + + mfd_remove_devices(&i2c->dev); + + return 0; +} + +static void intel_soc_pmic_shutdown(struct i2c_client *i2c) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev); + + disable_irq(pmic->irq); + + return; +} + +static int intel_soc_pmic_suspend(struct device *dev) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(dev); + + disable_irq(pmic->irq); + + return 0; +} + +static int intel_soc_pmic_resume(struct device *dev) +{ + struct intel_soc_pmic *pmic = dev_get_drvdata(dev); + + enable_irq(pmic->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(intel_soc_pmic_pm_ops, intel_soc_pmic_suspend, + intel_soc_pmic_resume); + +static const struct i2c_device_id intel_soc_pmic_i2c_id[] = { + { } +}; +MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id); + +#if defined(CONFIG_ACPI) +static struct acpi_device_id intel_soc_pmic_acpi_match[] = { + {"INT33FD", (kernel_ulong_t)&intel_soc_pmic_config_crc}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, intel_soc_pmic_acpi_match); +#endif + +static struct i2c_driver intel_soc_pmic_i2c_driver = { + .driver = { + .name = "intel_soc_pmic_i2c", + .owner = THIS_MODULE, + .pm = &intel_soc_pmic_pm_ops, + .acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match), + }, + .probe = intel_soc_pmic_i2c_probe, + .remove = intel_soc_pmic_i2c_remove, + .id_table = intel_soc_pmic_i2c_id, + .shutdown = intel_soc_pmic_shutdown, +}; + +module_i2c_driver(intel_soc_pmic_i2c_driver); + +MODULE_DESCRIPTION("I2C driver for Intel SoC PMIC"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>"); +MODULE_AUTHOR("Zhu, Lejun <lejun.zhu@linux.intel.com>"); diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h new file mode 100644 index 000000000000..33aacd9baddc --- /dev/null +++ b/drivers/mfd/intel_soc_pmic_core.h @@ -0,0 +1,32 @@ +/* + * 
intel_soc_pmic_core.h - Intel SoC PMIC MFD Driver + * + * Copyright (C) 2012-2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Yang, Bin <bin.yang@intel.com> + * Author: Zhu, Lejun <lejun.zhu@linux.intel.com> + */ + +#ifndef __INTEL_SOC_PMIC_CORE_H__ +#define __INTEL_SOC_PMIC_CORE_H__ + +struct intel_soc_pmic_config { + unsigned long irq_flags; + struct mfd_cell *cell_dev; + int n_cell_devs; + struct regmap_config *regmap_config; + struct regmap_irq_chip *irq_chip; +}; + +extern struct intel_soc_pmic_config intel_soc_pmic_config_crc; + +#endif /* __INTEL_SOC_PMIC_CORE_H__ */ diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c new file mode 100644 index 000000000000..7107cab832e6 --- /dev/null +++ b/drivers/mfd/intel_soc_pmic_crc.c @@ -0,0 +1,158 @@ +/* + * intel_soc_pmic_crc.c - Device access for Crystal Cove PMIC + * + * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Author: Yang, Bin <bin.yang@intel.com> + * Author: Zhu, Lejun <lejun.zhu@linux.intel.com> + */ + +#include <linux/mfd/core.h> +#include <linux/interrupt.h> +#include <linux/regmap.h> +#include <linux/mfd/intel_soc_pmic.h> +#include "intel_soc_pmic_core.h" + +#define CRYSTAL_COVE_MAX_REGISTER 0xC6 + +#define CRYSTAL_COVE_REG_IRQLVL1 0x02 +#define CRYSTAL_COVE_REG_MIRQLVL1 0x0E + +#define CRYSTAL_COVE_IRQ_PWRSRC 0 +#define CRYSTAL_COVE_IRQ_THRM 1 +#define CRYSTAL_COVE_IRQ_BCU 2 +#define CRYSTAL_COVE_IRQ_ADC 3 +#define CRYSTAL_COVE_IRQ_CHGR 4 +#define CRYSTAL_COVE_IRQ_GPIO 5 +#define CRYSTAL_COVE_IRQ_VHDMIOCP 6 + +static struct resource gpio_resources[] = { + { + .name = "GPIO", + .start = CRYSTAL_COVE_IRQ_GPIO, + .end = CRYSTAL_COVE_IRQ_GPIO, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource pwrsrc_resources[] = { + { + .name = "PWRSRC", + .start = CRYSTAL_COVE_IRQ_PWRSRC, + .end = CRYSTAL_COVE_IRQ_PWRSRC, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource adc_resources[] = { + { + .name = "ADC", + .start = CRYSTAL_COVE_IRQ_ADC, + .end = CRYSTAL_COVE_IRQ_ADC, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource thermal_resources[] = { + { + .name = "THERMAL", + .start = CRYSTAL_COVE_IRQ_THRM, + .end = CRYSTAL_COVE_IRQ_THRM, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource bcu_resources[] = { + { + .name = "BCU", + .start = CRYSTAL_COVE_IRQ_BCU, + .end = CRYSTAL_COVE_IRQ_BCU, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell crystal_cove_dev[] = { + { + .name = "crystal_cove_pwrsrc", + .num_resources = ARRAY_SIZE(pwrsrc_resources), + .resources = pwrsrc_resources, + }, + { + .name = "crystal_cove_adc", + .num_resources = ARRAY_SIZE(adc_resources), + .resources = adc_resources, + }, + { + .name = "crystal_cove_thermal", + .num_resources = ARRAY_SIZE(thermal_resources), + .resources = thermal_resources, + }, + { + .name = "crystal_cove_bcu", + .num_resources = ARRAY_SIZE(bcu_resources), + .resources = bcu_resources, + }, + { + .name = "crystal_cove_gpio", + .num_resources = ARRAY_SIZE(gpio_resources), + .resources = gpio_resources, + }, +}; + +static struct regmap_config crystal_cove_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = CRYSTAL_COVE_MAX_REGISTER, + .cache_type = REGCACHE_NONE, +}; + +static const struct regmap_irq crystal_cove_irqs[] = { + [CRYSTAL_COVE_IRQ_PWRSRC] = { + .mask = BIT(CRYSTAL_COVE_IRQ_PWRSRC), + }, + [CRYSTAL_COVE_IRQ_THRM] = { + .mask = BIT(CRYSTAL_COVE_IRQ_THRM), + }, + [CRYSTAL_COVE_IRQ_BCU] = { + .mask = BIT(CRYSTAL_COVE_IRQ_BCU), + }, + [CRYSTAL_COVE_IRQ_ADC] = { + .mask = BIT(CRYSTAL_COVE_IRQ_ADC), + }, + [CRYSTAL_COVE_IRQ_CHGR] = { + .mask = BIT(CRYSTAL_COVE_IRQ_CHGR), + }, + [CRYSTAL_COVE_IRQ_GPIO] = { + .mask = BIT(CRYSTAL_COVE_IRQ_GPIO), + }, + [CRYSTAL_COVE_IRQ_VHDMIOCP] = { + .mask = BIT(CRYSTAL_COVE_IRQ_VHDMIOCP), + }, +}; + +static struct regmap_irq_chip crystal_cove_irq_chip = { + .name = "Crystal Cove", + .irqs = crystal_cove_irqs, + .num_irqs = ARRAY_SIZE(crystal_cove_irqs), + .num_regs = 1, + .status_base = CRYSTAL_COVE_REG_IRQLVL1, + .mask_base = CRYSTAL_COVE_REG_MIRQLVL1, +}; + +struct intel_soc_pmic_config intel_soc_pmic_config_crc = { + .irq_flags = IRQF_TRIGGER_RISING, + .cell_dev = crystal_cove_dev, + .n_cell_devs = ARRAY_SIZE(crystal_cove_dev), + .regmap_config = &crystal_cove_regmap_config, + .irq_chip = &crystal_cove_irq_chip, +}; diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c index 7e50fe0118e3..8df3266064e4 100644 --- 
a/drivers/mfd/ipaq-micro.c +++ b/drivers/mfd/ipaq-micro.c @@ -115,7 +115,7 @@ static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data) } else { dev_err(micro->dev, "out of band RX message 0x%02x\n", id); - if(!micro->msg) + if (!micro->msg) dev_info(micro->dev, "no message queued\n"); else dev_info(micro->dev, "expected message %02x\n", @@ -126,13 +126,13 @@ static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data) if (micro->key) micro->key(micro->key_data, len, data); else - dev_dbg(micro->dev, "key message ignored, no handle \n"); + dev_dbg(micro->dev, "key message ignored, no handle\n"); break; case MSG_TOUCHSCREEN: if (micro->ts) micro->ts(micro->ts_data, len, data); else - dev_dbg(micro->dev, "touchscreen message ignored, no handle \n"); + dev_dbg(micro->dev, "touchscreen message ignored, no handle\n"); break; default: dev_err(micro->dev, @@ -154,7 +154,7 @@ static void micro_process_char(struct ipaq_micro *micro, u8 ch) rx->state = STATE_ID; /* Next byte is the id and len */ break; case STATE_ID: /* Looking for id and len byte */ - rx->id = (ch & 0xf0) >> 4 ; + rx->id = (ch & 0xf0) >> 4; rx->len = (ch & 0x0f); rx->index = 0; rx->chksum = ch; diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index f7ff0188603d..bd2696136eee 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -24,7 +24,8 @@ #define MAX_ID_LEN 4 static char force_device_id[MAX_ID_LEN + 1] = ""; -module_param_string(force_device_id, force_device_id, sizeof(force_device_id), 0); +module_param_string(force_device_id, force_device_id, + sizeof(force_device_id), 0); MODULE_PARM_DESC(force_device_id, "Override detected product"); /* @@ -36,7 +37,7 @@ static void kempld_get_hardware_mutex(struct kempld_device_data *pld) { /* The mutex bit will read 1 until access has been granted */ while (ioread8(pld->io_index) & KEMPLD_MUTEX_KEY) - msleep(1); + usleep_range(1000, 3000); } static void kempld_release_hardware_mutex(struct kempld_device_data *pld) @@ -499,7 +500,7 @@ static struct platform_driver kempld_driver = { .remove = kempld_remove, }; -static struct dmi_system_id __initdata kempld_dmi_table[] = { +static struct dmi_system_id kempld_dmi_table[] __initdata = { { .ident = "BHL6", .matches = { @@ -736,7 +737,8 @@ static int __init kempld_init(void) int ret; if (force_device_id[0]) { - for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++) + for (id = kempld_dmi_table; + id->matches[0].slot != DMI_NONE; id++) if (strstr(id->ident, force_device_id)) if (id->callback && id->callback(id)) break; diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c index c84ded5f8ece..23982dbf014d 100644 --- a/drivers/mfd/lp8788-irq.c +++ b/drivers/mfd/lp8788-irq.c @@ -66,12 +66,14 @@ static inline u8 _irq_to_val(enum lp8788_int_id id, int enable) static void lp8788_irq_enable(struct irq_data *data) { struct lp8788_irq_data *irqd = irq_data_get_irq_chip_data(data); + irqd->enabled[data->hwirq] = 1; } static void lp8788_irq_disable(struct irq_data *data) { struct lp8788_irq_data *irqd = irq_data_get_irq_chip_data(data); + irqd->enabled[data->hwirq] = 0; } diff --git a/drivers/mfd/max77686-irq.c b/drivers/mfd/max77686-irq.c deleted file mode 100644 index cdc3280e2ec7..000000000000 --- a/drivers/mfd/max77686-irq.c +++ /dev/null @@ -1,319 +0,0 @@ -/* - * max77686-irq.c - Interrupt controller support for MAX77686 - * - * Copyright (C) 2012 Samsung Electronics Co.Ltd - * Chiwoong Byun <woong.byun@samsung.com> - * - * This program is free software; you 
can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * This driver is based on max8997-irq.c - */ - -#include <linux/err.h> -#include <linux/irq.h> -#include <linux/interrupt.h> -#include <linux/gpio.h> -#include <linux/mfd/max77686.h> -#include <linux/mfd/max77686-private.h> -#include <linux/irqdomain.h> -#include <linux/regmap.h> - -enum { - MAX77686_DEBUG_IRQ_INFO = 1 << 0, - MAX77686_DEBUG_IRQ_MASK = 1 << 1, - MAX77686_DEBUG_IRQ_INT = 1 << 2, -}; - -static int debug_mask = 0; -module_param(debug_mask, int, 0); -MODULE_PARM_DESC(debug_mask, "Set debug_mask : 0x0=off 0x1=IRQ_INFO 0x2=IRQ_MASK 0x4=IRQ_INI)"); - -static const u8 max77686_mask_reg[] = { - [PMIC_INT1] = MAX77686_REG_INT1MSK, - [PMIC_INT2] = MAX77686_REG_INT2MSK, - [RTC_INT] = MAX77686_RTC_INTM, -}; - -static struct regmap *max77686_get_regmap(struct max77686_dev *max77686, - enum max77686_irq_source src) -{ - switch (src) { - case PMIC_INT1 ... PMIC_INT2: - return max77686->regmap; - case RTC_INT: - return max77686->rtc_regmap; - default: - return ERR_PTR(-EINVAL); - } -} - -struct max77686_irq_data { - int mask; - enum max77686_irq_source group; -}; - -#define DECLARE_IRQ(idx, _group, _mask) \ - [(idx)] = { .group = (_group), .mask = (_mask) } -static const struct max77686_irq_data max77686_irqs[] = { - DECLARE_IRQ(MAX77686_PMICIRQ_PWRONF, PMIC_INT1, 1 << 0), - DECLARE_IRQ(MAX77686_PMICIRQ_PWRONR, PMIC_INT1, 1 << 1), - DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBF, PMIC_INT1, 1 << 2), - DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBR, PMIC_INT1, 1 << 3), - DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBF, PMIC_INT1, 1 << 4), - DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBR, PMIC_INT1, 1 << 5), - DECLARE_IRQ(MAX77686_PMICIRQ_ONKEY1S, PMIC_INT1, 1 << 6), - DECLARE_IRQ(MAX77686_PMICIRQ_MRSTB, PMIC_INT1, 1 << 7), - DECLARE_IRQ(MAX77686_PMICIRQ_140C, PMIC_INT2, 1 << 0), - DECLARE_IRQ(MAX77686_PMICIRQ_120C, PMIC_INT2, 1 << 1), - DECLARE_IRQ(MAX77686_RTCIRQ_RTC60S, RTC_INT, 1 << 0), - DECLARE_IRQ(MAX77686_RTCIRQ_RTCA1, RTC_INT, 1 << 1), - DECLARE_IRQ(MAX77686_RTCIRQ_RTCA2, RTC_INT, 1 << 2), - DECLARE_IRQ(MAX77686_RTCIRQ_SMPL, RTC_INT, 1 << 3), - DECLARE_IRQ(MAX77686_RTCIRQ_RTC1S, RTC_INT, 1 << 4), - DECLARE_IRQ(MAX77686_RTCIRQ_WTSR, RTC_INT, 1 << 5), -}; - -static void max77686_irq_lock(struct irq_data *data) -{ - struct max77686_dev *max77686 = irq_get_chip_data(data->irq); - - if (debug_mask & MAX77686_DEBUG_IRQ_MASK) - pr_info("%s\n", __func__); - - mutex_lock(&max77686->irqlock); -} - -static void max77686_irq_sync_unlock(struct irq_data *data) -{ - struct max77686_dev *max77686 = irq_get_chip_data(data->irq); - int i; - - for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) { - u8 mask_reg = max77686_mask_reg[i]; - struct regmap *map = max77686_get_regmap(max77686, i); - - if (debug_mask & MAX77686_DEBUG_IRQ_MASK) - pr_debug("%s: mask_reg[%d]=0x%x, cur=0x%x\n", - __func__, i, mask_reg, max77686->irq_masks_cur[i]); - - if (mask_reg == 
MAX77686_REG_INVALID || - IS_ERR_OR_NULL(map)) - continue; - - max77686->irq_masks_cache[i] = max77686->irq_masks_cur[i]; - - regmap_write(map, max77686_mask_reg[i], - max77686->irq_masks_cur[i]); - } - - mutex_unlock(&max77686->irqlock); -} - -static const inline struct max77686_irq_data *to_max77686_irq(int irq) -{ - struct irq_data *data = irq_get_irq_data(irq); - return &max77686_irqs[data->hwirq]; -} - -static void max77686_irq_mask(struct irq_data *data) -{ - struct max77686_dev *max77686 = irq_get_chip_data(data->irq); - const struct max77686_irq_data *irq_data = to_max77686_irq(data->irq); - - max77686->irq_masks_cur[irq_data->group] |= irq_data->mask; - - if (debug_mask & MAX77686_DEBUG_IRQ_MASK) - pr_info("%s: group=%d, cur=0x%x\n", - __func__, irq_data->group, - max77686->irq_masks_cur[irq_data->group]); -} - -static void max77686_irq_unmask(struct irq_data *data) -{ - struct max77686_dev *max77686 = irq_get_chip_data(data->irq); - const struct max77686_irq_data *irq_data = to_max77686_irq(data->irq); - - max77686->irq_masks_cur[irq_data->group] &= ~irq_data->mask; - - if (debug_mask & MAX77686_DEBUG_IRQ_MASK) - pr_info("%s: group=%d, cur=0x%x\n", - __func__, irq_data->group, - max77686->irq_masks_cur[irq_data->group]); -} - -static struct irq_chip max77686_irq_chip = { - .name = "max77686", - .irq_bus_lock = max77686_irq_lock, - .irq_bus_sync_unlock = max77686_irq_sync_unlock, - .irq_mask = max77686_irq_mask, - .irq_unmask = max77686_irq_unmask, -}; - -static irqreturn_t max77686_irq_thread(int irq, void *data) -{ - struct max77686_dev *max77686 = data; - unsigned int irq_reg[MAX77686_IRQ_GROUP_NR] = {}; - unsigned int irq_src; - int ret; - int i, cur_irq; - - ret = regmap_read(max77686->regmap, MAX77686_REG_INTSRC, &irq_src); - if (ret < 0) { - dev_err(max77686->dev, "Failed to read interrupt source: %d\n", - ret); - return IRQ_NONE; - } - - if (debug_mask & MAX77686_DEBUG_IRQ_INT) - pr_info("%s: irq_src=0x%x\n", __func__, irq_src); - - if (irq_src == MAX77686_IRQSRC_PMIC) { - ret = regmap_bulk_read(max77686->regmap, - MAX77686_REG_INT1, irq_reg, 2); - if (ret < 0) { - dev_err(max77686->dev, "Failed to read interrupt source: %d\n", - ret); - return IRQ_NONE; - } - - if (debug_mask & MAX77686_DEBUG_IRQ_INT) - pr_info("%s: int1=0x%x, int2=0x%x\n", __func__, - irq_reg[PMIC_INT1], irq_reg[PMIC_INT2]); - } - - if (irq_src & MAX77686_IRQSRC_RTC) { - ret = regmap_read(max77686->rtc_regmap, - MAX77686_RTC_INT, &irq_reg[RTC_INT]); - if (ret < 0) { - dev_err(max77686->dev, "Failed to read interrupt source: %d\n", - ret); - return IRQ_NONE; - } - - if (debug_mask & MAX77686_DEBUG_IRQ_INT) - pr_info("%s: rtc int=0x%x\n", __func__, - irq_reg[RTC_INT]); - - } - - for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) - irq_reg[i] &= ~max77686->irq_masks_cur[i]; - - for (i = 0; i < MAX77686_IRQ_NR; i++) { - if (irq_reg[max77686_irqs[i].group] & max77686_irqs[i].mask) { - cur_irq = irq_find_mapping(max77686->irq_domain, i); - if (cur_irq) - handle_nested_irq(cur_irq); - } - } - - return IRQ_HANDLED; -} - -static int max77686_irq_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hw) -{ - struct max77686_dev *max77686 = d->host_data; - - irq_set_chip_data(irq, max77686); - irq_set_chip_and_handler(irq, &max77686_irq_chip, handle_edge_irq); - irq_set_nested_thread(irq, 1); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif - return 0; -} - -static struct irq_domain_ops max77686_irq_domain_ops = { - .map = max77686_irq_domain_map, -}; - -int 
max77686_irq_init(struct max77686_dev *max77686) -{ - struct irq_domain *domain; - int i; - int ret; - int val; - struct regmap *map; - - mutex_init(&max77686->irqlock); - - if (max77686->irq_gpio && !max77686->irq) { - max77686->irq = gpio_to_irq(max77686->irq_gpio); - - if (debug_mask & MAX77686_DEBUG_IRQ_INT) { - ret = gpio_request(max77686->irq_gpio, "pmic_irq"); - if (ret < 0) { - dev_err(max77686->dev, - "Failed to request gpio %d with ret:" - "%d\n", max77686->irq_gpio, ret); - return IRQ_NONE; - } - - gpio_direction_input(max77686->irq_gpio); - val = gpio_get_value(max77686->irq_gpio); - gpio_free(max77686->irq_gpio); - pr_info("%s: gpio_irq=%x\n", __func__, val); - } - } - - if (!max77686->irq) { - dev_err(max77686->dev, "irq is not specified\n"); - return -ENODEV; - } - - /* Mask individual interrupt sources */ - for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) { - max77686->irq_masks_cur[i] = 0xff; - max77686->irq_masks_cache[i] = 0xff; - map = max77686_get_regmap(max77686, i); - - if (IS_ERR_OR_NULL(map)) - continue; - if (max77686_mask_reg[i] == MAX77686_REG_INVALID) - continue; - - regmap_write(map, max77686_mask_reg[i], 0xff); - } - domain = irq_domain_add_linear(NULL, MAX77686_IRQ_NR, - &max77686_irq_domain_ops, max77686); - if (!domain) { - dev_err(max77686->dev, "could not create irq domain\n"); - return -ENODEV; - } - max77686->irq_domain = domain; - - ret = request_threaded_irq(max77686->irq, NULL, max77686_irq_thread, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "max77686-irq", max77686); - - if (ret) - dev_err(max77686->dev, "Failed to request IRQ %d: %d\n", - max77686->irq, ret); - - - if (debug_mask & MAX77686_DEBUG_IRQ_INFO) - pr_info("%s-\n", __func__); - - return 0; -} - -void max77686_irq_exit(struct max77686_dev *max77686) -{ - if (max77686->irq) - free_irq(max77686->irq, max77686); -} diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c index ce869acf27ae..86e552348db4 100644 --- a/drivers/mfd/max77686.c +++ b/drivers/mfd/max77686.c @@ -1,5 +1,5 @@ /* - * max77686.c - mfd core driver for the Maxim 77686 + * max77686.c - mfd core driver for the Maxim 77686/802 * * Copyright (C) 2012 Samsung Electronics * Chiwoong Byun <woong.byun@smasung.com> @@ -25,6 +25,8 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/i2c.h> +#include <linux/irq.h> +#include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/mfd/core.h> @@ -41,15 +43,166 @@ static const struct mfd_cell max77686_devs[] = { { .name = "max77686-clk", }, }; +static const struct mfd_cell max77802_devs[] = { + { .name = "max77802-pmic", }, + { .name = "max77802-clk", }, + { .name = "max77802-rtc", }, +}; + +static bool max77802_pmic_is_accessible_reg(struct device *dev, + unsigned int reg) +{ + return (reg >= MAX77802_REG_DEVICE_ID && reg < MAX77802_REG_PMIC_END); +} + +static bool max77802_rtc_is_accessible_reg(struct device *dev, + unsigned int reg) +{ + return (reg >= MAX77802_RTC_INT && reg < MAX77802_RTC_END); +} + +static bool max77802_is_accessible_reg(struct device *dev, unsigned int reg) +{ + return (max77802_pmic_is_accessible_reg(dev, reg) || + max77802_rtc_is_accessible_reg(dev, reg)); +} + +static bool max77802_pmic_is_precious_reg(struct device *dev, unsigned int reg) +{ + return (reg == MAX77802_REG_INTSRC || reg == MAX77802_REG_INT1 || + reg == MAX77802_REG_INT2); +} + +static bool max77802_rtc_is_precious_reg(struct device *dev, unsigned int reg) +{ + return (reg == MAX77802_RTC_INT || + reg == MAX77802_RTC_UPDATE0 || + reg == 
MAX77802_RTC_UPDATE1); +} + +static bool max77802_is_precious_reg(struct device *dev, unsigned int reg) +{ + return (max77802_pmic_is_precious_reg(dev, reg) || + max77802_rtc_is_precious_reg(dev, reg)); +} + +static bool max77802_pmic_is_volatile_reg(struct device *dev, unsigned int reg) +{ + return (max77802_is_precious_reg(dev, reg) || + reg == MAX77802_REG_STATUS1 || reg == MAX77802_REG_STATUS2 || + reg == MAX77802_REG_PWRON); +} + +static bool max77802_rtc_is_volatile_reg(struct device *dev, unsigned int reg) +{ + return (max77802_rtc_is_precious_reg(dev, reg) || + reg == MAX77802_RTC_SEC || + reg == MAX77802_RTC_MIN || + reg == MAX77802_RTC_HOUR || + reg == MAX77802_RTC_WEEKDAY || + reg == MAX77802_RTC_MONTH || + reg == MAX77802_RTC_YEAR || + reg == MAX77802_RTC_DATE); +} + +static bool max77802_is_volatile_reg(struct device *dev, unsigned int reg) +{ + return (max77802_pmic_is_volatile_reg(dev, reg) || + max77802_rtc_is_volatile_reg(dev, reg)); +} + static struct regmap_config max77686_regmap_config = { .reg_bits = 8, .val_bits = 8, }; -#ifdef CONFIG_OF +static struct regmap_config max77686_rtc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static struct regmap_config max77802_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .writeable_reg = max77802_is_accessible_reg, + .readable_reg = max77802_is_accessible_reg, + .precious_reg = max77802_is_precious_reg, + .volatile_reg = max77802_is_volatile_reg, + .name = "max77802-pmic", + .cache_type = REGCACHE_RBTREE, +}; + +static const struct regmap_irq max77686_irqs[] = { + /* INT1 interrupts */ + { .reg_offset = 0, .mask = MAX77686_INT1_PWRONF_MSK, }, + { .reg_offset = 0, .mask = MAX77686_INT1_PWRONR_MSK, }, + { .reg_offset = 0, .mask = MAX77686_INT1_JIGONBF_MSK, }, + { .reg_offset = 0, .mask = MAX77686_INT1_JIGONBR_MSK, }, + { .reg_offset = 0, .mask = MAX77686_INT1_ACOKBF_MSK, }, + { .reg_offset = 0, .mask = MAX77686_INT1_ACOKBR_MSK, }, + { .reg_offset = 0, .mask = MAX77686_INT1_ONKEY1S_MSK, }, + { .reg_offset = 0, .mask = MAX77686_INT1_MRSTB_MSK, }, + /* INT2 interrupts */ + { .reg_offset = 1, .mask = MAX77686_INT2_140C_MSK, }, + { .reg_offset = 1, .mask = MAX77686_INT2_120C_MSK, }, +}; + +static const struct regmap_irq_chip max77686_irq_chip = { + .name = "max77686-pmic", + .status_base = MAX77686_REG_INT1, + .mask_base = MAX77686_REG_INT1MSK, + .num_regs = 2, + .irqs = max77686_irqs, + .num_irqs = ARRAY_SIZE(max77686_irqs), +}; + +static const struct regmap_irq max77686_rtc_irqs[] = { + /* RTC interrupts */ + { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC60S_MSK, }, + { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA1_MSK, }, + { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA2_MSK, }, + { .reg_offset = 0, .mask = MAX77686_RTCINT_SMPL_MSK, }, + { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC1S_MSK, }, + { .reg_offset = 0, .mask = MAX77686_RTCINT_WTSR_MSK, }, +}; + +static const struct regmap_irq_chip max77686_rtc_irq_chip = { + .name = "max77686-rtc", + .status_base = MAX77686_RTC_INT, + .mask_base = MAX77686_RTC_INTM, + .num_regs = 1, + .irqs = max77686_rtc_irqs, + .num_irqs = ARRAY_SIZE(max77686_rtc_irqs), +}; + +static const struct regmap_irq_chip max77802_irq_chip = { + .name = "max77802-pmic", + .status_base = MAX77802_REG_INT1, + .mask_base = MAX77802_REG_INT1MSK, + .num_regs = 2, + .irqs = max77686_irqs, /* same masks as 77686 */ + .num_irqs = ARRAY_SIZE(max77686_irqs), +}; + +static const struct regmap_irq_chip max77802_rtc_irq_chip = { + .name = "max77802-rtc", + .status_base = MAX77802_RTC_INT, + .mask_base = 
MAX77802_RTC_INTM, + .num_regs = 1, + .irqs = max77686_rtc_irqs, /* same masks as 77686 */ + .num_irqs = ARRAY_SIZE(max77686_rtc_irqs), +}; + static const struct of_device_id max77686_pmic_dt_match[] = { - {.compatible = "maxim,max77686", .data = NULL}, - {}, + { + .compatible = "maxim,max77686", + .data = (void *)TYPE_MAX77686, + }, + { + .compatible = "maxim,max77802", + .data = (void *)TYPE_MAX77802, + }, + { }, }; static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device @@ -58,53 +211,74 @@ static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device struct max77686_platform_data *pd; pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); - if (!pd) { - dev_err(dev, "could not allocate memory for pdata\n"); + if (!pd) return NULL; - } dev->platform_data = pd; return pd; } -#else -static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device - *dev) -{ - return 0; -} -#endif static int max77686_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct max77686_dev *max77686 = NULL; struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev); + const struct of_device_id *match; unsigned int data; int ret = 0; + const struct regmap_config *config; + const struct regmap_irq_chip *irq_chip; + const struct regmap_irq_chip *rtc_irq_chip; + struct regmap **rtc_regmap; + const struct mfd_cell *cells; + int n_devs; - if (i2c->dev.of_node) + if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node && !pdata) pdata = max77686_i2c_parse_dt_pdata(&i2c->dev); if (!pdata) { dev_err(&i2c->dev, "No platform data found.\n"); - return -EIO; + return -EINVAL; } max77686 = devm_kzalloc(&i2c->dev, sizeof(struct max77686_dev), GFP_KERNEL); - if (max77686 == NULL) + if (!max77686) return -ENOMEM; + if (i2c->dev.of_node) { + match = of_match_node(max77686_pmic_dt_match, i2c->dev.of_node); + if (!match) + return -EINVAL; + + max77686->type = (unsigned long)match->data; + } else + max77686->type = id->driver_data; + i2c_set_clientdata(i2c, max77686); max77686->dev = &i2c->dev; max77686->i2c = i2c; - max77686->type = id->driver_data; max77686->wakeup = pdata->wakeup; - max77686->irq_gpio = pdata->irq_gpio; max77686->irq = i2c->irq; - max77686->regmap = devm_regmap_init_i2c(i2c, &max77686_regmap_config); + if (max77686->type == TYPE_MAX77686) { + config = &max77686_regmap_config; + irq_chip = &max77686_irq_chip; + rtc_irq_chip = &max77686_rtc_irq_chip; + rtc_regmap = &max77686->rtc_regmap; + cells = max77686_devs; + n_devs = ARRAY_SIZE(max77686_devs); + } else { + config = &max77802_regmap_config; + irq_chip = &max77802_irq_chip; + rtc_irq_chip = &max77802_rtc_irq_chip; + rtc_regmap = &max77686->regmap; + cells = max77802_devs; + n_devs = ARRAY_SIZE(max77802_devs); + } + + max77686->regmap = devm_regmap_init_i2c(i2c, config); if (IS_ERR(max77686->regmap)) { ret = PTR_ERR(max77686->regmap); dev_err(max77686->dev, "Failed to allocate register map: %d\n", @@ -112,30 +286,68 @@ static int max77686_i2c_probe(struct i2c_client *i2c, return ret; } - if (regmap_read(max77686->regmap, - MAX77686_REG_DEVICE_ID, &data) < 0) { + ret = regmap_read(max77686->regmap, MAX77686_REG_DEVICE_ID, &data); + if (ret < 0) { dev_err(max77686->dev, "device not found on this channel (this is not an error)\n"); return -ENODEV; - } else - dev_info(max77686->dev, "device found\n"); + } - max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC); - if (!max77686->rtc) { - dev_err(max77686->dev, "Failed to allocate I2C device for RTC\n"); - return -ENODEV; + if (max77686->type == 
TYPE_MAX77686) { + max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC); + if (!max77686->rtc) { + dev_err(max77686->dev, + "Failed to allocate I2C device for RTC\n"); + return -ENODEV; + } + i2c_set_clientdata(max77686->rtc, max77686); + + max77686->rtc_regmap = + devm_regmap_init_i2c(max77686->rtc, + &max77686_rtc_regmap_config); + if (IS_ERR(max77686->rtc_regmap)) { + ret = PTR_ERR(max77686->rtc_regmap); + dev_err(max77686->dev, + "failed to allocate RTC regmap: %d\n", + ret); + goto err_unregister_i2c; + } + } + + ret = regmap_add_irq_chip(max77686->regmap, max77686->irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT | + IRQF_SHARED, 0, irq_chip, + &max77686->irq_data); + if (ret) { + dev_err(&i2c->dev, "failed to add PMIC irq chip: %d\n", ret); + goto err_unregister_i2c; } - i2c_set_clientdata(max77686->rtc, max77686); - max77686_irq_init(max77686); + ret = regmap_add_irq_chip(*rtc_regmap, max77686->irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT | + IRQF_SHARED, 0, rtc_irq_chip, + &max77686->rtc_irq_data); + if (ret) { + dev_err(&i2c->dev, "failed to add RTC irq chip: %d\n", ret); + goto err_del_irqc; + } - ret = mfd_add_devices(max77686->dev, -1, max77686_devs, - ARRAY_SIZE(max77686_devs), NULL, 0, NULL); + ret = mfd_add_devices(max77686->dev, -1, cells, n_devs, NULL, 0, NULL); if (ret < 0) { - mfd_remove_devices(max77686->dev); - i2c_unregister_device(max77686->rtc); + dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret); + goto err_del_rtc_irqc; } + return 0; + +err_del_rtc_irqc: + regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data); +err_del_irqc: + regmap_del_irq_chip(max77686->irq, max77686->irq_data); +err_unregister_i2c: + if (max77686->type == TYPE_MAX77686) + i2c_unregister_device(max77686->rtc); + return ret; } @@ -144,7 +356,12 @@ static int max77686_i2c_remove(struct i2c_client *i2c) struct max77686_dev *max77686 = i2c_get_clientdata(i2c); mfd_remove_devices(max77686->dev); - i2c_unregister_device(max77686->rtc); + + regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data); + regmap_del_irq_chip(max77686->irq, max77686->irq_data); + + if (max77686->type == TYPE_MAX77686) + i2c_unregister_device(max77686->rtc); return 0; } @@ -155,10 +372,50 @@ static const struct i2c_device_id max77686_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, max77686_i2c_id); +#ifdef CONFIG_PM_SLEEP +static int max77686_suspend(struct device *dev) +{ + struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); + struct max77686_dev *max77686 = i2c_get_clientdata(i2c); + + if (device_may_wakeup(dev)) + enable_irq_wake(max77686->irq); + + /* + * IRQ must be disabled during suspend because if it happens + * while suspended it will be handled before resuming I2C. + * + * When device is woken up from suspend (e.g. by RTC wake alarm), + * an interrupt occurs before resuming I2C bus controller. + * Interrupt handler tries to read registers but this read + * will fail because I2C is still suspended. 
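The suspend comment above only works if the PMIC has been marked wakeup-capable at probe time; otherwise device_may_wakeup() stays false and the interrupt is never armed as a wake source. A minimal sketch of that probe-time plumbing, assuming the same pdata->wakeup flag the driver already copies into max77686->wakeup (the helper name and call placement are illustrative only, not part of the patch):

#include <linux/device.h>
#include <linux/pm_wakeup.h>

/*
 * Hypothetical helper; in practice the call would sit near the end of
 * the probe() path, e.g. example_declare_wakeup(&i2c->dev, pdata->wakeup).
 */
static void example_declare_wakeup(struct device *dev, bool can_wake)
{
	/* Marks the device wakeup-capable so device_may_wakeup() can be true. */
	device_init_wakeup(dev, can_wake);
}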
+ */ + disable_irq(max77686->irq); + + return 0; +} + +static int max77686_resume(struct device *dev) +{ + struct i2c_client *i2c = container_of(dev, struct i2c_client, dev); + struct max77686_dev *max77686 = i2c_get_clientdata(i2c); + + if (device_may_wakeup(dev)) + disable_irq_wake(max77686->irq); + + enable_irq(max77686->irq); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(max77686_pm, max77686_suspend, max77686_resume); + static struct i2c_driver max77686_i2c_driver = { .driver = { .name = "max77686", .owner = THIS_MODULE, + .pm = &max77686_pm, .of_match_table = of_match_ptr(max77686_pmic_dt_match), }, .probe = max77686_i2c_probe, @@ -179,6 +436,6 @@ static void __exit max77686_i2c_exit(void) } module_exit(max77686_i2c_exit); -MODULE_DESCRIPTION("MAXIM 77686 multi-function core driver"); +MODULE_DESCRIPTION("MAXIM 77686/802 multi-function core driver"); MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index f3faf0c45ddd..97a787ab3d51 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -624,6 +624,7 @@ static void max8925_irq_sync_unlock(struct irq_data *data) static void max8925_irq_enable(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); + max8925_irqs[data->irq - chip->irq_base].enable = max8925_irqs[data->irq - chip->irq_base].offs; } @@ -631,6 +632,7 @@ static void max8925_irq_enable(struct irq_data *data) static void max8925_irq_disable(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); + max8925_irqs[data->irq - chip->irq_base].enable = 0; } diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c index a83eed5c15ca..ecbe78ead3b6 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ -257,9 +257,11 @@ static struct i2c_driver max8925_driver = { static int __init max8925_i2c_init(void) { int ret; + ret = i2c_add_driver(&max8925_driver); if (ret != 0) pr_err("Failed to register MAX8925 I2C driver: %d\n", ret); + return ret; } subsys_initcall(max8925_i2c_init); diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index acf5dd712eb2..2b6bc868cd3d 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -10,106 +10,18 @@ * Free Software Foundation. 
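Both the max77686/77802 irq chips registered above and the mc13xxx conversion that follows expose their interrupts through regmap-irq, so a consumer resolves a chip-local interrupt index to a Linux virq instead of touching mask registers directly. A hedged sketch of that consumer side, using the rtc_irq_data pointer added above; the interrupt index, handler and function names are assumptions for illustration only:

#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/mfd/max77686-private.h>	/* struct max77686_dev */

static irqreturn_t example_alarm_handler(int irq, void *data)
{
	/* Runs in thread context, so I2C traffic to the PMIC is allowed here. */
	return IRQ_HANDLED;
}

static int example_request_alarm(struct device *dev,
				 struct max77686_dev *max77686,
				 int alarm_irq_index)	/* e.g. the RTCA1 bit */
{
	int virq = regmap_irq_get_virq(max77686->rtc_irq_data, alarm_irq_index);

	if (virq <= 0)
		return -ENXIO;

	/* Threaded, oneshot: the demuxed PMIC interrupt is level-ish over I2C. */
	return devm_request_threaded_irq(dev, virq, NULL, example_alarm_handler,
					 IRQF_ONESHOT, "example-alarm",
					 max77686);
}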
*/ -#include <linux/slab.h> #include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/mutex.h> -#include <linux/interrupt.h> -#include <linux/mfd/core.h> -#include <linux/mfd/mc13xxx.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_gpio.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> #include "mc13xxx.h" #define MC13XXX_IRQSTAT0 0 -#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0) -#define MC13XXX_IRQSTAT0_ADCBISDONEI (1 << 1) -#define MC13XXX_IRQSTAT0_TSI (1 << 2) -#define MC13783_IRQSTAT0_WHIGHI (1 << 3) -#define MC13783_IRQSTAT0_WLOWI (1 << 4) -#define MC13XXX_IRQSTAT0_CHGDETI (1 << 6) -#define MC13783_IRQSTAT0_CHGOVI (1 << 7) -#define MC13XXX_IRQSTAT0_CHGREVI (1 << 8) -#define MC13XXX_IRQSTAT0_CHGSHORTI (1 << 9) -#define MC13XXX_IRQSTAT0_CCCVI (1 << 10) -#define MC13XXX_IRQSTAT0_CHGCURRI (1 << 11) -#define MC13XXX_IRQSTAT0_BPONI (1 << 12) -#define MC13XXX_IRQSTAT0_LOBATLI (1 << 13) -#define MC13XXX_IRQSTAT0_LOBATHI (1 << 14) -#define MC13783_IRQSTAT0_UDPI (1 << 15) -#define MC13783_IRQSTAT0_USBI (1 << 16) -#define MC13783_IRQSTAT0_IDI (1 << 19) -#define MC13783_IRQSTAT0_SE1I (1 << 21) -#define MC13783_IRQSTAT0_CKDETI (1 << 22) -#define MC13783_IRQSTAT0_UDMI (1 << 23) - #define MC13XXX_IRQMASK0 1 -#define MC13XXX_IRQMASK0_ADCDONEM MC13XXX_IRQSTAT0_ADCDONEI -#define MC13XXX_IRQMASK0_ADCBISDONEM MC13XXX_IRQSTAT0_ADCBISDONEI -#define MC13XXX_IRQMASK0_TSM MC13XXX_IRQSTAT0_TSI -#define MC13783_IRQMASK0_WHIGHM MC13783_IRQSTAT0_WHIGHI -#define MC13783_IRQMASK0_WLOWM MC13783_IRQSTAT0_WLOWI -#define MC13XXX_IRQMASK0_CHGDETM MC13XXX_IRQSTAT0_CHGDETI -#define MC13783_IRQMASK0_CHGOVM MC13783_IRQSTAT0_CHGOVI -#define MC13XXX_IRQMASK0_CHGREVM MC13XXX_IRQSTAT0_CHGREVI -#define MC13XXX_IRQMASK0_CHGSHORTM MC13XXX_IRQSTAT0_CHGSHORTI -#define MC13XXX_IRQMASK0_CCCVM MC13XXX_IRQSTAT0_CCCVI -#define MC13XXX_IRQMASK0_CHGCURRM MC13XXX_IRQSTAT0_CHGCURRI -#define MC13XXX_IRQMASK0_BPONM MC13XXX_IRQSTAT0_BPONI -#define MC13XXX_IRQMASK0_LOBATLM MC13XXX_IRQSTAT0_LOBATLI -#define MC13XXX_IRQMASK0_LOBATHM MC13XXX_IRQSTAT0_LOBATHI -#define MC13783_IRQMASK0_UDPM MC13783_IRQSTAT0_UDPI -#define MC13783_IRQMASK0_USBM MC13783_IRQSTAT0_USBI -#define MC13783_IRQMASK0_IDM MC13783_IRQSTAT0_IDI -#define MC13783_IRQMASK0_SE1M MC13783_IRQSTAT0_SE1I -#define MC13783_IRQMASK0_CKDETM MC13783_IRQSTAT0_CKDETI -#define MC13783_IRQMASK0_UDMM MC13783_IRQSTAT0_UDMI - #define MC13XXX_IRQSTAT1 3 -#define MC13XXX_IRQSTAT1_1HZI (1 << 0) -#define MC13XXX_IRQSTAT1_TODAI (1 << 1) -#define MC13783_IRQSTAT1_ONOFD1I (1 << 3) -#define MC13783_IRQSTAT1_ONOFD2I (1 << 4) -#define MC13783_IRQSTAT1_ONOFD3I (1 << 5) -#define MC13XXX_IRQSTAT1_SYSRSTI (1 << 6) -#define MC13XXX_IRQSTAT1_RTCRSTI (1 << 7) -#define MC13XXX_IRQSTAT1_PCI (1 << 8) -#define MC13XXX_IRQSTAT1_WARMI (1 << 9) -#define MC13XXX_IRQSTAT1_MEMHLDI (1 << 10) -#define MC13783_IRQSTAT1_PWRRDYI (1 << 11) -#define MC13XXX_IRQSTAT1_THWARNLI (1 << 12) -#define MC13XXX_IRQSTAT1_THWARNHI (1 << 13) -#define MC13XXX_IRQSTAT1_CLKI (1 << 14) -#define MC13783_IRQSTAT1_SEMAFI (1 << 15) -#define MC13783_IRQSTAT1_MC2BI (1 << 17) -#define MC13783_IRQSTAT1_HSDETI (1 << 18) -#define MC13783_IRQSTAT1_HSLI (1 << 19) -#define MC13783_IRQSTAT1_ALSPTHI (1 << 20) -#define MC13783_IRQSTAT1_AHSSHORTI (1 << 21) - #define MC13XXX_IRQMASK1 4 -#define MC13XXX_IRQMASK1_1HZM MC13XXX_IRQSTAT1_1HZI -#define MC13XXX_IRQMASK1_TODAM MC13XXX_IRQSTAT1_TODAI -#define MC13783_IRQMASK1_ONOFD1M MC13783_IRQSTAT1_ONOFD1I -#define MC13783_IRQMASK1_ONOFD2M MC13783_IRQSTAT1_ONOFD2I 
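The hand-maintained IRQSTAT/IRQMASK bit lists removed here are superseded by a declarative regmap-irq description, which mc13xxx_common_init() builds at runtime later in this patch. For orientation, a static equivalent for the same layout (two registers, 24 interrupt bits each, acked by writing the status register) could look like the sketch below; the names are invented for the example and it is not taken from the patch itself. It reuses the MC13XXX_IRQSTAT0/IRQMASK0/IRQSTAT1 register indices that the file keeps above.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* ADCDONE */
	{ .reg_offset = 0, .mask = BIT(1) },	/* ADCBISDONE */
	/* ... one entry per interrupt bit, 24 per register ... */
	{ .reg_offset = 1, .mask = BIT(0) },	/* 1HZ */
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "mc13xxx-example",
	.status_base	= MC13XXX_IRQSTAT0,
	.mask_base	= MC13XXX_IRQMASK0,
	.ack_base	= MC13XXX_IRQSTAT0,
	.irq_reg_stride	= MC13XXX_IRQSTAT1 - MC13XXX_IRQSTAT0,
	.init_ack_masked = true,
	.use_ack	= true,
	.num_regs	= 2,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
};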
-#define MC13783_IRQMASK1_ONOFD3M MC13783_IRQSTAT1_ONOFD3I -#define MC13XXX_IRQMASK1_SYSRSTM MC13XXX_IRQSTAT1_SYSRSTI -#define MC13XXX_IRQMASK1_RTCRSTM MC13XXX_IRQSTAT1_RTCRSTI -#define MC13XXX_IRQMASK1_PCM MC13XXX_IRQSTAT1_PCI -#define MC13XXX_IRQMASK1_WARMM MC13XXX_IRQSTAT1_WARMI -#define MC13XXX_IRQMASK1_MEMHLDM MC13XXX_IRQSTAT1_MEMHLDI -#define MC13783_IRQMASK1_PWRRDYM MC13783_IRQSTAT1_PWRRDYI -#define MC13XXX_IRQMASK1_THWARNLM MC13XXX_IRQSTAT1_THWARNLI -#define MC13XXX_IRQMASK1_THWARNHM MC13XXX_IRQSTAT1_THWARNHI -#define MC13XXX_IRQMASK1_CLKM MC13XXX_IRQSTAT1_CLKI -#define MC13783_IRQMASK1_SEMAFM MC13783_IRQSTAT1_SEMAFI -#define MC13783_IRQMASK1_MC2BM MC13783_IRQSTAT1_MC2BI -#define MC13783_IRQMASK1_HSDETM MC13783_IRQSTAT1_HSDETI -#define MC13783_IRQMASK1_HSLM MC13783_IRQSTAT1_HSLI -#define MC13783_IRQMASK1_ALSPTHM MC13783_IRQSTAT1_ALSPTHI -#define MC13783_IRQMASK1_AHSSHORTM MC13783_IRQSTAT1_AHSSHORTI #define MC13XXX_REVISION 7 #define MC13XXX_REVISION_REVMETAL (0x07 << 0) @@ -189,45 +101,21 @@ EXPORT_SYMBOL(mc13xxx_reg_rmw); int mc13xxx_irq_mask(struct mc13xxx *mc13xxx, int irq) { - int ret; - unsigned int offmask = irq < 24 ? MC13XXX_IRQMASK0 : MC13XXX_IRQMASK1; - u32 irqbit = 1 << (irq < 24 ? irq : irq - 24); - u32 mask; - - if (irq < 0 || irq >= MC13XXX_NUM_IRQ) - return -EINVAL; - - ret = mc13xxx_reg_read(mc13xxx, offmask, &mask); - if (ret) - return ret; + int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq); - if (mask & irqbit) - /* already masked */ - return 0; + disable_irq_nosync(virq); - return mc13xxx_reg_write(mc13xxx, offmask, mask | irqbit); + return 0; } EXPORT_SYMBOL(mc13xxx_irq_mask); int mc13xxx_irq_unmask(struct mc13xxx *mc13xxx, int irq) { - int ret; - unsigned int offmask = irq < 24 ? MC13XXX_IRQMASK0 : MC13XXX_IRQMASK1; - u32 irqbit = 1 << (irq < 24 ? irq : irq - 24); - u32 mask; - - if (irq < 0 || irq >= MC13XXX_NUM_IRQ) - return -EINVAL; + int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq); - ret = mc13xxx_reg_read(mc13xxx, offmask, &mask); - if (ret) - return ret; + enable_irq(virq); - if (!(mask & irqbit)) - /* already unmasked */ - return 0; - - return mc13xxx_reg_write(mc13xxx, offmask, mask & ~irqbit); + return 0; } EXPORT_SYMBOL(mc13xxx_irq_unmask); @@ -239,7 +127,7 @@ int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq, unsigned int offstat = irq < 24 ? MC13XXX_IRQSTAT0 : MC13XXX_IRQSTAT1; u32 irqbit = 1 << (irq < 24 ? irq : irq - 24); - if (irq < 0 || irq >= MC13XXX_NUM_IRQ) + if (irq < 0 || irq >= ARRAY_SIZE(mc13xxx->irqs)) return -EINVAL; if (enabled) { @@ -266,147 +154,26 @@ int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq, } EXPORT_SYMBOL(mc13xxx_irq_status); -int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq) -{ - unsigned int offstat = irq < 24 ? MC13XXX_IRQSTAT0 : MC13XXX_IRQSTAT1; - unsigned int val = 1 << (irq < 24 ? 
irq : irq - 24); - - BUG_ON(irq < 0 || irq >= MC13XXX_NUM_IRQ); - - return mc13xxx_reg_write(mc13xxx, offstat, val); -} -EXPORT_SYMBOL(mc13xxx_irq_ack); - -int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq, - irq_handler_t handler, const char *name, void *dev) -{ - BUG_ON(!mutex_is_locked(&mc13xxx->lock)); - BUG_ON(!handler); - - if (irq < 0 || irq >= MC13XXX_NUM_IRQ) - return -EINVAL; - - if (mc13xxx->irqhandler[irq]) - return -EBUSY; - - mc13xxx->irqhandler[irq] = handler; - mc13xxx->irqdata[irq] = dev; - - return 0; -} -EXPORT_SYMBOL(mc13xxx_irq_request_nounmask); - int mc13xxx_irq_request(struct mc13xxx *mc13xxx, int irq, irq_handler_t handler, const char *name, void *dev) { - int ret; + int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq); - ret = mc13xxx_irq_request_nounmask(mc13xxx, irq, handler, name, dev); - if (ret) - return ret; - - ret = mc13xxx_irq_unmask(mc13xxx, irq); - if (ret) { - mc13xxx->irqhandler[irq] = NULL; - mc13xxx->irqdata[irq] = NULL; - return ret; - } - - return 0; + return devm_request_threaded_irq(mc13xxx->dev, virq, NULL, handler, + 0, name, dev); } EXPORT_SYMBOL(mc13xxx_irq_request); int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev) { - int ret; - BUG_ON(!mutex_is_locked(&mc13xxx->lock)); + int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq); - if (irq < 0 || irq >= MC13XXX_NUM_IRQ || !mc13xxx->irqhandler[irq] || - mc13xxx->irqdata[irq] != dev) - return -EINVAL; - - ret = mc13xxx_irq_mask(mc13xxx, irq); - if (ret) - return ret; - - mc13xxx->irqhandler[irq] = NULL; - mc13xxx->irqdata[irq] = NULL; + devm_free_irq(mc13xxx->dev, virq, dev); return 0; } EXPORT_SYMBOL(mc13xxx_irq_free); -static inline irqreturn_t mc13xxx_irqhandler(struct mc13xxx *mc13xxx, int irq) -{ - return mc13xxx->irqhandler[irq](irq, mc13xxx->irqdata[irq]); -} - -/* - * returns: number of handled irqs or negative error - * locking: holds mc13xxx->lock - */ -static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx, - unsigned int offstat, unsigned int offmask, int baseirq) -{ - u32 stat, mask; - int ret = mc13xxx_reg_read(mc13xxx, offstat, &stat); - int num_handled = 0; - - if (ret) - return ret; - - ret = mc13xxx_reg_read(mc13xxx, offmask, &mask); - if (ret) - return ret; - - while (stat & ~mask) { - int irq = __ffs(stat & ~mask); - - stat &= ~(1 << irq); - - if (likely(mc13xxx->irqhandler[baseirq + irq])) { - irqreturn_t handled; - - handled = mc13xxx_irqhandler(mc13xxx, baseirq + irq); - if (handled == IRQ_HANDLED) - num_handled++; - } else { - dev_err(mc13xxx->dev, - "BUG: irq %u but no handler\n", - baseirq + irq); - - mask |= 1 << irq; - - ret = mc13xxx_reg_write(mc13xxx, offmask, mask); - } - } - - return num_handled; -} - -static irqreturn_t mc13xxx_irq_thread(int irq, void *data) -{ - struct mc13xxx *mc13xxx = data; - irqreturn_t ret; - int handled = 0; - - mc13xxx_lock(mc13xxx); - - ret = mc13xxx_irq_handle(mc13xxx, MC13XXX_IRQSTAT0, - MC13XXX_IRQMASK0, 0); - if (ret > 0) - handled = 1; - - ret = mc13xxx_irq_handle(mc13xxx, MC13XXX_IRQSTAT1, - MC13XXX_IRQMASK1, 24); - if (ret > 0) - handled = 1; - - mc13xxx_unlock(mc13xxx); - - return IRQ_RETVAL(handled); -} - #define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask)) static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision) { @@ -475,8 +242,6 @@ static irqreturn_t mc13xxx_handler_adcdone(int irq, void *data) { struct mc13xxx_adcdone_data *adcdone_data = data; - mc13xxx_irq_ack(adcdone_data->mc13xxx, irq); - complete_all(&adcdone_data->done); return IRQ_HANDLED; @@ -544,7 +309,6 @@ 
int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__); mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, mc13xxx_handler_adcdone, __func__, &adcdone_data); - mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE); mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0); mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1); @@ -599,7 +363,8 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx, if (!cell.name) return -ENOMEM; - return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0, NULL); + return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0, + regmap_irq_get_domain(mc13xxx->irq_data)); } static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format) @@ -640,8 +405,8 @@ int mc13xxx_common_init(struct device *dev) { struct mc13xxx_platform_data *pdata = dev_get_platdata(dev); struct mc13xxx *mc13xxx = dev_get_drvdata(dev); - int ret; u32 revision; + int i, ret; mc13xxx->dev = dev; @@ -651,31 +416,32 @@ int mc13xxx_common_init(struct device *dev) mc13xxx->variant->print_revision(mc13xxx, revision); - /* mask all irqs */ - ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff); - if (ret) - return ret; + for (i = 0; i < ARRAY_SIZE(mc13xxx->irqs); i++) { + mc13xxx->irqs[i].reg_offset = i / MC13XXX_IRQ_PER_REG; + mc13xxx->irqs[i].mask = BIT(i % MC13XXX_IRQ_PER_REG); + } - ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK1, 0x00ffffff); + mc13xxx->irq_chip.name = dev_name(dev); + mc13xxx->irq_chip.status_base = MC13XXX_IRQSTAT0; + mc13xxx->irq_chip.mask_base = MC13XXX_IRQMASK0; + mc13xxx->irq_chip.ack_base = MC13XXX_IRQSTAT0; + mc13xxx->irq_chip.irq_reg_stride = MC13XXX_IRQSTAT1 - MC13XXX_IRQSTAT0; + mc13xxx->irq_chip.init_ack_masked = true; + mc13xxx->irq_chip.use_ack = true; + mc13xxx->irq_chip.num_regs = MC13XXX_IRQ_REG_CNT; + mc13xxx->irq_chip.irqs = mc13xxx->irqs; + mc13xxx->irq_chip.num_irqs = ARRAY_SIZE(mc13xxx->irqs); + + ret = regmap_add_irq_chip(mc13xxx->regmap, mc13xxx->irq, IRQF_ONESHOT, + 0, &mc13xxx->irq_chip, &mc13xxx->irq_data); if (ret) return ret; mutex_init(&mc13xxx->lock); - ret = request_threaded_irq(mc13xxx->irq, NULL, mc13xxx_irq_thread, - IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx); - if (ret) - return ret; - if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata) mc13xxx->flags = pdata->flags; - if (mc13xxx->flags & MC13XXX_USE_ADC) - mc13xxx_add_subdevice(mc13xxx, "%s-adc"); - - if (mc13xxx->flags & MC13XXX_USE_RTC) - mc13xxx_add_subdevice(mc13xxx, "%s-rtc"); - if (pdata) { mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator", &pdata->regulators, sizeof(pdata->regulators)); @@ -699,6 +465,12 @@ int mc13xxx_common_init(struct device *dev) mc13xxx_add_subdevice(mc13xxx, "%s-ts"); } + if (mc13xxx->flags & MC13XXX_USE_ADC) + mc13xxx_add_subdevice(mc13xxx, "%s-adc"); + + if (mc13xxx->flags & MC13XXX_USE_RTC) + mc13xxx_add_subdevice(mc13xxx, "%s-rtc"); + return 0; } EXPORT_SYMBOL_GPL(mc13xxx_common_init); @@ -707,8 +479,8 @@ int mc13xxx_common_exit(struct device *dev) { struct mc13xxx *mc13xxx = dev_get_drvdata(dev); - free_irq(mc13xxx->irq, mc13xxx); mfd_remove_devices(dev); + regmap_del_irq_chip(mc13xxx->irq, mc13xxx->irq_data); mutex_destroy(&mc13xxx->lock); return 0; diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h index ae7f1659f5d1..33677d1dcf66 100644 --- a/drivers/mfd/mc13xxx.h +++ b/drivers/mfd/mc13xxx.h @@ -13,7 +13,9 @@ #include <linux/regmap.h> #include <linux/mfd/mc13xxx.h> -#define MC13XXX_NUMREGS 0x3f +#define MC13XXX_NUMREGS 0x3f +#define 
MC13XXX_IRQ_REG_CNT 2 +#define MC13XXX_IRQ_PER_REG 24 struct mc13xxx; @@ -33,13 +35,14 @@ struct mc13xxx { struct device *dev; const struct mc13xxx_variant *variant; + struct regmap_irq irqs[MC13XXX_IRQ_PER_REG * MC13XXX_IRQ_REG_CNT]; + struct regmap_irq_chip irq_chip; + struct regmap_irq_chip_data *irq_data; + struct mutex lock; int irq; int flags; - irq_handler_t irqhandler[MC13XXX_NUM_IRQ]; - void *irqdata[MC13XXX_NUM_IRQ]; - int adcflags; }; diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c index 62e5e3617eb0..7f5066e39752 100644 --- a/drivers/mfd/mcp-core.c +++ b/drivers/mfd/mcp-core.c @@ -137,6 +137,7 @@ EXPORT_SYMBOL(mcp_reg_read); void mcp_enable(struct mcp *mcp) { unsigned long flags; + spin_lock_irqsave(&mcp->lock, flags); if (mcp->use_count++ == 0) mcp->ops->enable(mcp); diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index b48d80c367f9..83dab2f0a50e 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap, for (i = 0; i < omap->nports; i++) { if (is_ehci_phy_mode(pdata->port_mode[i])) { - reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; break; } } @@ -647,7 +647,7 @@ static int usbhs_omap_probe(struct platform_device *pdev) default: omap->nports = OMAP3_HS_USB_PORTS; dev_dbg(dev, - "USB HOST Rev:0x%d not recognized, assuming %d ports\n", + "USB HOST Rev:0x%x not recognized, assuming %d ports\n", omap->usbhs_rev, omap->nports); break; } diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 41ab5e34d2ac..c87f7a0a53f8 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -244,20 +244,20 @@ static int pcf50633_probe(struct i2c_client *client, for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { struct platform_device *pdev; + int j; pdev = platform_device_alloc("pcf50633-regulator", i); - if (!pdev) { - dev_err(pcf->dev, "Cannot create regulator %d\n", i); - continue; - } + if (!pdev) + return -ENOMEM; pdev->dev.parent = pcf->dev; - if (platform_device_add_data(pdev, &pdata->reg_init_data[i], - sizeof(pdata->reg_init_data[i])) < 0) { + ret = platform_device_add_data(pdev, &pdata->reg_init_data[i], + sizeof(pdata->reg_init_data[i])); + if (ret) { platform_device_put(pdev); - dev_err(pcf->dev, "Out of memory for regulator parameters %d\n", - i); - continue; + for (j = 0; j < i; j++) + platform_device_put(pcf->regulator_pdev[j]); + return ret; } pcf->regulator_pdev[i] = pdev; diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c index 959513803542..39904f77c049 100644 --- a/drivers/mfd/pm8921-core.c +++ b/drivers/mfd/pm8921-core.c @@ -186,11 +186,9 @@ static void pm8xxx_irq_mask_ack(struct irq_data *d) { struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); unsigned int pmirq = irqd_to_hwirq(d); - int irq_bit; u8 block, config; block = pmirq / 8; - irq_bit = pmirq % 8; config = chip->config[pmirq] | PM_IRQF_MASK_ALL | PM_IRQF_CLR; pm8xxx_config_irq(chip, block, config); @@ -200,11 +198,9 @@ static void pm8xxx_irq_unmask(struct irq_data *d) { struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); unsigned int pmirq = irqd_to_hwirq(d); - int irq_bit; u8 block, config; block = pmirq / 8; - irq_bit = pmirq % 8; config = chip->config[pmirq]; pm8xxx_config_irq(chip, block, config); diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index 1d15735f9ef9..d01b8c249231 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -337,40 +337,64 
@@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr, int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, int num_sg, bool read, int timeout) { - struct completion trans_done; - u8 dir; - int err = 0, i, count; - long timeleft; - unsigned long flags; - struct scatterlist *sg; - enum dma_data_direction dma_dir; - u32 val; - dma_addr_t addr; - unsigned int len; + int err = 0, count; dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg); + count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); + if (count < 1) + return -EINVAL; + dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count); + + err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout); + + rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); + + return err; +} +EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); + +int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read) +{ + enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - /* don't transfer data during abort processing */ if (pcr->remove_pci) return -EINVAL; if ((sglist == NULL) || (num_sg <= 0)) return -EINVAL; - if (read) { - dir = DEVICE_TO_HOST; - dma_dir = DMA_FROM_DEVICE; - } else { - dir = HOST_TO_DEVICE; - dma_dir = DMA_TO_DEVICE; - } + return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir); +} +EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg); - count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); - if (count < 1) { - dev_err(&(pcr->pci->dev), "scatterlist map failed\n"); +void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int num_sg, bool read) +{ + enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + + dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir); +} +EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg); + +int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, + int count, bool read, int timeout) +{ + struct completion trans_done; + struct scatterlist *sg; + dma_addr_t addr; + long timeleft; + unsigned long flags; + unsigned int len; + int i, err = 0; + u32 val; + u8 dir = read ? 
DEVICE_TO_HOST : HOST_TO_DEVICE; + + if (pcr->remove_pci) + return -ENODEV; + + if ((sglist == NULL) || (count < 1)) return -EINVAL; - } - dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count); val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; pcr->sgi = 0; @@ -400,12 +424,10 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, } spin_lock_irqsave(&pcr->lock, flags); - if (pcr->trans_result == TRANS_RESULT_FAIL) err = -EINVAL; else if (pcr->trans_result == TRANS_NO_DEVICE) err = -ENODEV; - spin_unlock_irqrestore(&pcr->lock, flags); out: @@ -413,8 +435,6 @@ out: pcr->done = NULL; spin_unlock_irqrestore(&pcr->lock, flags); - dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); - if ((err < 0) && (err != -ENODEV)) rtsx_pci_stop_cmd(pcr); @@ -423,7 +443,7 @@ out: return err; } -EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); +EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer); int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) { diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index 6352bec8419a..71f387ce8cbd 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c @@ -744,6 +744,7 @@ static struct usb_device_id rtsx_usb_usb_ids[] = { { USB_DEVICE(0x0BDA, 0x0140) }, { } }; +MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids); static struct usb_driver rtsx_usb_driver = { .name = "rtsx_usb", diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index be06d0abbf19..dba7e2b6f8e9 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -28,8 +28,10 @@ #include <linux/mfd/samsung/s2mpa01.h> #include <linux/mfd/samsung/s2mps11.h> #include <linux/mfd/samsung/s2mps14.h> +#include <linux/mfd/samsung/s2mpu02.h> #include <linux/mfd/samsung/s5m8763.h> #include <linux/mfd/samsung/s5m8767.h> +#include <linux/regulator/machine.h> #include <linux/regmap.h> static const struct mfd_cell s5m8751_devs[] = { @@ -89,6 +91,15 @@ static const struct mfd_cell s2mpa01_devs[] = { }, }; +static const struct mfd_cell s2mpu02_devs[] = { + { .name = "s2mpu02-pmic", }, + { .name = "s2mpu02-rtc", }, + { + .name = "s2mpu02-clk", + .of_compatible = "samsung,s2mpu02-clk", + } +}; + #ifdef CONFIG_OF static const struct of_device_id sec_dt_match[] = { { .compatible = "samsung,s5m8767-pmic", @@ -103,6 +114,9 @@ static const struct of_device_id sec_dt_match[] = { .compatible = "samsung,s2mpa01-pmic", .data = (void *)S2MPA01, }, { + .compatible = "samsung,s2mpu02-pmic", + .data = (void *)S2MPU02, + }, { /* Sentinel */ }, }; @@ -132,6 +146,18 @@ static bool s2mps11_volatile(struct device *dev, unsigned int reg) } } +static bool s2mpu02_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case S2MPU02_REG_INT1M: + case S2MPU02_REG_INT2M: + case S2MPU02_REG_INT3M: + return false; + default: + return true; + } +} + static bool s5m8763_volatile(struct device *dev, unsigned int reg) { switch (reg) { @@ -177,6 +203,15 @@ static const struct regmap_config s2mps14_regmap_config = { .cache_type = REGCACHE_FLAT, }; +static const struct regmap_config s2mpu02_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = S2MPU02_REG_DVSDATA, + .volatile_reg = s2mpu02_volatile, + .cache_type = REGCACHE_FLAT, +}; + static const struct regmap_config s5m8763_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -238,6 +273,7 @@ static inline unsigned long sec_i2c_get_driver_data(struct i2c_client *i2c, #ifdef CONFIG_OF if (i2c->dev.of_node) { const struct of_device_id *match; + match = of_match_node(sec_dt_match, i2c->dev.of_node); return (unsigned 
long)match->data; } @@ -250,9 +286,10 @@ static int sec_pmic_probe(struct i2c_client *i2c, { struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev); const struct regmap_config *regmap; + const struct mfd_cell *sec_devs; struct sec_pmic_dev *sec_pmic; unsigned long device_type; - int ret; + int ret, num_sec_devs; sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev), GFP_KERNEL); @@ -297,6 +334,9 @@ static int sec_pmic_probe(struct i2c_client *i2c, case S5M8767X: regmap = &s5m8767_regmap_config; break; + case S2MPU02: + regmap = &s2mpu02_regmap_config; + break; default: regmap = &sec_regmap_config; break; @@ -319,34 +359,39 @@ static int sec_pmic_probe(struct i2c_client *i2c, switch (sec_pmic->device_type) { case S5M8751X: - ret = mfd_add_devices(sec_pmic->dev, -1, s5m8751_devs, - ARRAY_SIZE(s5m8751_devs), NULL, 0, NULL); + sec_devs = s5m8751_devs; + num_sec_devs = ARRAY_SIZE(s5m8751_devs); break; case S5M8763X: - ret = mfd_add_devices(sec_pmic->dev, -1, s5m8763_devs, - ARRAY_SIZE(s5m8763_devs), NULL, 0, NULL); + sec_devs = s5m8763_devs; + num_sec_devs = ARRAY_SIZE(s5m8763_devs); break; case S5M8767X: - ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs, - ARRAY_SIZE(s5m8767_devs), NULL, 0, NULL); + sec_devs = s5m8767_devs; + num_sec_devs = ARRAY_SIZE(s5m8767_devs); break; case S2MPA01: - ret = mfd_add_devices(sec_pmic->dev, -1, s2mpa01_devs, - ARRAY_SIZE(s2mpa01_devs), NULL, 0, NULL); + sec_devs = s2mpa01_devs; + num_sec_devs = ARRAY_SIZE(s2mpa01_devs); break; case S2MPS11X: - ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs, - ARRAY_SIZE(s2mps11_devs), NULL, 0, NULL); + sec_devs = s2mps11_devs; + num_sec_devs = ARRAY_SIZE(s2mps11_devs); break; case S2MPS14X: - ret = mfd_add_devices(sec_pmic->dev, -1, s2mps14_devs, - ARRAY_SIZE(s2mps14_devs), NULL, 0, NULL); + sec_devs = s2mps14_devs; + num_sec_devs = ARRAY_SIZE(s2mps14_devs); + break; + case S2MPU02: + sec_devs = s2mpu02_devs; + num_sec_devs = ARRAY_SIZE(s2mpu02_devs); break; default: /* If this happens the probe function is problem */ BUG(); } - + ret = mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs, NULL, + 0, NULL); if (ret) goto err_mfd; @@ -387,6 +432,15 @@ static int sec_pmic_suspend(struct device *dev) */ disable_irq(sec_pmic->irq); + switch (sec_pmic->device_type) { + case S2MPS14X: + case S2MPU02: + regulator_suspend_prepare(PM_SUSPEND_MEM); + break; + default: + break; + } + return 0; } diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c index 654e2c1dbf7a..f9a57869e3ec 100644 --- a/drivers/mfd/sec-irq.c +++ b/drivers/mfd/sec-irq.c @@ -20,6 +20,7 @@ #include <linux/mfd/samsung/irq.h> #include <linux/mfd/samsung/s2mps11.h> #include <linux/mfd/samsung/s2mps14.h> +#include <linux/mfd/samsung/s2mpu02.h> #include <linux/mfd/samsung/s5m8763.h> #include <linux/mfd/samsung/s5m8767.h> @@ -161,6 +162,77 @@ static const struct regmap_irq s2mps14_irqs[] = { }, }; +static const struct regmap_irq s2mpu02_irqs[] = { + [S2MPU02_IRQ_PWRONF] = { + .reg_offset = 0, + .mask = S2MPS11_IRQ_PWRONF_MASK, + }, + [S2MPU02_IRQ_PWRONR] = { + .reg_offset = 0, + .mask = S2MPS11_IRQ_PWRONR_MASK, + }, + [S2MPU02_IRQ_JIGONBF] = { + .reg_offset = 0, + .mask = S2MPS11_IRQ_JIGONBF_MASK, + }, + [S2MPU02_IRQ_JIGONBR] = { + .reg_offset = 0, + .mask = S2MPS11_IRQ_JIGONBR_MASK, + }, + [S2MPU02_IRQ_ACOKBF] = { + .reg_offset = 0, + .mask = S2MPS11_IRQ_ACOKBF_MASK, + }, + [S2MPU02_IRQ_ACOKBR] = { + .reg_offset = 0, + .mask = S2MPS11_IRQ_ACOKBR_MASK, + }, + [S2MPU02_IRQ_PWRON1S] = { + .reg_offset = 0, + .mask = 
S2MPS11_IRQ_PWRON1S_MASK, + }, + [S2MPU02_IRQ_MRB] = { + .reg_offset = 0, + .mask = S2MPS11_IRQ_MRB_MASK, + }, + [S2MPU02_IRQ_RTC60S] = { + .reg_offset = 1, + .mask = S2MPS11_IRQ_RTC60S_MASK, + }, + [S2MPU02_IRQ_RTCA1] = { + .reg_offset = 1, + .mask = S2MPS11_IRQ_RTCA1_MASK, + }, + [S2MPU02_IRQ_RTCA0] = { + .reg_offset = 1, + .mask = S2MPS11_IRQ_RTCA0_MASK, + }, + [S2MPU02_IRQ_SMPL] = { + .reg_offset = 1, + .mask = S2MPS11_IRQ_SMPL_MASK, + }, + [S2MPU02_IRQ_RTC1S] = { + .reg_offset = 1, + .mask = S2MPS11_IRQ_RTC1S_MASK, + }, + [S2MPU02_IRQ_WTSR] = { + .reg_offset = 1, + .mask = S2MPS11_IRQ_WTSR_MASK, + }, + [S2MPU02_IRQ_INT120C] = { + .reg_offset = 2, + .mask = S2MPS11_IRQ_INT120C_MASK, + }, + [S2MPU02_IRQ_INT140C] = { + .reg_offset = 2, + .mask = S2MPS11_IRQ_INT140C_MASK, + }, + [S2MPU02_IRQ_TSD] = { + .reg_offset = 2, + .mask = S2MPS14_IRQ_TSD_MASK, + }, +}; + static const struct regmap_irq s5m8767_irqs[] = { [S5M8767_IRQ_PWRR] = { .reg_offset = 0, @@ -327,6 +399,16 @@ static const struct regmap_irq_chip s2mps14_irq_chip = { .ack_base = S2MPS14_REG_INT1, }; +static const struct regmap_irq_chip s2mpu02_irq_chip = { + .name = "s2mpu02", + .irqs = s2mpu02_irqs, + .num_irqs = ARRAY_SIZE(s2mpu02_irqs), + .num_regs = 3, + .status_base = S2MPU02_REG_INT1, + .mask_base = S2MPU02_REG_INT1M, + .ack_base = S2MPU02_REG_INT1, +}; + static const struct regmap_irq_chip s5m8767_irq_chip = { .name = "s5m8767", .irqs = s5m8767_irqs, @@ -351,6 +433,7 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic) { int ret = 0; int type = sec_pmic->device_type; + const struct regmap_irq_chip *sec_irq_chip; if (!sec_pmic->irq) { dev_warn(sec_pmic->dev, @@ -361,28 +444,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic) switch (type) { case S5M8763X: - ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - sec_pmic->irq_base, &s5m8763_irq_chip, - &sec_pmic->irq_data); + sec_irq_chip = &s5m8763_irq_chip; break; case S5M8767X: - ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - sec_pmic->irq_base, &s5m8767_irq_chip, - &sec_pmic->irq_data); + sec_irq_chip = &s5m8767_irq_chip; break; case S2MPS11X: - ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - sec_pmic->irq_base, &s2mps11_irq_chip, - &sec_pmic->irq_data); + sec_irq_chip = &s2mps11_irq_chip; break; case S2MPS14X: - ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - sec_pmic->irq_base, &s2mps14_irq_chip, - &sec_pmic->irq_data); + sec_irq_chip = &s2mps14_irq_chip; + break; + case S2MPU02: + sec_irq_chip = &s2mpu02_irq_chip; break; default: dev_err(sec_pmic->dev, "Unknown device type %lu\n", @@ -390,6 +464,10 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic) return -EINVAL; } + ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + sec_pmic->irq_base, sec_irq_chip, + &sec_pmic->irq_data); if (ret != 0) { dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret); return ret; diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c index 6f1ef63086c9..2086b4665288 100644 --- a/drivers/mfd/si476x-cmd.c +++ b/drivers/mfd/si476x-cmd.c @@ -1228,8 +1228,8 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, } static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, - struct si476x_rsq_status_args *rsqargs, - struct si476x_rsq_status_report *report) + struct 
si476x_rsq_status_args *rsqargs, + struct si476x_rsq_status_report *report) { int err; u8 resp[CMD_FM_RSQ_STATUS_A10_NRESP]; @@ -1434,10 +1434,10 @@ typedef int (*tune_freq_func_t) (struct si476x_core *core, struct si476x_tune_freq_args *tuneargs); static struct { - int (*power_up) (struct si476x_core *, - struct si476x_power_up_args *); - int (*power_down) (struct si476x_core *, - struct si476x_power_down_args *); + int (*power_up)(struct si476x_core *, + struct si476x_power_up_args *); + int (*power_down)(struct si476x_core *, + struct si476x_power_down_args *); tune_freq_func_t fm_tune_freq; tune_freq_func_t am_tune_freq; diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c index a45f9c0a330a..5c054031c3f8 100644 --- a/drivers/mfd/stmpe-i2c.c +++ b/drivers/mfd/stmpe-i2c.c @@ -68,7 +68,7 @@ MODULE_DEVICE_TABLE(of, stmpe_of_match); static int stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { - int partnum; + enum stmpe_partnum partnum; const struct of_device_id *of_id; i2c_ci.data = (void *)id; @@ -85,7 +85,7 @@ stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) dev_info(&i2c->dev, "matching on node name, compatible is preferred\n"); partnum = id->driver_data; } else - partnum = (int)of_id->data; + partnum = (enum stmpe_partnum)of_id->data; return stmpe_probe(&i2c_ci, partnum); } diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 3b6bfa7184ad..02a17c388e87 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -1147,7 +1147,7 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata, } /* Called from client specific probe routines */ -int stmpe_probe(struct stmpe_client_info *ci, int partnum) +int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum) { struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev); struct device_node *np = ci->dev->of_node; diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h index 9e4d21d37a11..2d045f26f193 100644 --- a/drivers/mfd/stmpe.h +++ b/drivers/mfd/stmpe.h @@ -97,7 +97,7 @@ struct stmpe_client_info { void (*init)(struct stmpe *stmpe); }; -int stmpe_probe(struct stmpe_client_info *ci, int partnum); +int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum); int stmpe_remove(struct stmpe *stmpe); #define STMPE_ICR_LSB_HIGH (1 << 2) diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c index 718fc4d2adc0..283ab8d197e4 100644 --- a/drivers/mfd/sun6i-prcm.c +++ b/drivers/mfd/sun6i-prcm.c @@ -76,16 +76,46 @@ static const struct mfd_cell sun6i_a31_prcm_subdevs[] = { }, }; +static const struct mfd_cell sun8i_a23_prcm_subdevs[] = { + { + .name = "sun8i-a23-apb0-clk", + .of_compatible = "allwinner,sun8i-a23-apb0-clk", + .num_resources = ARRAY_SIZE(sun6i_a31_apb0_clk_res), + .resources = sun6i_a31_apb0_clk_res, + }, + { + .name = "sun6i-a31-apb0-gates-clk", + .of_compatible = "allwinner,sun8i-a23-apb0-gates-clk", + .num_resources = ARRAY_SIZE(sun6i_a31_apb0_gates_clk_res), + .resources = sun6i_a31_apb0_gates_clk_res, + }, + { + .name = "sun6i-a31-apb0-clock-reset", + .of_compatible = "allwinner,sun6i-a31-clock-reset", + .num_resources = ARRAY_SIZE(sun6i_a31_apb0_rstc_res), + .resources = sun6i_a31_apb0_rstc_res, + }, +}; + static const struct prcm_data sun6i_a31_prcm_data = { .nsubdevs = ARRAY_SIZE(sun6i_a31_prcm_subdevs), .subdevs = sun6i_a31_prcm_subdevs, }; +static const struct prcm_data sun8i_a23_prcm_data = { + .nsubdevs = ARRAY_SIZE(sun8i_a23_prcm_subdevs), + .subdevs = sun8i_a23_prcm_subdevs, +}; + static const struct 
of_device_id sun6i_prcm_dt_ids[] = { { .compatible = "allwinner,sun6i-a31-prcm", .data = &sun6i_a31_prcm_data, }, + { + .compatible = "allwinner,sun8i-a23-prcm", + .data = &sun8i_a23_prcm_data, + }, { /* sentinel */ }, }; diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index bd83accc0f6d..0072e668c208 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ -236,7 +236,7 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq) static struct irq_domain_ops tc3589x_irq_ops = { .map = tc3589x_irq_map, .unmap = tc3589x_irq_unmap, - .xlate = irq_domain_xlate_twocell, + .xlate = irq_domain_xlate_onecell, }; static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np) diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c index 591a331d8d83..e71f88000ae5 100644 --- a/drivers/mfd/tc6387xb.c +++ b/drivers/mfd/tc6387xb.c @@ -147,11 +147,10 @@ static int tc6387xb_probe(struct platform_device *dev) int irq, ret; iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!iomem) { + if (!iomem) return -EINVAL; - } - tc6387xb = kzalloc(sizeof *tc6387xb, GFP_KERNEL); + tc6387xb = kzalloc(sizeof(*tc6387xb), GFP_KERNEL); if (!tc6387xb) return -ENOMEM; @@ -189,7 +188,7 @@ static int tc6387xb_probe(struct platform_device *dev) if (pdata && pdata->enable) pdata->enable(dev); - printk(KERN_INFO "Toshiba tc6387xb initialised\n"); + dev_info(&dev->dev, "Toshiba tc6387xb initialised\n"); ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells, ARRAY_SIZE(tc6387xb_cells), iomem, irq, NULL); diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c index b5dfa6e4e692..5de95c265c1a 100644 --- a/drivers/mfd/tps6105x.c +++ b/drivers/mfd/tps6105x.c @@ -141,7 +141,7 @@ static int tps6105x_probe(struct i2c_client *client, int ret; int i; - tps6105x = kmalloc(sizeof(*tps6105x), GFP_KERNEL); + tps6105x = devm_kmalloc(&client->dev, sizeof(*tps6105x), GFP_KERNEL); if (!tps6105x) return -ENOMEM; @@ -154,7 +154,7 @@ static int tps6105x_probe(struct i2c_client *client, ret = tps6105x_startup(tps6105x); if (ret) { dev_err(&client->dev, "chip initialization failed\n"); - goto fail; + return ret; } /* Remove warning texts when you implement new cell drivers */ @@ -187,16 +187,8 @@ static int tps6105x_probe(struct i2c_client *client, tps6105x_cells[i].pdata_size = sizeof(*tps6105x); } - ret = mfd_add_devices(&client->dev, 0, tps6105x_cells, - ARRAY_SIZE(tps6105x_cells), NULL, 0, NULL); - if (ret) - goto fail; - - return 0; - -fail: - kfree(tps6105x); - return ret; + return mfd_add_devices(&client->dev, 0, tps6105x_cells, + ARRAY_SIZE(tps6105x_cells), NULL, 0, NULL); } static int tps6105x_remove(struct i2c_client *client) @@ -210,7 +202,6 @@ static int tps6105x_remove(struct i2c_client *client) TPS6105X_REG0_MODE_MASK, TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT); - kfree(tps6105x); return 0; } diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c index f9e42ea1cb1a..f243e75d28f3 100644 --- a/drivers/mfd/tps65910.c +++ b/drivers/mfd/tps65910.c @@ -387,7 +387,7 @@ static const struct of_device_id tps65910_of_match[] = { MODULE_DEVICE_TABLE(of, tps65910_of_match); static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client, - int *chip_id) + unsigned long *chip_id) { struct device_node *np = client->dev.of_node; struct tps65910_board *board_info; @@ -401,7 +401,7 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client, return NULL; } - *chip_id = (int)match->data; + *chip_id = (unsigned long)match->data; board_info = 
devm_kzalloc(&client->dev, sizeof(*board_info), GFP_KERNEL); @@ -431,7 +431,7 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client, #else static inline struct tps65910_board *tps65910_parse_dt(struct i2c_client *client, - int *chip_id) + unsigned long *chip_id) { return NULL; } @@ -453,14 +453,14 @@ static void tps65910_power_off(void) } static int tps65910_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { struct tps65910 *tps65910; struct tps65910_board *pmic_plat_data; struct tps65910_board *of_pmic_plat_data = NULL; struct tps65910_platform_data *init_data; + unsigned long chip_id = id->driver_data; int ret = 0; - int chip_id = id->driver_data; pmic_plat_data = dev_get_platdata(&i2c->dev); diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index 69a5178bf152..de60ad98bd9f 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -32,10 +32,9 @@ static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr, unsigned long spi_data = 1 << 23 | addr << 15 | *data; struct spi_transfer xfer; struct spi_message msg; - u32 tx_buf, rx_buf; + u32 tx_buf; tx_buf = spi_data; - rx_buf = 0; xfer.tx_buf = &tx_buf; xfer.rx_buf = NULL; diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 596b1f657e21..b1dabba763cf 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -297,7 +297,7 @@ static irqreturn_t handle_twl4030_pih(int irq, void *devid) ret = twl_i2c_read_u8(TWL_MODULE_PIH, &pih_isr, REG_PIH_ISR_P1); if (ret) { - pr_warning("twl4030: I2C error %d reading PIH ISR\n", ret); + pr_warn("twl4030: I2C error %d reading PIH ISR\n", ret); return IRQ_NONE; } @@ -338,7 +338,7 @@ static int twl4030_init_sih_modules(unsigned line) irq_line = line; /* disable all interrupts on our line */ - memset(buf, 0xff, sizeof buf); + memset(buf, 0xff, sizeof(buf)); sih = sih_modules; for (i = 0; i < nr_sih_modules; i++, sih++) { /* skip USB -- it's funky */ @@ -646,7 +646,7 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base) if (status < 0) return status; - agent = kzalloc(sizeof *agent, GFP_KERNEL); + agent = kzalloc(sizeof(*agent), GFP_KERNEL); if (!agent) return -ENOMEM; diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 3bc969a5916b..4d3ff3771491 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -724,24 +724,24 @@ static struct twl4030_script *omap3_idle_scripts[] = { * above. 
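Several drivers in this series (max77686, stmpe, tps65910) carry a chip identifier in the of_device_id .data pointer and recover it with a cast through unsigned long, which is exactly what the tps65910 chip_id fix above makes explicit. A small sketch of the pattern in isolation; the compatible strings, enum and helper are invented for the example:

#include <linux/of.h>
#include <linux/of_device.h>

enum example_chip { EXAMPLE_CHIP_A, EXAMPLE_CHIP_B };

static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,example-a", .data = (void *)EXAMPLE_CHIP_A },
	{ .compatible = "vendor,example-b", .data = (void *)EXAMPLE_CHIP_B },
	{ /* sentinel */ },
};

static enum example_chip example_get_chip(struct device *dev)
{
	const struct of_device_id *match = of_match_node(example_dt_match,
							 dev->of_node);

	/* Cast to the pointer-sized integer first, then narrow to the enum. */
	return match ? (enum example_chip)(unsigned long)match->data
		     : EXAMPLE_CHIP_A;
}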
*/ static struct twl4030_resconfig omap3_idle_rconfig[] = { - TWL_REMAP_SLEEP(RES_VAUX1, DEV_GRP_NULL, 0, 0), - TWL_REMAP_SLEEP(RES_VAUX2, DEV_GRP_NULL, 0, 0), - TWL_REMAP_SLEEP(RES_VAUX3, DEV_GRP_NULL, 0, 0), - TWL_REMAP_SLEEP(RES_VAUX4, DEV_GRP_NULL, 0, 0), - TWL_REMAP_SLEEP(RES_VMMC1, DEV_GRP_NULL, 0, 0), - TWL_REMAP_SLEEP(RES_VMMC2, DEV_GRP_NULL, 0, 0), + TWL_REMAP_SLEEP(RES_VAUX1, TWL4030_RESCONFIG_UNDEF, 0, 0), + TWL_REMAP_SLEEP(RES_VAUX2, TWL4030_RESCONFIG_UNDEF, 0, 0), + TWL_REMAP_SLEEP(RES_VAUX3, TWL4030_RESCONFIG_UNDEF, 0, 0), + TWL_REMAP_SLEEP(RES_VAUX4, TWL4030_RESCONFIG_UNDEF, 0, 0), + TWL_REMAP_SLEEP(RES_VMMC1, TWL4030_RESCONFIG_UNDEF, 0, 0), + TWL_REMAP_SLEEP(RES_VMMC2, TWL4030_RESCONFIG_UNDEF, 0, 0), TWL_REMAP_OFF(RES_VPLL1, DEV_GRP_P1, 3, 1), TWL_REMAP_SLEEP(RES_VPLL2, DEV_GRP_P1, 0, 0), - TWL_REMAP_SLEEP(RES_VSIM, DEV_GRP_NULL, 0, 0), - TWL_REMAP_SLEEP(RES_VDAC, DEV_GRP_NULL, 0, 0), + TWL_REMAP_SLEEP(RES_VSIM, TWL4030_RESCONFIG_UNDEF, 0, 0), + TWL_REMAP_SLEEP(RES_VDAC, TWL4030_RESCONFIG_UNDEF, 0, 0), TWL_REMAP_SLEEP(RES_VINTANA1, TWL_DEV_GRP_P123, 1, 2), TWL_REMAP_SLEEP(RES_VINTANA2, TWL_DEV_GRP_P123, 0, 2), TWL_REMAP_SLEEP(RES_VINTDIG, TWL_DEV_GRP_P123, 1, 2), TWL_REMAP_SLEEP(RES_VIO, TWL_DEV_GRP_P123, 2, 2), TWL_REMAP_OFF(RES_VDD1, DEV_GRP_P1, 4, 1), TWL_REMAP_OFF(RES_VDD2, DEV_GRP_P1, 3, 1), - TWL_REMAP_SLEEP(RES_VUSB_1V5, DEV_GRP_NULL, 0, 0), - TWL_REMAP_SLEEP(RES_VUSB_1V8, DEV_GRP_NULL, 0, 0), + TWL_REMAP_SLEEP(RES_VUSB_1V5, TWL4030_RESCONFIG_UNDEF, 0, 0), + TWL_REMAP_SLEEP(RES_VUSB_1V8, TWL4030_RESCONFIG_UNDEF, 0, 0), TWL_REMAP_SLEEP(RES_VUSB_3V1, TWL_DEV_GRP_P123, 0, 0), /* Resource #20 USB charge pump skipped */ TWL_REMAP_SLEEP(RES_REGEN, TWL_DEV_GRP_P123, 2, 1), diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index a6bb17d908b8..2807e1a95663 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c @@ -70,7 +70,7 @@ static int twl6030_interrupt_mapping[24] = { BATDETECT_INTR_OFFSET, /* Bit 9 BAT */ SIMDETECT_INTR_OFFSET, /* Bit 10 SIM */ MMCDETECT_INTR_OFFSET, /* Bit 11 MMC */ - RSV_INTR_OFFSET, /* Bit 12 Reserved */ + RSV_INTR_OFFSET, /* Bit 12 Reserved */ MADC_INTR_OFFSET, /* Bit 13 GPADC_RT_EOC */ MADC_INTR_OFFSET, /* Bit 14 GPADC_SW_EOC */ GASGAUGE_INTR_OFFSET, /* Bit 15 CC_AUTOCAL */ @@ -245,6 +245,7 @@ int twl6030_interrupt_unmask(u8 bit_mask, u8 offset) { int ret; u8 unmask_value; + ret = twl_i2c_read_u8(TWL_MODULE_PIH, &unmask_value, REG_INT_STS_A + offset); unmask_value &= (~(bit_mask)); @@ -258,6 +259,7 @@ int twl6030_interrupt_mask(u8 bit_mask, u8 offset) { int ret; u8 mask_value; + ret = twl_i2c_read_u8(TWL_MODULE_PIH, &mask_value, REG_INT_STS_A + offset); mask_value |= (bit_mask); diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index ae26d84b3a59..f9c06c542a41 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -700,7 +700,7 @@ static int twl6040_probe(struct i2c_client *client, } ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq, IRQF_ONESHOT, - 0, &twl6040_irq_chip,&twl6040->irq_data); + 0, &twl6040_irq_chip, &twl6040->irq_data); if (ret < 0) goto gpio_err; diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c index c8a993bd17ae..fb4d4bb0f47d 100644 --- a/drivers/mfd/wm5102-tables.c +++ b/drivers/mfd/wm5102-tables.c @@ -138,11 +138,11 @@ static const struct regmap_irq wm5102_irqs[ARIZONA_NUM_IRQ] = { .reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1 }, - [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = { - .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1 + 
[ARIZONA_IRQ_SPK_OVERHEAT_WARN] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1 }, - [ARIZONA_IRQ_SPK_SHUTDOWN] = { - .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1 + [ARIZONA_IRQ_SPK_OVERHEAT] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1 }, [ARIZONA_IRQ_HPDET] = { .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1 diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c index 41a7f6fb7802..9b98ee559188 100644 --- a/drivers/mfd/wm5110-tables.c +++ b/drivers/mfd/wm5110-tables.c @@ -340,11 +340,11 @@ static const struct regmap_irq wm5110_irqs[ARIZONA_NUM_IRQ] = { .reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1 }, - [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = { - .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1 + [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1 }, - [ARIZONA_IRQ_SPK_SHUTDOWN] = { - .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1 + [ARIZONA_IRQ_SPK_OVERHEAT] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1 }, [ARIZONA_IRQ_HPDET] = { .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1 @@ -416,16 +416,28 @@ static const struct regmap_irq wm5110_irqs[ARIZONA_NUM_IRQ] = { [ARIZONA_IRQ_ISRC2_CFG_ERR] = { .reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1 }, + [ARIZONA_IRQ_HP3R_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP3R_DONE_EINT1 + }, + [ARIZONA_IRQ_HP3L_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP3L_DONE_EINT1 + }, + [ARIZONA_IRQ_HP2R_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP2R_DONE_EINT1 + }, + [ARIZONA_IRQ_HP2L_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP2L_DONE_EINT1 + }, + [ARIZONA_IRQ_HP1R_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP1R_DONE_EINT1 + }, + [ARIZONA_IRQ_HP1L_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP1L_DONE_EINT1 + }, [ARIZONA_IRQ_BOOT_DONE] = { .reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1 }, - [ARIZONA_IRQ_DCS_DAC_DONE] = { - .reg_offset = 4, .mask = ARIZONA_DCS_DAC_DONE_EINT1 - }, - [ARIZONA_IRQ_DCS_HP_DONE] = { - .reg_offset = 4, .mask = ARIZONA_DCS_HP_DONE_EINT1 - }, [ARIZONA_IRQ_FLL2_CLOCK_OK] = { .reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1 }, @@ -445,6 +457,209 @@ const struct regmap_irq_chip wm5110_irq = { }; EXPORT_SYMBOL_GPL(wm5110_irq); +static const struct regmap_irq wm5110_revd_irqs[ARIZONA_NUM_IRQ] = { + [ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 }, + [ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 }, + [ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 }, + [ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 }, + + [ARIZONA_IRQ_DSP4_RAM_RDY] = { + .reg_offset = 1, .mask = ARIZONA_DSP4_RAM_RDY_EINT1 + }, + [ARIZONA_IRQ_DSP3_RAM_RDY] = { + .reg_offset = 1, .mask = ARIZONA_DSP3_RAM_RDY_EINT1 + }, + [ARIZONA_IRQ_DSP2_RAM_RDY] = { + .reg_offset = 1, .mask = ARIZONA_DSP2_RAM_RDY_EINT1 + }, + [ARIZONA_IRQ_DSP1_RAM_RDY] = { + .reg_offset = 1, .mask = ARIZONA_DSP1_RAM_RDY_EINT1 + }, + [ARIZONA_IRQ_DSP_IRQ8] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ8_EINT1 + }, + [ARIZONA_IRQ_DSP_IRQ7] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ7_EINT1 + }, + [ARIZONA_IRQ_DSP_IRQ6] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ6_EINT1 + }, + [ARIZONA_IRQ_DSP_IRQ5] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ5_EINT1 + }, + [ARIZONA_IRQ_DSP_IRQ4] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ4_EINT1 + }, + [ARIZONA_IRQ_DSP_IRQ3] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ3_EINT1 + }, + [ARIZONA_IRQ_DSP_IRQ2] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ2_EINT1 + }, + 
[ARIZONA_IRQ_DSP_IRQ1] = { + .reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1 + }, + + [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1 + }, + [ARIZONA_IRQ_SPK_OVERHEAT] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1 + }, + [ARIZONA_IRQ_HPDET] = { + .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1 + }, + [ARIZONA_IRQ_MICDET] = { + .reg_offset = 2, .mask = ARIZONA_MICDET_EINT1 + }, + [ARIZONA_IRQ_WSEQ_DONE] = { + .reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1 + }, + [ARIZONA_IRQ_DRC2_SIG_DET] = { + .reg_offset = 2, .mask = ARIZONA_DRC2_SIG_DET_EINT1 + }, + [ARIZONA_IRQ_DRC1_SIG_DET] = { + .reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1 + }, + [ARIZONA_IRQ_ASRC2_LOCK] = { + .reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1 + }, + [ARIZONA_IRQ_ASRC1_LOCK] = { + .reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1 + }, + [ARIZONA_IRQ_UNDERCLOCKED] = { + .reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1 + }, + [ARIZONA_IRQ_OVERCLOCKED] = { + .reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1 + }, + [ARIZONA_IRQ_FLL2_LOCK] = { + .reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1 + }, + [ARIZONA_IRQ_FLL1_LOCK] = { + .reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1 + }, + [ARIZONA_IRQ_CLKGEN_ERR] = { + .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1 + }, + [ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = { + .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1 + }, + + [ARIZONA_IRQ_CTRLIF_ERR] = { + .reg_offset = 3, .mask = ARIZONA_V2_CTRLIF_ERR_EINT1 + }, + [ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = { + .reg_offset = 3, .mask = ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1 + }, + [ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = { + .reg_offset = 3, .mask = ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1 + }, + [ARIZONA_IRQ_SYSCLK_ENA_LOW] = { + .reg_offset = 3, .mask = ARIZONA_V2_SYSCLK_ENA_LOW_EINT1 + }, + [ARIZONA_IRQ_ISRC1_CFG_ERR] = { + .reg_offset = 3, .mask = ARIZONA_V2_ISRC1_CFG_ERR_EINT1 + }, + [ARIZONA_IRQ_ISRC2_CFG_ERR] = { + .reg_offset = 3, .mask = ARIZONA_V2_ISRC2_CFG_ERR_EINT1 + }, + [ARIZONA_IRQ_ISRC3_CFG_ERR] = { + .reg_offset = 3, .mask = ARIZONA_V2_ISRC3_CFG_ERR_EINT1 + }, + [ARIZONA_IRQ_HP3R_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP3R_DONE_EINT1 + }, + [ARIZONA_IRQ_HP3L_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP3L_DONE_EINT1 + }, + [ARIZONA_IRQ_HP2R_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP2R_DONE_EINT1 + }, + [ARIZONA_IRQ_HP2L_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP2L_DONE_EINT1 + }, + [ARIZONA_IRQ_HP1R_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP1R_DONE_EINT1 + }, + [ARIZONA_IRQ_HP1L_DONE] = { + .reg_offset = 3, .mask = ARIZONA_HP1L_DONE_EINT1 + }, + + [ARIZONA_IRQ_BOOT_DONE] = { + .reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1 + }, + [ARIZONA_IRQ_ASRC_CFG_ERR] = { + .reg_offset = 4, .mask = ARIZONA_V2_ASRC_CFG_ERR_EINT1 + }, + [ARIZONA_IRQ_FLL2_CLOCK_OK] = { + .reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1 + }, + [ARIZONA_IRQ_FLL1_CLOCK_OK] = { + .reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1 + }, + + [ARIZONA_IRQ_DSP_SHARED_WR_COLL] = { + .reg_offset = 5, .mask = ARIZONA_DSP_SHARED_WR_COLL_EINT1 + }, + [ARIZONA_IRQ_SPK_SHUTDOWN] = { + .reg_offset = 5, .mask = ARIZONA_SPK_SHUTDOWN_EINT1 + }, + [ARIZONA_IRQ_SPK1R_SHORT] = { + .reg_offset = 5, .mask = ARIZONA_SPK1R_SHORT_EINT1 + }, + [ARIZONA_IRQ_SPK1L_SHORT] = { + .reg_offset = 5, .mask = ARIZONA_SPK1L_SHORT_EINT1 + }, + [ARIZONA_IRQ_HP3R_SC_NEG] = { + .reg_offset = 5, .mask = ARIZONA_HP3R_SC_NEG_EINT1 + }, + [ARIZONA_IRQ_HP3R_SC_POS] = { + .reg_offset = 5, .mask = 
ARIZONA_HP3R_SC_POS_EINT1 + }, + [ARIZONA_IRQ_HP3L_SC_NEG] = { + .reg_offset = 5, .mask = ARIZONA_HP3L_SC_NEG_EINT1 + }, + [ARIZONA_IRQ_HP3L_SC_POS] = { + .reg_offset = 5, .mask = ARIZONA_HP3L_SC_POS_EINT1 + }, + [ARIZONA_IRQ_HP2R_SC_NEG] = { + .reg_offset = 5, .mask = ARIZONA_HP2R_SC_NEG_EINT1 + }, + [ARIZONA_IRQ_HP2R_SC_POS] = { + .reg_offset = 5, .mask = ARIZONA_HP2R_SC_POS_EINT1 + }, + [ARIZONA_IRQ_HP2L_SC_NEG] = { + .reg_offset = 5, .mask = ARIZONA_HP2L_SC_NEG_EINT1 + }, + [ARIZONA_IRQ_HP2L_SC_POS] = { + .reg_offset = 5, .mask = ARIZONA_HP2L_SC_POS_EINT1 + }, + [ARIZONA_IRQ_HP1R_SC_NEG] = { + .reg_offset = 5, .mask = ARIZONA_HP1R_SC_NEG_EINT1 + }, + [ARIZONA_IRQ_HP1R_SC_POS] = { + .reg_offset = 5, .mask = ARIZONA_HP1R_SC_POS_EINT1 + }, + [ARIZONA_IRQ_HP1L_SC_NEG] = { + .reg_offset = 5, .mask = ARIZONA_HP1L_SC_NEG_EINT1 + }, + [ARIZONA_IRQ_HP1L_SC_POS] = { + .reg_offset = 5, .mask = ARIZONA_HP1L_SC_POS_EINT1 + }, +}; + +const struct regmap_irq_chip wm5110_revd_irq = { + .name = "wm5110 IRQ", + .status_base = ARIZONA_INTERRUPT_STATUS_1, + .mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK, + .ack_base = ARIZONA_INTERRUPT_STATUS_1, + .num_regs = 6, + .irqs = wm5110_revd_irqs, + .num_irqs = ARRAY_SIZE(wm5110_revd_irqs), +}; +EXPORT_SYMBOL_GPL(wm5110_revd_irq); + static const struct reg_default wm5110_reg_default[] = { { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */ { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */ @@ -1274,12 +1489,14 @@ static const struct reg_default wm5110_reg_default[] = { { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */ { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */ { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */ + { 0x00000D0D, 0xFFFF }, /* R3341 - Interrupt Status 6 Mask */ { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */ { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */ { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */ { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */ { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */ + { 0x00000D1D, 0xFFFF }, /* R3357 - IRQ2 Status 6 Mask */ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */ @@ -2311,22 +2528,26 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) case ARIZONA_INTERRUPT_STATUS_3: case ARIZONA_INTERRUPT_STATUS_4: case ARIZONA_INTERRUPT_STATUS_5: + case ARIZONA_INTERRUPT_STATUS_6: case ARIZONA_INTERRUPT_STATUS_1_MASK: case ARIZONA_INTERRUPT_STATUS_2_MASK: case ARIZONA_INTERRUPT_STATUS_3_MASK: case ARIZONA_INTERRUPT_STATUS_4_MASK: case ARIZONA_INTERRUPT_STATUS_5_MASK: + case ARIZONA_INTERRUPT_STATUS_6_MASK: case ARIZONA_INTERRUPT_CONTROL: case ARIZONA_IRQ2_STATUS_1: case ARIZONA_IRQ2_STATUS_2: case ARIZONA_IRQ2_STATUS_3: case ARIZONA_IRQ2_STATUS_4: case ARIZONA_IRQ2_STATUS_5: + case ARIZONA_IRQ2_STATUS_6: case ARIZONA_IRQ2_STATUS_1_MASK: case ARIZONA_IRQ2_STATUS_2_MASK: case ARIZONA_IRQ2_STATUS_3_MASK: case ARIZONA_IRQ2_STATUS_4_MASK: case ARIZONA_IRQ2_STATUS_5_MASK: + case ARIZONA_IRQ2_STATUS_6_MASK: case ARIZONA_IRQ2_CONTROL: case ARIZONA_INTERRUPT_RAW_STATUS_2: case ARIZONA_INTERRUPT_RAW_STATUS_3: @@ -2335,6 +2556,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg) case ARIZONA_INTERRUPT_RAW_STATUS_6: case ARIZONA_INTERRUPT_RAW_STATUS_7: case ARIZONA_INTERRUPT_RAW_STATUS_8: + case ARIZONA_INTERRUPT_RAW_STATUS_9: case 
ARIZONA_IRQ_PIN_STATUS: case ARIZONA_AOD_WKUP_AND_TRIG: case ARIZONA_AOD_IRQ1: @@ -2610,11 +2832,13 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg) case ARIZONA_INTERRUPT_STATUS_3: case ARIZONA_INTERRUPT_STATUS_4: case ARIZONA_INTERRUPT_STATUS_5: + case ARIZONA_INTERRUPT_STATUS_6: case ARIZONA_IRQ2_STATUS_1: case ARIZONA_IRQ2_STATUS_2: case ARIZONA_IRQ2_STATUS_3: case ARIZONA_IRQ2_STATUS_4: case ARIZONA_IRQ2_STATUS_5: + case ARIZONA_IRQ2_STATUS_6: case ARIZONA_INTERRUPT_RAW_STATUS_2: case ARIZONA_INTERRUPT_RAW_STATUS_3: case ARIZONA_INTERRUPT_RAW_STATUS_4: @@ -2622,6 +2846,7 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg) case ARIZONA_INTERRUPT_RAW_STATUS_6: case ARIZONA_INTERRUPT_RAW_STATUS_7: case ARIZONA_INTERRUPT_RAW_STATUS_8: + case ARIZONA_INTERRUPT_RAW_STATUS_9: case ARIZONA_IRQ_PIN_STATUS: case ARIZONA_AOD_WKUP_AND_TRIG: case ARIZONA_AOD_IRQ1: diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c index f919def05e24..6a16a8a6f9fa 100644 --- a/drivers/mfd/wm8350-i2c.c +++ b/drivers/mfd/wm8350-i2c.c @@ -58,10 +58,10 @@ static int wm8350_i2c_remove(struct i2c_client *i2c) } static const struct i2c_device_id wm8350_i2c_id[] = { - { "wm8350", 0 }, - { "wm8351", 0 }, - { "wm8352", 0 }, - { } + { "wm8350", 0 }, + { "wm8351", 0 }, + { "wm8352", 0 }, + { } }; MODULE_DEVICE_TABLE(i2c, wm8350_i2c_id); diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c index cd01f7962dfd..813ff50f95b6 100644 --- a/drivers/mfd/wm8350-irq.c +++ b/drivers/mfd/wm8350-irq.c @@ -497,7 +497,8 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq, if (pdata && pdata->irq_base > 0) irq_base = pdata->irq_base; - wm8350->irq_base = irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0); + wm8350->irq_base = + irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0); if (wm8350->irq_base < 0) { dev_warn(wm8350->dev, "Allocating irqs failed with %d\n", wm8350->irq_base); diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c index 2fbce9c5950b..770a25696468 100644 --- a/drivers/mfd/wm8994-regmap.c +++ b/drivers/mfd/wm8994-regmap.c @@ -123,14 +123,23 @@ static struct reg_default wm1811_defaults[] = { { 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */ { 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */ { 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */ + { 0x0411, 0x0000 }, /* R1041 - AIF1 ADC2 Filters */ { 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */ { 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */ + { 0x0422, 0x0200 }, /* R1058 - AIF1 DAC2 Filters (1) */ + { 0x0423, 0x0010 }, /* R1059 - AIF1 DAC2 Filters (2) */ { 0x0430, 0x0068 }, /* R1072 - AIF1 DAC1 Noise Gate */ + { 0x0431, 0x0068 }, /* R1073 - AIF1 DAC2 Noise Gate */ { 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */ { 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */ { 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */ { 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */ { 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */ + { 0x0450, 0x0098 }, /* R1104 - AIF1 DRC2 (1) */ + { 0x0451, 0x0845 }, /* R1105 - AIF1 DRC2 (2) */ + { 0x0452, 0x0000 }, /* R1106 - AIF1 DRC2 (3) */ + { 0x0453, 0x0000 }, /* R1107 - AIF1 DRC2 (4) */ + { 0x0454, 0x0000 }, /* R1108 - AIF1 DRC2 (5) */ { 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */ { 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */ { 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */ @@ -152,6 +161,27 @@ static struct reg_default wm1811_defaults[] = { { 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */ { 
0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */ { 0x0494, 0x0000 }, /* R1172 - AIF1 DAC1 EQ Band 1 C */ + { 0x04A0, 0x6318 }, /* R1184 - AIF1 DAC2 EQ Gains (1) */ + { 0x04A1, 0x6300 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */ + { 0x04A2, 0x0FCA }, /* R1186 - AIF1 DAC2 EQ Band 1 A */ + { 0x04A3, 0x0400 }, /* R1187 - AIF1 DAC2 EQ Band 1 B */ + { 0x04A4, 0x00D8 }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */ + { 0x04A5, 0x1EB5 }, /* R1189 - AIF1 DAC2 EQ Band 2 A */ + { 0x04A6, 0xF145 }, /* R1190 - AIF1 DAC2 EQ Band 2 B */ + { 0x04A7, 0x0B75 }, /* R1191 - AIF1 DAC2 EQ Band 2 C */ + { 0x04A8, 0x01C5 }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */ + { 0x04A9, 0x1C58 }, /* R1193 - AIF1 DAC2 EQ Band 3 A */ + { 0x04AA, 0xF373 }, /* R1194 - AIF1 DAC2 EQ Band 3 B */ + { 0x04AB, 0x0A54 }, /* R1195 - AIF1 DAC2 EQ Band 3 C */ + { 0x04AC, 0x0558 }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */ + { 0x04AD, 0x168E }, /* R1197 - AIF1 DAC2 EQ Band 4 A */ + { 0x04AE, 0xF829 }, /* R1198 - AIF1 DAC2 EQ Band 4 B */ + { 0x04AF, 0x07AD }, /* R1199 - AIF1 DAC2 EQ Band 4 C */ + { 0x04B0, 0x1103 }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */ + { 0x04B1, 0x0564 }, /* R1201 - AIF1 DAC2 EQ Band 5 A */ + { 0x04B2, 0x0559 }, /* R1202 - AIF1 DAC2 EQ Band 5 B */ + { 0x04B3, 0x4000 }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */ + { 0x04B4, 0x0000 }, /* R1204 - AIF1 DAC2 EQ Band 1 C */ { 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */ { 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */ { 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */ @@ -194,6 +224,8 @@ static struct reg_default wm1811_defaults[] = { { 0x0605, 0x0000 }, /* R1541 - AIF2ADC Right Mixer Routing */ { 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */ { 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */ + { 0x0608, 0x0000 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */ + { 0x0609, 0x0000 }, /* R1545 - AIF1 ADC2 Right Mixer Routing */ { 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */ { 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */ { 0x0612, 0x02C0 }, /* R1554 - AIF2TX Left Volume */ @@ -846,14 +878,23 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg) case WM8994_AIF1_DAC1_LEFT_VOLUME: case WM8994_AIF1_DAC1_RIGHT_VOLUME: case WM8994_AIF1_ADC1_FILTERS: + case WM8994_AIF1_ADC2_FILTERS: case WM8994_AIF1_DAC1_FILTERS_1: case WM8994_AIF1_DAC1_FILTERS_2: + case WM8994_AIF1_DAC2_FILTERS_1: + case WM8994_AIF1_DAC2_FILTERS_2: case WM8958_AIF1_DAC1_NOISE_GATE: + case WM8958_AIF1_DAC2_NOISE_GATE: case WM8994_AIF1_DRC1_1: case WM8994_AIF1_DRC1_2: case WM8994_AIF1_DRC1_3: case WM8994_AIF1_DRC1_4: case WM8994_AIF1_DRC1_5: + case WM8994_AIF1_DRC2_1: + case WM8994_AIF1_DRC2_2: + case WM8994_AIF1_DRC2_3: + case WM8994_AIF1_DRC2_4: + case WM8994_AIF1_DRC2_5: case WM8994_AIF1_DAC1_EQ_GAINS_1: case WM8994_AIF1_DAC1_EQ_GAINS_2: case WM8994_AIF1_DAC1_EQ_BAND_1_A: @@ -875,6 +916,27 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg) case WM8994_AIF1_DAC1_EQ_BAND_5_B: case WM8994_AIF1_DAC1_EQ_BAND_5_PG: case WM8994_AIF1_DAC1_EQ_BAND_1_C: + case WM8994_AIF1_DAC2_EQ_GAINS_1: + case WM8994_AIF1_DAC2_EQ_GAINS_2: + case WM8994_AIF1_DAC2_EQ_BAND_1_A: + case WM8994_AIF1_DAC2_EQ_BAND_1_B: + case WM8994_AIF1_DAC2_EQ_BAND_1_PG: + case WM8994_AIF1_DAC2_EQ_BAND_2_A: + case WM8994_AIF1_DAC2_EQ_BAND_2_B: + case WM8994_AIF1_DAC2_EQ_BAND_2_C: + case WM8994_AIF1_DAC2_EQ_BAND_2_PG: + case WM8994_AIF1_DAC2_EQ_BAND_3_A: + case WM8994_AIF1_DAC2_EQ_BAND_3_B: + case WM8994_AIF1_DAC2_EQ_BAND_3_C: + case WM8994_AIF1_DAC2_EQ_BAND_3_PG: + case 
WM8994_AIF1_DAC2_EQ_BAND_4_A: + case WM8994_AIF1_DAC2_EQ_BAND_4_B: + case WM8994_AIF1_DAC2_EQ_BAND_4_C: + case WM8994_AIF1_DAC2_EQ_BAND_4_PG: + case WM8994_AIF1_DAC2_EQ_BAND_5_A: + case WM8994_AIF1_DAC2_EQ_BAND_5_B: + case WM8994_AIF1_DAC2_EQ_BAND_5_PG: + case WM8994_AIF1_DAC2_EQ_BAND_1_C: case WM8994_AIF2_ADC_LEFT_VOLUME: case WM8994_AIF2_ADC_RIGHT_VOLUME: case WM8994_AIF2_DAC_LEFT_VOLUME: @@ -917,6 +979,8 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg) case WM8994_DAC2_RIGHT_MIXER_ROUTING: case WM8994_AIF1_ADC1_LEFT_MIXER_ROUTING: case WM8994_AIF1_ADC1_RIGHT_MIXER_ROUTING: + case WM8994_AIF1_ADC2_LEFT_MIXER_ROUTING: + case WM8994_AIF1_ADC2_RIGHT_MIXER_ROUTING: case WM8994_DAC1_LEFT_VOLUME: case WM8994_DAC1_RIGHT_VOLUME: case WM8994_DAC2_LEFT_VOLUME: diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c index c7a81da64ee1..510da3b52324 100644 --- a/drivers/mfd/wm8997-tables.c +++ b/drivers/mfd/wm8997-tables.c @@ -65,11 +65,11 @@ static const struct regmap_irq wm8997_irqs[ARIZONA_NUM_IRQ] = { [ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 }, [ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 }, - [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = { - .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1 + [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1 }, - [ARIZONA_IRQ_SPK_SHUTDOWN] = { - .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1 + [ARIZONA_IRQ_SPK_OVERHEAT] = { + .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1 }, [ARIZONA_IRQ_HPDET] = { .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1 @@ -174,10 +174,10 @@ static const struct reg_default wm8997_reg_default[] = { { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */ - { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */ - { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */ - { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */ - { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */ + { 0x00000068, 0x01FF }, /* R104 - AlwaysOn Triggers Seq Select 3 */ + { 0x00000069, 0x01FF }, /* R105 - AlwaysOn Triggers Seq Select 4 */ + { 0x0000006A, 0x01FF }, /* R106 - AlwaysOn Triggers Seq Select 5 */ + { 0x0000006B, 0x01FF }, /* R107 - AlwaysOn Triggers Seq Select 6 */ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */ diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index ee9402324a23..b841180c7c74 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -51,16 +51,6 @@ config AD525X_DPOT_SPI To compile this driver as a module, choose M here: the module will be called ad525x_dpot-spi. -config ATMEL_PWM - tristate "Atmel AT32/AT91 PWM support" - depends on HAVE_CLK - depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 - help - This option enables device driver support for the PWM channels - on certain Atmel processors. Pulse Width Modulation is used for - purposes including software controlled power-efficient backlights - on LCD displays, motor control, and waveform generation. 
- config ATMEL_TCLIB bool "Atmel AT32/AT91 Timer/Counter Library" depends on (AVR32 || ARCH_AT91) diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index d59ce1261b38..5497d026e651 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -7,7 +7,6 @@ obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o obj-$(CONFIG_INTEL_MID_PTI) += pti.o -obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o obj-$(CONFIG_BMP085) += bmp085.o diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c deleted file mode 100644 index a6dc56e1bc58..000000000000 --- a/drivers/misc/atmel_pwm.c +++ /dev/null @@ -1,402 +0,0 @@ -#include <linux/module.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> -#include <linux/atmel_pwm.h> - - -/* - * This is a simple driver for the PWM controller found in various newer - * Atmel SOCs, including the AVR32 series and the AT91sam9263. - * - * Chips with current Linux ports have only 4 PWM channels, out of max 32. - * AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux). - * Docs are inconsistent about the width of the channel counter registers; - * it's at least 16 bits, but several places say 20 bits. - */ -#define PWM_NCHAN 4 /* max 32 */ - -struct pwm { - spinlock_t lock; - struct platform_device *pdev; - u32 mask; - int irq; - void __iomem *base; - struct clk *clk; - struct pwm_channel *channel[PWM_NCHAN]; - void (*handler[PWM_NCHAN])(struct pwm_channel *); -}; - - -/* global PWM controller registers */ -#define PWM_MR 0x00 -#define PWM_ENA 0x04 -#define PWM_DIS 0x08 -#define PWM_SR 0x0c -#define PWM_IER 0x10 -#define PWM_IDR 0x14 -#define PWM_IMR 0x18 -#define PWM_ISR 0x1c - -static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val) -{ - __raw_writel(val, p->base + offset); -} - -static inline u32 pwm_readl(const struct pwm *p, unsigned offset) -{ - return __raw_readl(p->base + offset); -} - -static inline void __iomem *pwmc_regs(const struct pwm *p, int index) -{ - return p->base + 0x200 + index * 0x20; -} - -static struct pwm *pwm; - -static void pwm_dumpregs(struct pwm_channel *ch, char *tag) -{ - struct device *dev = &pwm->pdev->dev; - - dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n", - tag, - pwm_readl(pwm, PWM_MR), - pwm_readl(pwm, PWM_SR), - pwm_readl(pwm, PWM_IMR)); - dev_dbg(dev, - "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n", - ch->index, - pwm_channel_readl(ch, PWM_CMR), - pwm_channel_readl(ch, PWM_CDTY), - pwm_channel_readl(ch, PWM_CPRD), - pwm_channel_readl(ch, PWM_CCNT)); -} - - -/** - * pwm_channel_alloc - allocate an unused PWM channel - * @index: identifies the channel - * @ch: structure to be initialized - * - * Drivers allocate PWM channels according to the board's wiring, and - * matching board-specific setup code. Returns zero or negative errno. 
- */ -int pwm_channel_alloc(int index, struct pwm_channel *ch) -{ - unsigned long flags; - int status = 0; - - if (!pwm) - return -EPROBE_DEFER; - - if (!(pwm->mask & 1 << index)) - return -ENODEV; - - if (index < 0 || index >= PWM_NCHAN || !ch) - return -EINVAL; - memset(ch, 0, sizeof *ch); - - spin_lock_irqsave(&pwm->lock, flags); - if (pwm->channel[index]) - status = -EBUSY; - else { - clk_enable(pwm->clk); - - ch->regs = pwmc_regs(pwm, index); - ch->index = index; - - /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */ - ch->mck = clk_get_rate(pwm->clk); - - pwm->channel[index] = ch; - pwm->handler[index] = NULL; - - /* channel and irq are always disabled when we return */ - pwm_writel(pwm, PWM_DIS, 1 << index); - pwm_writel(pwm, PWM_IDR, 1 << index); - } - spin_unlock_irqrestore(&pwm->lock, flags); - return status; -} -EXPORT_SYMBOL(pwm_channel_alloc); - -static int pwmcheck(struct pwm_channel *ch) -{ - int index; - - if (!pwm) - return -ENODEV; - if (!ch) - return -EINVAL; - index = ch->index; - if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch) - return -EINVAL; - - return index; -} - -/** - * pwm_channel_free - release a previously allocated channel - * @ch: the channel being released - * - * The channel is completely shut down (counter and IRQ disabled), - * and made available for re-use. Returns zero, or negative errno. - */ -int pwm_channel_free(struct pwm_channel *ch) -{ - unsigned long flags; - int t; - - spin_lock_irqsave(&pwm->lock, flags); - t = pwmcheck(ch); - if (t >= 0) { - pwm->channel[t] = NULL; - pwm->handler[t] = NULL; - - /* channel and irq are always disabled when we return */ - pwm_writel(pwm, PWM_DIS, 1 << t); - pwm_writel(pwm, PWM_IDR, 1 << t); - - clk_disable(pwm->clk); - t = 0; - } - spin_unlock_irqrestore(&pwm->lock, flags); - return t; -} -EXPORT_SYMBOL(pwm_channel_free); - -int __pwm_channel_onoff(struct pwm_channel *ch, int enabled) -{ - unsigned long flags; - int t; - - /* OMITTED FUNCTIONALITY: starting several channels in synch */ - - spin_lock_irqsave(&pwm->lock, flags); - t = pwmcheck(ch); - if (t >= 0) { - pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t); - t = 0; - pwm_dumpregs(ch, enabled ? "enable" : "disable"); - } - spin_unlock_irqrestore(&pwm->lock, flags); - - return t; -} -EXPORT_SYMBOL(__pwm_channel_onoff); - -/** - * pwm_clk_alloc - allocate and configure CLKA or CLKB - * @prescale: from 0..10, the power of two used to divide MCK - * @div: from 1..255, the linear divisor to use - * - * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated - * clock will run with a period of (2^prescale * div) / MCK, or twice as - * long if center aligned PWM output is used. The clock must later be - * deconfigured using pwm_clk_free(). - */ -int pwm_clk_alloc(unsigned prescale, unsigned div) -{ - unsigned long flags; - u32 mr; - u32 val = (prescale << 8) | div; - int ret = -EBUSY; - - if (prescale >= 10 || div == 0 || div > 255) - return -EINVAL; - - spin_lock_irqsave(&pwm->lock, flags); - mr = pwm_readl(pwm, PWM_MR); - if ((mr & 0xffff) == 0) { - mr |= val; - ret = PWM_CPR_CLKA; - } else if ((mr & (0xffff << 16)) == 0) { - mr |= val << 16; - ret = PWM_CPR_CLKB; - } - if (ret > 0) - pwm_writel(pwm, PWM_MR, mr); - spin_unlock_irqrestore(&pwm->lock, flags); - return ret; -} -EXPORT_SYMBOL(pwm_clk_alloc); - -/** - * pwm_clk_free - deconfigure and release CLKA or CLKB - * - * Reverses the effect of pwm_clk_alloc(). 
- */ -void pwm_clk_free(unsigned clk) -{ - unsigned long flags; - u32 mr; - - spin_lock_irqsave(&pwm->lock, flags); - mr = pwm_readl(pwm, PWM_MR); - if (clk == PWM_CPR_CLKA) - pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0)); - if (clk == PWM_CPR_CLKB) - pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16)); - spin_unlock_irqrestore(&pwm->lock, flags); -} -EXPORT_SYMBOL(pwm_clk_free); - -/** - * pwm_channel_handler - manage channel's IRQ handler - * @ch: the channel - * @handler: the handler to use, possibly NULL - * - * If the handler is non-null, the handler will be called after every - * period of this PWM channel. If the handler is null, this channel - * won't generate an IRQ. - */ -int pwm_channel_handler(struct pwm_channel *ch, - void (*handler)(struct pwm_channel *ch)) -{ - unsigned long flags; - int t; - - spin_lock_irqsave(&pwm->lock, flags); - t = pwmcheck(ch); - if (t >= 0) { - pwm->handler[t] = handler; - pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t); - t = 0; - } - spin_unlock_irqrestore(&pwm->lock, flags); - - return t; -} -EXPORT_SYMBOL(pwm_channel_handler); - -static irqreturn_t pwm_irq(int id, void *_pwm) -{ - struct pwm *p = _pwm; - irqreturn_t handled = IRQ_NONE; - u32 irqstat; - int index; - - spin_lock(&p->lock); - - /* ack irqs, then handle them */ - irqstat = pwm_readl(pwm, PWM_ISR); - - while (irqstat) { - struct pwm_channel *ch; - void (*handler)(struct pwm_channel *ch); - - index = ffs(irqstat) - 1; - irqstat &= ~(1 << index); - ch = pwm->channel[index]; - handler = pwm->handler[index]; - if (handler && ch) { - spin_unlock(&p->lock); - handler(ch); - spin_lock(&p->lock); - handled = IRQ_HANDLED; - } - } - - spin_unlock(&p->lock); - return handled; -} - -static int __init pwm_probe(struct platform_device *pdev) -{ - struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - int irq = platform_get_irq(pdev, 0); - u32 *mp = pdev->dev.platform_data; - struct pwm *p; - int status = -EIO; - - if (pwm) - return -EBUSY; - if (!r || irq < 0 || !mp || !*mp) - return -ENODEV; - if (*mp & ~((1<<PWM_NCHAN)-1)) { - dev_warn(&pdev->dev, "mask 0x%x ... more than %d channels\n", - *mp, PWM_NCHAN); - return -EINVAL; - } - - p = kzalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; - - spin_lock_init(&p->lock); - p->pdev = pdev; - p->mask = *mp; - p->irq = irq; - p->base = ioremap(r->start, resource_size(r)); - if (!p->base) - goto fail; - p->clk = clk_get(&pdev->dev, "pwm_clk"); - if (IS_ERR(p->clk)) { - status = PTR_ERR(p->clk); - p->clk = NULL; - goto fail; - } - - status = request_irq(irq, pwm_irq, 0, pdev->name, p); - if (status < 0) - goto fail; - - pwm = p; - platform_set_drvdata(pdev, p); - - return 0; - -fail: - if (p->clk) - clk_put(p->clk); - if (p->base) - iounmap(p->base); - - kfree(p); - return status; -} - -static int __exit pwm_remove(struct platform_device *pdev) -{ - struct pwm *p = platform_get_drvdata(pdev); - - if (p != pwm) - return -EINVAL; - - clk_enable(pwm->clk); - pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1); - pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1); - clk_disable(pwm->clk); - - pwm = NULL; - - free_irq(p->irq, p); - clk_put(p->clk); - iounmap(p->base); - kfree(p); - - return 0; -} - -static struct platform_driver atmel_pwm_driver = { - .driver = { - .name = "atmel_pwm", - .owner = THIS_MODULE, - }, - .remove = __exit_p(pwm_remove), - - /* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states; - * and all AT91sam9263 states, albeit at reduced clock rate if - * MCK becomes the slow clock (i.e. what Linux labels STR). 
- */ -}; - -module_platform_driver_probe(atmel_pwm_driver, pwm_probe); - -MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:atmel_pwm"); diff --git a/drivers/misc/fuse/Makefile b/drivers/misc/fuse/Makefile new file mode 100644 index 000000000000..0679c4febc89 --- /dev/null +++ b/drivers/misc/fuse/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ARCH_TEGRA) += tegra/ diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 7ffdb589841e..7e1efd5f58f0 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -79,6 +79,11 @@ static void firmware_load(const struct firmware *fw, void *context) u32 jedec_id; u32 status; + if (fw == NULL) { + dev_err(&spi->dev, "Cannot load firmware, aborting\n"); + return; + } + if (fw->size == 0) { dev_err(&spi->dev, "Error: Firmware size is 0!\n"); return; diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 324e1de93687..2da05c0e113d 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -601,6 +601,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) cl->timer_count = MEI_CONNECT_TIMEOUT; list_add_tail(&cb->list, &dev->ctrl_rd_list.list); } else { + cl->state = MEI_FILE_INITIALIZING; list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c index 3095fc514a65..5ccc23bc7690 100644 --- a/drivers/misc/mei/nfc.c +++ b/drivers/misc/mei/nfc.c @@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) ndev = (struct mei_nfc_dev *) cldev->priv_data; dev = ndev->cl->dev; + err = -ENOMEM; mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL); if (!mei_buf) - return -ENOMEM; + goto out; hdr = (struct mei_nfc_hci_hdr *) mei_buf; hdr->cmd = MEI_NFC_CMD_HCI_SEND; @@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) hdr->data_size = length; memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length); - err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE); if (err < 0) - return err; - - kfree(mei_buf); + goto out; if (!wait_event_interruptible_timeout(ndev->send_wq, ndev->recv_req_id == ndev->req_id, HZ)) { @@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) } else { ndev->req_id++; } - +out: + kfree(mei_buf); return err; } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 452782bffebc..ede41f05c392 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2028,8 +2028,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE && - !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) + if (req->cmd_flags & REQ_SECURE) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); @@ -2432,6 +2431,8 @@ static int mmc_blk_probe(struct mmc_card *card) if (!(card->csd.cmdclass & CCC_BLOCK_READ)) return -ENODEV; + mmc_fixup_device(card, blk_fixups); + md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); @@ -2446,7 +2447,6 @@ static int mmc_blk_probe(struct mmc_card *card) goto out; mmc_set_drvdata(card, md); - mmc_fixup_device(card, blk_fixups); if (mmc_add_disk(md)) goto out; diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index d2dbf02022bd..8a1f1240e058 100644 --- 
a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -180,7 +180,6 @@ static int mmc_bus_resume(struct device *dev) #endif #ifdef CONFIG_PM_RUNTIME - static int mmc_runtime_suspend(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); @@ -196,17 +195,10 @@ static int mmc_runtime_resume(struct device *dev) return host->bus_ops->runtime_resume(host); } - -static int mmc_runtime_idle(struct device *dev) -{ - return 0; -} - #endif /* !CONFIG_PM_RUNTIME */ static const struct dev_pm_ops mmc_bus_pm_ops = { - SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, - mmc_runtime_idle) + SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume) }; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7dc0c85fdb60..d03a080fb9cd 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2102,7 +2102,8 @@ EXPORT_SYMBOL(mmc_can_sanitize); int mmc_can_secure_erase_trim(struct mmc_card *card) { - if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) + if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) return 1; return 0; } diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 793c6f7ddb04..1eda8dd8c867 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -324,13 +324,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) } } + /* + * The EXT_CSD format is meant to be forward compatible. As long + * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV + * are authorized, see JEDEC JESD84-B50 section B.8. + */ card->ext_csd.rev = ext_csd[EXT_CSD_REV]; - if (card->ext_csd.rev > 7) { - pr_err("%s: unrecognised EXT_CSD revision %d\n", - mmc_hostname(card->host), card->ext_csd.rev); - err = -EINVAL; - goto out; - } card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c index 6c36fccaa1ec..dd1d1e0fe322 100644 --- a/drivers/mmc/core/quirks.c +++ b/drivers/mmc/core/quirks.c @@ -91,7 +91,7 @@ void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table) (f->cis_device == card->cis.device || f->cis_device == (u16) SDIO_ANY_ID) && rev >= f->rev_start && rev <= f->rev_end) { - dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup); + dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup); f->vendor_fixup(card, f->data); } } diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 274ef00b4463..48d0c93ba25a 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -184,6 +184,9 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) mmc_delay(10); } + if (!i) + pr_err("%s: card never left busy state\n", mmc_hostname(host)); + if (rocr && !mmc_host_is_spi(host)) *rocr = cmd.resp[0]; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index a5652548230a..451135822464 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -290,6 +290,18 @@ config MMC_MOXART be found on some embedded hardware such as UC-7112-LX. If you have a controller with this interface, say Y here. +config MMC_SDHCI_ST + tristate "SDHCI support on STMicroelectronics SoC" + depends on ARCH_STI + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + help + This selects the Secure Digital Host Controller Interface in + STMicroelectronics SoCs. 
+ + If you have a controller with this interface, say Y or M here. + If unsure, say N. + config MMC_OMAP tristate "TI OMAP Multimedia Card Interface support" depends on ARCH_OMAP @@ -303,6 +315,7 @@ config MMC_OMAP config MMC_OMAP_HS tristate "TI OMAP High Speed Multimedia Card Interface support" + depends on HAS_DMA depends on ARCH_OMAP2PLUS || COMPILE_TEST help This selects the TI OMAP High Speed Multimedia card Interface. @@ -343,7 +356,7 @@ config MMC_ATMELMCI config MMC_SDHCI_MSM tristate "Qualcomm SDHCI Controller Support" - depends on ARCH_QCOM + depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on MMC_SDHCI_PLTFM help This selects the Secure Digital Host Controller Interface (SDHCI) @@ -440,6 +453,7 @@ config MMC_SPI config MMC_S3C tristate "Samsung S3C SD/MMC Card Interface support" depends on ARCH_S3C24XX + depends on S3C24XX_DMAC help This selects a driver for the MCI interface found in Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. @@ -477,15 +491,6 @@ config MMC_S3C_DMA working properly and needs to be debugged before this option is useful. -config MMC_S3C_PIODMA - bool "Support for both PIO and DMA" - help - Compile both the PIO and DMA transfer routines into the - driver and let the platform select at run-time which one - is best. - - See notes for the DMA option. - endchoice config MMC_SDRICOH_CS @@ -623,7 +628,7 @@ config MMC_DW_PCI config MMC_SH_MMCIF tristate "SuperH Internal MMCIF support" - depends on MMC_BLOCK + depends on MMC_BLOCK && HAS_DMA depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST help This selects the MMC Host Interface controller (MMCIF). @@ -697,6 +702,7 @@ config MMC_WMT config MMC_USDHI6ROL0 tristate "Renesas USDHI6ROL0 SD/SDIO Host Controller support" + depends on HAS_DMA help This selects support for the Renesas USDHI6ROL0 SD/SDIO Host Controller diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 7f81ddf1dd2c..f211eede8db5 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -68,6 +68,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o +obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index f5443a6c4915..9c9f6af29251 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -32,6 +32,7 @@ * (the low to high transition will not occur). 
*/ +#include <linux/clk.h> #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> @@ -90,7 +91,7 @@ struct au1xmmc_host { struct mmc_request *mrq; u32 flags; - u32 iobase; + void __iomem *iobase; u32 clock; u32 bus_width; u32 power_mode; @@ -118,6 +119,7 @@ struct au1xmmc_host { struct au1xmmc_platform_data *platdata; struct platform_device *pdev; struct resource *ioarea; + struct clk *clk; }; /* Status flags used by the host structure */ @@ -162,32 +164,33 @@ static inline int has_dbdma(void) static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) { - u32 val = au_readl(HOST_CONFIG(host)); + u32 val = __raw_readl(HOST_CONFIG(host)); val |= mask; - au_writel(val, HOST_CONFIG(host)); - au_sync(); + __raw_writel(val, HOST_CONFIG(host)); + wmb(); /* drain writebuffer */ } static inline void FLUSH_FIFO(struct au1xmmc_host *host) { - u32 val = au_readl(HOST_CONFIG2(host)); + u32 val = __raw_readl(HOST_CONFIG2(host)); - au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host)); - au_sync_delay(1); + __raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + mdelay(1); /* SEND_STOP will turn off clock control - this re-enables it */ val &= ~SD_CONFIG2_DF; - au_writel(val, HOST_CONFIG2(host)); - au_sync(); + __raw_writel(val, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ } static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask) { - u32 val = au_readl(HOST_CONFIG(host)); + u32 val = __raw_readl(HOST_CONFIG(host)); val &= ~mask; - au_writel(val, HOST_CONFIG(host)); - au_sync(); + __raw_writel(val, HOST_CONFIG(host)); + wmb(); /* drain writebuffer */ } static inline void SEND_STOP(struct au1xmmc_host *host) @@ -197,12 +200,13 @@ static inline void SEND_STOP(struct au1xmmc_host *host) WARN_ON(host->status != HOST_S_DATA); host->status = HOST_S_STOP; - config2 = au_readl(HOST_CONFIG2(host)); - au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); - au_sync(); + config2 = __raw_readl(HOST_CONFIG2(host)); + __raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ /* Send the stop command */ - au_writel(STOP_CMD, HOST_CMD(host)); + __raw_writel(STOP_CMD, HOST_CMD(host)); + wmb(); /* drain writebuffer */ } static void au1xmmc_set_power(struct au1xmmc_host *host, int state) @@ -296,28 +300,28 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, } } - au_writel(cmd->arg, HOST_CMDARG(host)); - au_sync(); + __raw_writel(cmd->arg, HOST_CMDARG(host)); + wmb(); /* drain writebuffer */ if (wait) IRQ_OFF(host, SD_CONFIG_CR); - au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host)); - au_sync(); + __raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host)); + wmb(); /* drain writebuffer */ /* Wait for the command to go on the line */ - while (au_readl(HOST_CMD(host)) & SD_CMD_GO) + while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO) /* nop */; /* Wait for the command to come back */ if (wait) { - u32 status = au_readl(HOST_STATUS(host)); + u32 status = __raw_readl(HOST_STATUS(host)); while (!(status & SD_STATUS_CR)) - status = au_readl(HOST_STATUS(host)); + status = __raw_readl(HOST_STATUS(host)); /* Clear the CR status */ - au_writel(SD_STATUS_CR, HOST_STATUS(host)); + __raw_writel(SD_STATUS_CR, HOST_STATUS(host)); IRQ_ON(host, SD_CONFIG_CR); } @@ -339,11 +343,11 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) data = mrq->cmd->data; if (status == 0) - status = au_readl(HOST_STATUS(host)); + status = __raw_readl(HOST_STATUS(host)); /* The transaction is really over when the 
SD_STATUS_DB bit is clear */ while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB)) - status = au_readl(HOST_STATUS(host)); + status = __raw_readl(HOST_STATUS(host)); data->error = 0; dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir); @@ -357,7 +361,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) data->error = -EILSEQ; /* Clear the CRC bits */ - au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host)); + __raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host)); data->bytes_xfered = 0; @@ -380,7 +384,7 @@ static void au1xmmc_tasklet_data(unsigned long param) { struct au1xmmc_host *host = (struct au1xmmc_host *)param; - u32 status = au_readl(HOST_STATUS(host)); + u32 status = __raw_readl(HOST_STATUS(host)); au1xmmc_data_complete(host, status); } @@ -412,15 +416,15 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host) max = AU1XMMC_MAX_TRANSFER; for (count = 0; count < max; count++) { - status = au_readl(HOST_STATUS(host)); + status = __raw_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_TH)) break; val = *sg_ptr++; - au_writel((unsigned long)val, HOST_TXPORT(host)); - au_sync(); + __raw_writel((unsigned long)val, HOST_TXPORT(host)); + wmb(); /* drain writebuffer */ } host->pio.len -= count; @@ -472,7 +476,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) max = AU1XMMC_MAX_TRANSFER; for (count = 0; count < max; count++) { - status = au_readl(HOST_STATUS(host)); + status = __raw_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_NE)) break; @@ -494,7 +498,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host) break; } - val = au_readl(HOST_RXPORT(host)); + val = __raw_readl(HOST_RXPORT(host)); if (sg_ptr) *sg_ptr++ = (unsigned char)(val & 0xFF); @@ -537,10 +541,10 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { - r[0] = au_readl(host->iobase + SD_RESP3); - r[1] = au_readl(host->iobase + SD_RESP2); - r[2] = au_readl(host->iobase + SD_RESP1); - r[3] = au_readl(host->iobase + SD_RESP0); + r[0] = __raw_readl(host->iobase + SD_RESP3); + r[1] = __raw_readl(host->iobase + SD_RESP2); + r[2] = __raw_readl(host->iobase + SD_RESP1); + r[3] = __raw_readl(host->iobase + SD_RESP0); /* The CRC is omitted from the response, so really * we only got 120 bytes, but the engine expects @@ -559,7 +563,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) * that means that the OSR data starts at bit 31, * so we can just read RESP0 and return that. 
*/ - cmd->resp[0] = au_readl(host->iobase + SD_RESP0); + cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0); } } @@ -586,7 +590,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) u32 mask = SD_STATUS_DB | SD_STATUS_NE; while((status & mask) != mask) - status = au_readl(HOST_STATUS(host)); + status = __raw_readl(HOST_STATUS(host)); } au1xxx_dbdma_start(channel); @@ -595,24 +599,17 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status) static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate) { - unsigned int pbus = get_au1x00_speed(); - unsigned int divisor; + unsigned int pbus = clk_get_rate(host->clk); + unsigned int divisor = ((pbus / rate) / 2) - 1; u32 config; - /* From databook: - * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1 - */ - pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2); - pbus /= 2; - divisor = ((pbus / rate) / 2) - 1; - - config = au_readl(HOST_CONFIG(host)); + config = __raw_readl(HOST_CONFIG(host)); config &= ~(SD_CONFIG_DIV); config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE; - au_writel(config, HOST_CONFIG(host)); - au_sync(); + __raw_writel(config, HOST_CONFIG(host)); + wmb(); /* drain writebuffer */ } static int au1xmmc_prepare_data(struct au1xmmc_host *host, @@ -636,7 +633,7 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host, if (host->dma.len == 0) return -ETIMEDOUT; - au_writel(data->blksz - 1, HOST_BLKSIZE(host)); + __raw_writel(data->blksz - 1, HOST_BLKSIZE(host)); if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { int i; @@ -723,31 +720,34 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq) static void au1xmmc_reset_controller(struct au1xmmc_host *host) { /* Apply the clock */ - au_writel(SD_ENABLE_CE, HOST_ENABLE(host)); - au_sync_delay(1); + __raw_writel(SD_ENABLE_CE, HOST_ENABLE(host)); + wmb(); /* drain writebuffer */ + mdelay(1); - au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host)); - au_sync_delay(5); + __raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host)); + wmb(); /* drain writebuffer */ + mdelay(5); - au_writel(~0, HOST_STATUS(host)); - au_sync(); + __raw_writel(~0, HOST_STATUS(host)); + wmb(); /* drain writebuffer */ - au_writel(0, HOST_BLKSIZE(host)); - au_writel(0x001fffff, HOST_TIMEOUT(host)); - au_sync(); + __raw_writel(0, HOST_BLKSIZE(host)); + __raw_writel(0x001fffff, HOST_TIMEOUT(host)); + wmb(); /* drain writebuffer */ - au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); - au_sync(); + __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ - au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host)); - au_sync_delay(1); + __raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ + mdelay(1); - au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); - au_sync(); + __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ /* Configure interrupts */ - au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host)); - au_sync(); + __raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host)); + wmb(); /* drain writebuffer */ } @@ -767,7 +767,7 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->clock = ios->clock; } - config2 = au_readl(HOST_CONFIG2(host)); + config2 = __raw_readl(HOST_CONFIG2(host)); switch (ios->bus_width) { case MMC_BUS_WIDTH_8: config2 |= SD_CONFIG2_BB; @@ -780,8 +780,8 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB); break; } - au_writel(config2, 
HOST_CONFIG2(host)); - au_sync(); + __raw_writel(config2, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ } #define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT) @@ -793,7 +793,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id) struct au1xmmc_host *host = dev_id; u32 status; - status = au_readl(HOST_STATUS(host)); + status = __raw_readl(HOST_STATUS(host)); if (!(status & SD_STATUS_I)) return IRQ_NONE; /* not ours */ @@ -839,8 +839,8 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id) status); } - au_writel(status, HOST_STATUS(host)); - au_sync(); + __raw_writel(status, HOST_STATUS(host)); + wmb(); /* drain writebuffer */ return IRQ_HANDLED; } @@ -976,7 +976,7 @@ static int au1xmmc_probe(struct platform_device *pdev) goto out1; } - host->iobase = (unsigned long)ioremap(r->start, 0x3c); + host->iobase = ioremap(r->start, 0x3c); if (!host->iobase) { dev_err(&pdev->dev, "cannot remap mmio\n"); goto out2; @@ -1025,6 +1025,16 @@ static int au1xmmc_probe(struct platform_device *pdev) goto out3; } + host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK); + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "cannot find clock\n"); + goto out_irq; + } + if (clk_prepare_enable(host->clk)) { + dev_err(&pdev->dev, "cannot enable clock\n"); + goto out_clk; + } + host->status = HOST_S_IDLE; /* board-specific carddetect setup, if any */ @@ -1075,7 +1085,7 @@ static int au1xmmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - pr_info(DRIVER_NAME ": MMC Controller %d set up at %8.8X" + pr_info(DRIVER_NAME ": MMC Controller %d set up at %p" " (mode=%s)\n", pdev->id, host->iobase, host->flags & HOST_F_DMA ? "dma" : "pio"); @@ -1087,10 +1097,10 @@ out6: led_classdev_unregister(host->platdata->led); out5: #endif - au_writel(0, HOST_ENABLE(host)); - au_writel(0, HOST_CONFIG(host)); - au_writel(0, HOST_CONFIG2(host)); - au_sync(); + __raw_writel(0, HOST_ENABLE(host)); + __raw_writel(0, HOST_CONFIG(host)); + __raw_writel(0, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ if (host->flags & HOST_F_DBDMA) au1xmmc_dbdma_shutdown(host); @@ -1101,7 +1111,10 @@ out5: if (host->platdata && host->platdata->cd_setup && !(mmc->caps & MMC_CAP_NEEDS_POLL)) host->platdata->cd_setup(mmc, 0); - +out_clk: + clk_disable_unprepare(host->clk); + clk_put(host->clk); +out_irq: free_irq(host->irq, host); out3: iounmap((void *)host->iobase); @@ -1130,10 +1143,10 @@ static int au1xmmc_remove(struct platform_device *pdev) !(host->mmc->caps & MMC_CAP_NEEDS_POLL)) host->platdata->cd_setup(host->mmc, 0); - au_writel(0, HOST_ENABLE(host)); - au_writel(0, HOST_CONFIG(host)); - au_writel(0, HOST_CONFIG2(host)); - au_sync(); + __raw_writel(0, HOST_ENABLE(host)); + __raw_writel(0, HOST_CONFIG(host)); + __raw_writel(0, HOST_CONFIG2(host)); + wmb(); /* drain writebuffer */ tasklet_kill(&host->data_task); tasklet_kill(&host->finish_task); @@ -1143,6 +1156,9 @@ static int au1xmmc_remove(struct platform_device *pdev) au1xmmc_set_power(host, 0); + clk_disable_unprepare(host->clk); + clk_put(host->clk); + free_irq(host->irq, host); iounmap((void *)host->iobase); release_resource(host->ioarea); @@ -1158,11 +1174,11 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) { struct au1xmmc_host *host = platform_get_drvdata(pdev); - au_writel(0, HOST_CONFIG2(host)); - au_writel(0, HOST_CONFIG(host)); - au_writel(0xffffffff, HOST_STATUS(host)); - au_writel(0, HOST_ENABLE(host)); - au_sync(); + __raw_writel(0, HOST_CONFIG2(host)); + __raw_writel(0, HOST_CONFIG(host)); + __raw_writel(0xffffffff, 
HOST_STATUS(host)); + __raw_writel(0, HOST_ENABLE(host)); + wmb(); /* drain writebuffer */ return 0; } diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c index f70546a3a7cc..6ada1b36685b 100644 --- a/drivers/mmc/host/dw_mmc-pci.c +++ b/drivers/mmc/host/dw_mmc-pci.c @@ -102,7 +102,7 @@ static int dw_mci_pci_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume); -static DEFINE_PCI_DEVICE_TABLE(dw_mci_pci_id) = { +static const struct pci_device_id dw_mci_pci_id[] = { { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, {} }; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 1ac227c603b7..8f216edbdf08 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -111,8 +111,7 @@ static const u8 tuning_blk_pattern_8bit[] = { 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, }; -static inline bool dw_mci_fifo_reset(struct dw_mci *host); -static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host); +static bool dw_mci_reset(struct dw_mci *host); #if defined(CONFIG_DEBUG_FS) static int dw_mci_req_show(struct seq_file *s, void *v) @@ -997,7 +996,8 @@ static int dw_mci_get_ro(struct mmc_host *mmc) int gpio_ro = mmc_gpio_get_ro(mmc); /* Use platform get_ro function, else try on board write protect */ - if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) + if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) || + (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)) read_only = 0; else if (!IS_ERR_VALUE(gpio_ro)) read_only = gpio_ro; @@ -1235,7 +1235,7 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) * After an error, there may be data lingering * in the FIFO */ - dw_mci_fifo_reset(host); + dw_mci_reset(host); } else { data->bytes_xfered = data->blocks * data->blksz; data->error = 0; @@ -1352,7 +1352,7 @@ static void dw_mci_tasklet_func(unsigned long priv) /* CMD error in data command */ if (mrq->cmd->error && mrq->data) - dw_mci_fifo_reset(host); + dw_mci_reset(host); host->cmd = NULL; host->data = NULL; @@ -1963,14 +1963,8 @@ static void dw_mci_work_routine_card(struct work_struct *work) } /* Power down slot */ - if (present == 0) { - /* Clear down the FIFO */ - dw_mci_fifo_reset(host); -#ifdef CONFIG_MMC_DW_IDMAC - dw_mci_idmac_reset(host); -#endif - - } + if (present == 0) + dw_mci_reset(host); spin_unlock_bh(&host->lock); @@ -2021,8 +2015,11 @@ static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) /* get quirks */ for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++) - if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) + if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) { + dev_warn(dev, "Slot quirk %s is deprecated\n", + of_slot_quirks[idx].quirk); quirks |= of_slot_quirks[idx].id; + } return quirks; } @@ -2208,8 +2205,11 @@ static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) return false; } -static inline bool dw_mci_fifo_reset(struct dw_mci *host) +static bool dw_mci_reset(struct dw_mci *host) { + u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; + bool ret = false; + /* * Reseting generates a block interrupt, hence setting * the scatter-gather pointer to NULL. 
@@ -2219,15 +2219,60 @@ static inline bool dw_mci_fifo_reset(struct dw_mci *host) host->sg = NULL; } - return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET); -} + if (host->use_dma) + flags |= SDMMC_CTRL_DMA_RESET; -static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host) -{ - return dw_mci_ctrl_reset(host, - SDMMC_CTRL_FIFO_RESET | - SDMMC_CTRL_RESET | - SDMMC_CTRL_DMA_RESET); + if (dw_mci_ctrl_reset(host, flags)) { + /* + * In all cases we clear the RAWINTS register to clear any + * interrupts. + */ + mci_writel(host, RINTSTS, 0xFFFFFFFF); + + /* if using dma we wait for dma_req to clear */ + if (host->use_dma) { + unsigned long timeout = jiffies + msecs_to_jiffies(500); + u32 status; + do { + status = mci_readl(host, STATUS); + if (!(status & SDMMC_STATUS_DMA_REQ)) + break; + cpu_relax(); + } while (time_before(jiffies, timeout)); + + if (status & SDMMC_STATUS_DMA_REQ) { + dev_err(host->dev, + "%s: Timeout waiting for dma_req to " + "clear during reset\n", __func__); + goto ciu_out; + } + + /* when using DMA next we reset the fifo again */ + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) + goto ciu_out; + } + } else { + /* if the controller reset bit did clear, then set clock regs */ + if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { + dev_err(host->dev, "%s: fifo/dma reset bits didn't " + "clear but ciu was reset, doing clock update\n", + __func__); + goto ciu_out; + } + } + +#if IS_ENABLED(CONFIG_MMC_DW_IDMAC) + /* It is also recommended that we reset and reprogram idmac */ + dw_mci_idmac_reset(host); +#endif + + ret = true; + +ciu_out: + /* After a CTRL reset we need to have CIU set clock registers */ + mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0); + + return ret; } #ifdef CONFIG_OF @@ -2238,6 +2283,9 @@ static struct dw_mci_of_quirks { { .quirk = "broken-cd", .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, + }, { + .quirk = "disable-wp", + .id = DW_MCI_QUIRK_NO_WRITE_PROTECT, }, }; @@ -2425,7 +2473,7 @@ int dw_mci_probe(struct dw_mci *host) } /* Reset all blocks */ - if (!dw_mci_ctrl_all_reset(host)) + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) return -ENODEV; host->dma_ops = host->pdata->dma_ops; @@ -2612,7 +2660,7 @@ int dw_mci_resume(struct dw_mci *host) } } - if (!dw_mci_ctrl_all_reset(host)) { + if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { ret = -ENODEV; return ret; } diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 738fa241d058..08fd956d81f3 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -129,6 +129,7 @@ #define SDMMC_CMD_INDX(n) ((n) & 0x1F) /* Status register defines */ #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF) +#define SDMMC_STATUS_DMA_REQ BIT(31) /* FIFOTH register defines */ #define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ ((r) & 0xFFF) << 16 | \ @@ -150,6 +151,10 @@ /* Card read threshold */ #define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x)) +/* All ctrl reset bits */ +#define SDMMC_CTRL_ALL_RESET_FLAGS \ + (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | SDMMC_CTRL_DMA_RESET) + /* Register access macros */ #define mci_readl(dev, reg) \ __raw_readl((dev)->regs + SDMMC_##reg) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 7ad463e9741c..e4d470704150 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -52,34 +52,53 @@ static unsigned int fmax = 515633; * struct variant_data - MMCI variant-specific quirks * @clkreg: default value for MCICLOCK register * @clkreg_enable: enable value for MMCICLOCK register + * 
@clkreg_8bit_bus_enable: enable value for 8 bit bus + * @clkreg_neg_edge_enable: enable value for inverted data/cmd output * @datalength_bits: number of bits in the MMCIDATALENGTH register * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY * is asserted (likewise for RX) * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY * is asserted (likewise for RX) + * @data_cmd_enable: enable value for data commands. * @sdio: variant supports SDIO * @st_clkdiv: true if using a ST-specific clock divider algorithm + * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register + * @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl + * register * @pwrreg_powerup: power up value for MMCIPOWER register + * @f_max: maximum clk frequency supported by the controller. * @signal_direction: input/out direction of bus signals can be indicated * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock * @busy_detect: true if busy detection on dat0 is supported * @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply + * @explicit_mclk_control: enable explicit mclk control in driver. + * @qcom_fifo: enables qcom specific fifo pio read logic. + * @reversed_irq_handling: handle data irq before cmd irq. */ struct variant_data { unsigned int clkreg; unsigned int clkreg_enable; + unsigned int clkreg_8bit_bus_enable; + unsigned int clkreg_neg_edge_enable; unsigned int datalength_bits; unsigned int fifosize; unsigned int fifohalfsize; + unsigned int data_cmd_enable; + unsigned int datactrl_mask_ddrmode; bool sdio; bool st_clkdiv; bool blksz_datactrl16; + bool blksz_datactrl4; u32 pwrreg_powerup; + u32 f_max; bool signal_direction; bool pwrreg_clkgate; bool busy_detect; bool pwrreg_nopower; + bool explicit_mclk_control; + bool qcom_fifo; + bool reversed_irq_handling; }; static struct variant_data variant_arm = { @@ -87,6 +106,8 @@ static struct variant_data variant_arm = { .fifohalfsize = 8 * 4, .datalength_bits = 16, .pwrreg_powerup = MCI_PWR_UP, + .f_max = 100000000, + .reversed_irq_handling = true, }; static struct variant_data variant_arm_extended_fifo = { @@ -94,6 +115,7 @@ static struct variant_data variant_arm_extended_fifo = { .fifohalfsize = 64 * 4, .datalength_bits = 16, .pwrreg_powerup = MCI_PWR_UP, + .f_max = 100000000, }; static struct variant_data variant_arm_extended_fifo_hwfc = { @@ -102,15 +124,18 @@ static struct variant_data variant_arm_extended_fifo_hwfc = { .clkreg_enable = MCI_ARM_HWFCEN, .datalength_bits = 16, .pwrreg_powerup = MCI_PWR_UP, + .f_max = 100000000, }; static struct variant_data variant_u300 = { .fifosize = 16 * 4, .fifohalfsize = 8 * 4, .clkreg_enable = MCI_ST_U300_HWFCEN, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, .datalength_bits = 16, .sdio = true, .pwrreg_powerup = MCI_PWR_ON, + .f_max = 100000000, .signal_direction = true, .pwrreg_clkgate = true, .pwrreg_nopower = true, @@ -124,6 +149,7 @@ static struct variant_data variant_nomadik = { .sdio = true, .st_clkdiv = true, .pwrreg_powerup = MCI_PWR_ON, + .f_max = 100000000, .signal_direction = true, .pwrreg_clkgate = true, .pwrreg_nopower = true, @@ -134,10 +160,13 @@ static struct variant_data variant_ux500 = { .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_enable = MCI_ST_UX500_HWFCEN, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, .datalength_bits = 24, .sdio = true, .st_clkdiv = true, .pwrreg_powerup 
= MCI_PWR_ON, + .f_max = 100000000, .signal_direction = true, .pwrreg_clkgate = true, .busy_detect = true, @@ -149,17 +178,38 @@ static struct variant_data variant_ux500v2 = { .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_enable = MCI_ST_UX500_HWFCEN, + .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS, + .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE, + .datactrl_mask_ddrmode = MCI_ST_DPSM_DDRMODE, .datalength_bits = 24, .sdio = true, .st_clkdiv = true, .blksz_datactrl16 = true, .pwrreg_powerup = MCI_PWR_ON, + .f_max = 100000000, .signal_direction = true, .pwrreg_clkgate = true, .busy_detect = true, .pwrreg_nopower = true, }; +static struct variant_data variant_qcom = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .clkreg_enable = MCI_QCOM_CLK_FLOWENA | + MCI_QCOM_CLK_SELECT_IN_FBCLK, + .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8, + .datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE, + .data_cmd_enable = MCI_QCOM_CSPM_DATCMD, + .blksz_datactrl4 = true, + .datalength_bits = 24, + .pwrreg_powerup = MCI_PWR_UP, + .f_max = 208000000, + .explicit_mclk_control = true, + .qcom_fifo = true, +}; + static int mmci_card_busy(struct mmc_host *mmc) { struct mmci_host *host = mmc_priv(mmc); @@ -260,7 +310,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) host->cclk = 0; if (desired) { - if (desired >= host->mclk) { + if (variant->explicit_mclk_control) { + host->cclk = host->mclk; + } else if (desired >= host->mclk) { clk = MCI_CLK_BYPASS; if (variant->st_clkdiv) clk |= MCI_ST_UX500_NEG_EDGE; @@ -299,11 +351,11 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) clk |= MCI_4BIT_BUS; if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) - clk |= MCI_ST_8BIT_BUS; + clk |= variant->clkreg_8bit_bus_enable; if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) - clk |= MCI_ST_UX500_NEG_EDGE; + clk |= variant->clkreg_neg_edge_enable; mmci_write_clkreg(host, clk); } @@ -719,7 +771,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) data->bytes_xfered = 0; clks = (unsigned long long)data->timeout_ns * host->cclk; - do_div(clks, 1000000000UL); + do_div(clks, NSEC_PER_SEC); timeout = data->timeout_clks + (unsigned int)clks; @@ -732,6 +784,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) if (variant->blksz_datactrl16) datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); + else if (variant->blksz_datactrl4) + datactrl = MCI_DPSM_ENABLE | (data->blksz << 4); else datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; @@ -767,7 +821,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) - datactrl |= MCI_ST_DPSM_DDRMODE; + datactrl |= variant->datactrl_mask_ddrmode; /* * Attempt to use DMA operation mode, if this @@ -812,7 +866,7 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { writel(0, base + MMCICOMMAND); - udelay(1); + mmci_reg_delay(host); } c |= cmd->opcode | MCI_CPSM_ENABLE; @@ -824,6 +878,9 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) if (/*interrupt*/0) c |= MCI_CPSM_INTERRUPT; + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) + c |= host->variant->data_cmd_enable; + host->cmd = cmd; writel(cmd->arg, base + MMCIARGUMENT); @@ -834,6 +891,10 @@ static 
void mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) { + /* Make sure we have data to handle */ + if (!data) + return; + /* First check for errors */ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| MCI_TXUNDERRUN|MCI_RXOVERRUN)) { @@ -902,9 +963,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, unsigned int status) { void __iomem *base = host->base; - bool sbc = (cmd == host->mrq->sbc); - bool busy_resp = host->variant->busy_detect && - (cmd->flags & MMC_RSP_BUSY); + bool sbc, busy_resp; + + if (!cmd) + return; + + sbc = (cmd == host->mrq->sbc); + busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY); + + if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| + MCI_CMDSENT|MCI_CMDRESPEND))) + return; /* Check if we need to wait for busy completion. */ if (host->busy_status && (status & MCI_ST_CARDBUSY)) @@ -957,15 +1026,34 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, } } +static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain) +{ + return remain - (readl(host->base + MMCIFIFOCNT) << 2); +} + +static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r) +{ + /* + * on qcom SDCC4 only 8 words are used in each burst so only 8 addresses + * from the fifo range should be used + */ + if (status & MCI_RXFIFOHALFFULL) + return host->variant->fifohalfsize; + else if (status & MCI_RXDATAAVLBL) + return 4; + + return 0; +} + static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) { void __iomem *base = host->base; char *ptr = buffer; - u32 status; + u32 status = readl(host->base + MMCISTATUS); int host_remain = host->size; do { - int count = host_remain - (readl(base + MMCIFIFOCNT) << 2); + int count = host->get_rx_fifocnt(host, status, host_remain); if (count > remain) count = remain; @@ -1132,9 +1220,6 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) spin_lock(&host->lock); do { - struct mmc_command *cmd; - struct mmc_data *data; - status = readl(host->base + MMCISTATUS); if (host->singleirq) { @@ -1154,16 +1239,13 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); - cmd = host->cmd; - if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| - MCI_CMDSENT|MCI_CMDRESPEND) && cmd) - mmci_cmd_irq(host, cmd, status); - - data = host->data; - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| - MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| - MCI_DATABLOCKEND) && data) - mmci_data_irq(host, data, status); + if (host->variant->reversed_irq_handling) { + mmci_data_irq(host, host->data, status); + mmci_cmd_irq(host, host->cmd, status); + } else { + mmci_cmd_irq(host, host->cmd, status); + mmci_data_irq(host, host->data, status); + } /* Don't poll for busy completion in irq context. 
*/ if (host->busy_status) @@ -1296,6 +1378,17 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (!ios->clock && variant->pwrreg_clkgate) pwr &= ~MCI_PWR_ON; + if (host->variant->explicit_mclk_control && + ios->clock != host->clock_cache) { + ret = clk_set_rate(host->clk, ios->clock); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "Error setting clock rate (%d)\n", ret); + else + host->mclk = clk_get_rate(host->clk); + } + host->clock_cache = ios->clock; + spin_lock_irqsave(&host->lock, flags); mmci_set_clkreg(host, ios->clock); @@ -1443,6 +1536,11 @@ static int mmci_probe(struct amba_device *dev, if (ret) goto host_free; + if (variant->qcom_fifo) + host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt; + else + host->get_rx_fifocnt = mmci_get_rx_fifocnt; + host->plat = plat; host->variant = variant; host->mclk = clk_get_rate(host->clk); @@ -1451,8 +1549,8 @@ static int mmci_probe(struct amba_device *dev, * so we try to adjust the clock down to this, * (if possible). */ - if (host->mclk > 100000000) { - ret = clk_set_rate(host->clk, 100000000); + if (host->mclk > variant->f_max) { + ret = clk_set_rate(host->clk, variant->f_max); if (ret < 0) goto clk_disable; host->mclk = clk_get_rate(host->clk); @@ -1471,9 +1569,12 @@ static int mmci_probe(struct amba_device *dev, * The ARM and ST versions of the block have slightly different * clock divider equations which means that the minimum divider * differs too. + * on Qualcomm like controllers get the nearest minimum clock to 100Khz */ if (variant->st_clkdiv) mmc->f_min = DIV_ROUND_UP(host->mclk, 257); + else if (variant->explicit_mclk_control) + mmc->f_min = clk_round_rate(host->clk, 100000); else mmc->f_min = DIV_ROUND_UP(host->mclk, 512); /* @@ -1483,9 +1584,14 @@ static int mmci_probe(struct amba_device *dev, * the block, of course. */ if (mmc->f_max) - mmc->f_max = min(host->mclk, mmc->f_max); + mmc->f_max = variant->explicit_mclk_control ? + min(variant->f_max, mmc->f_max) : + min(host->mclk, mmc->f_max); else - mmc->f_max = min(host->mclk, fmax); + mmc->f_max = variant->explicit_mclk_control ? 
+ fmax : min(host->mclk, fmax); + + dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); /* Get regulators and the supported OCR mask */ @@ -1752,6 +1858,12 @@ static struct amba_id mmci_ids[] = { .mask = 0xf0ffffff, .data = &variant_ux500v2, }, + /* Qualcomm variants */ + { + .id = 0x00051180, + .mask = 0x000fffff, + .data = &variant_qcom, + }, { 0, 0 }, }; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 347d942d740b..a1f5e4f49e2a 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -41,6 +41,15 @@ /* Modified PL180 on Versatile Express platform */ #define MCI_ARM_HWFCEN (1 << 12) +/* Modified on Qualcomm Integrations */ +#define MCI_QCOM_CLK_WIDEBUS_8 (BIT(10) | BIT(11)) +#define MCI_QCOM_CLK_FLOWENA BIT(12) +#define MCI_QCOM_CLK_INVERTOUT BIT(13) + +/* select in latch data and command in */ +#define MCI_QCOM_CLK_SELECT_IN_FBCLK BIT(15) +#define MCI_QCOM_CLK_SELECT_IN_DDR_MODE (BIT(14) | BIT(15)) + #define MMCIARGUMENT 0x008 #define MMCICOMMAND 0x00c #define MCI_CPSM_RESPONSE (1 << 6) @@ -54,6 +63,14 @@ #define MCI_ST_NIEN (1 << 13) #define MCI_ST_CE_ATACMD (1 << 14) +/* Modified on Qualcomm Integrations */ +#define MCI_QCOM_CSPM_DATCMD BIT(12) +#define MCI_QCOM_CSPM_MCIABORT BIT(13) +#define MCI_QCOM_CSPM_CCSENABLE BIT(14) +#define MCI_QCOM_CSPM_CCSDISABLE BIT(15) +#define MCI_QCOM_CSPM_AUTO_CMD19 BIT(16) +#define MCI_QCOM_CSPM_AUTO_CMD21 BIT(21) + #define MMCIRESPCMD 0x010 #define MMCIRESPONSE0 0x014 #define MMCIRESPONSE1 0x018 @@ -191,6 +208,8 @@ struct mmci_host { spinlock_t lock; unsigned int mclk; + /* cached value of requested clk in set_ios */ + unsigned int clock_cache; unsigned int cclk; u32 pwr_reg; u32 pwr_reg_add; @@ -210,6 +229,7 @@ struct mmci_host { /* pio stuff */ struct sg_mapping_iter sg_miter; unsigned int size; + int (*get_rx_fifocnt)(struct mmci_host *h, u32 status, int remain); #ifdef CONFIG_DMA_ENGINE /* DMA stuff */ diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 74924a04026e..b4b1efbf6c16 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -13,7 +13,6 @@ * warranty of any kind, whether express or implied. 
*/ -#include <linux/version.h> #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index babfea03ba8a..140885a5a4e7 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -86,7 +86,8 @@ static int mxs_mmc_get_cd(struct mmc_host *mmc) if (ret >= 0) return ret; - present = !(readl(ssp->base + HW_SSP_STATUS(ssp)) & + present = mmc->caps & MMC_CAP_NEEDS_POLL || + !(readl(ssp->base + HW_SSP_STATUS(ssp)) & BM_SSP_STATUS_CARD_DETECT); if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 6b7b75585926..965672663ef0 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -29,6 +29,7 @@ #include <linux/timer.h> #include <linux/clk.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/of_gpio.h> #include <linux/of_device.h> #include <linux/omap-dmaengine.h> @@ -36,6 +37,7 @@ #include <linux/mmc/core.h> #include <linux/mmc/mmc.h> #include <linux/io.h> +#include <linux/irq.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/pinctrl/consumer.h> @@ -54,6 +56,7 @@ #define OMAP_HSMMC_RSP54 0x0118 #define OMAP_HSMMC_RSP76 0x011C #define OMAP_HSMMC_DATA 0x0120 +#define OMAP_HSMMC_PSTATE 0x0124 #define OMAP_HSMMC_HCTL 0x0128 #define OMAP_HSMMC_SYSCTL 0x012C #define OMAP_HSMMC_STAT 0x0130 @@ -91,7 +94,10 @@ #define BCE (1 << 1) #define FOUR_BIT (1 << 1) #define HSPE (1 << 2) +#define IWE (1 << 24) #define DDR (1 << 19) +#define CLKEXTFREE (1 << 16) +#define CTPL (1 << 11) #define DW8 (1 << 5) #define OD 0x1 #define STAT_CLEAR 0xFFFFFFFF @@ -101,11 +107,15 @@ #define SRD (1 << 26) #define SOFTRESET (1 << 1) +/* PSTATE */ +#define DLEV_DAT(x) (1 << (20 + (x))) + /* Interrupt masks for IE and ISE register */ #define CC_EN (1 << 0) #define TC_EN (1 << 1) #define BWR_EN (1 << 4) #define BRR_EN (1 << 5) +#define CIRQ_EN (1 << 8) #define ERR_EN (1 << 15) #define CTO_EN (1 << 16) #define CCRC_EN (1 << 17) @@ -140,7 +150,6 @@ #define VDD_3V0 3000000 /* 300000 uV */ #define VDD_165_195 (ffs(MMC_VDD_165_195) - 1) -#define AUTO_CMD23 (1 << 1) /* Auto CMD23 support */ /* * One controller can have multiple slots, like on some omap boards using * omap.c controller driver. 
Luckily this is not currently done on any known @@ -194,6 +203,7 @@ struct omap_hsmmc_host { u32 sysctl; u32 capa; int irq; + int wake_irq; int use_dma, dma_ch; struct dma_chan *tx_chan; struct dma_chan *rx_chan; @@ -206,6 +216,9 @@ struct omap_hsmmc_host { int req_in_progress; unsigned long clk_rate; unsigned int flags; +#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */ +#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */ +#define HSMMC_WAKE_IRQ_ENABLED (1 << 2) struct omap_hsmmc_next next_data; struct omap_mmc_platform_data *pdata; }; @@ -510,27 +523,40 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, struct mmc_command *cmd) { - unsigned int irq_mask; + u32 irq_mask = INT_EN_MASK; + unsigned long flags; if (host->use_dma) - irq_mask = INT_EN_MASK & ~(BRR_EN | BWR_EN); - else - irq_mask = INT_EN_MASK; + irq_mask &= ~(BRR_EN | BWR_EN); /* Disable timeout for erases */ if (cmd->opcode == MMC_ERASE) irq_mask &= ~DTO_EN; + spin_lock_irqsave(&host->irq_lock, flags); OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + + /* latch pending CIRQ, but don't signal MMC core */ + if (host->flags & HSMMC_SDIO_IRQ_ENABLED) + irq_mask |= CIRQ_EN; OMAP_HSMMC_WRITE(host->base, IE, irq_mask); + spin_unlock_irqrestore(&host->irq_lock, flags); } static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host) { - OMAP_HSMMC_WRITE(host->base, ISE, 0); - OMAP_HSMMC_WRITE(host->base, IE, 0); + u32 irq_mask = 0; + unsigned long flags; + + spin_lock_irqsave(&host->irq_lock, flags); + /* no transfer running but need to keep cirq if enabled */ + if (host->flags & HSMMC_SDIO_IRQ_ENABLED) + irq_mask |= CIRQ_EN; + OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + OMAP_HSMMC_WRITE(host->base, IE, irq_mask); OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + spin_unlock_irqrestore(&host->irq_lock, flags); } /* Calculate divisor for the given clock frequency */ @@ -667,6 +693,9 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) capa = VS18; } + if (host->mmc->caps & MMC_CAP_SDIO_IRQ) + hctl |= IWE; + OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) | hctl); @@ -681,7 +710,9 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) && time_before(jiffies, timeout)) ; - omap_hsmmc_disable_irq(host); + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); /* Do not initialize card-specific things if the power is off */ if (host->power_mode == MMC_POWER_OFF) @@ -1118,8 +1149,12 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) int status; status = OMAP_HSMMC_READ(host->base, STAT); - while (status & INT_EN_MASK && host->req_in_progress) { - omap_hsmmc_do_irq(host, status); + while (status & (INT_EN_MASK | CIRQ_EN)) { + if (host->req_in_progress) + omap_hsmmc_do_irq(host, status); + + if (status & CIRQ_EN) + mmc_signal_sdio_irq(host->mmc); /* Flush posted write */ status = OMAP_HSMMC_READ(host->base, STAT); @@ -1128,6 +1163,22 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id) +{ + struct omap_hsmmc_host *host = dev_id; + + /* cirq is level triggered, disable to avoid infinite loop */ + spin_lock(&host->irq_lock); + if (host->flags & HSMMC_WAKE_IRQ_ENABLED) { + disable_irq_nosync(host->wake_irq); + host->flags &= ~HSMMC_WAKE_IRQ_ENABLED; + } + 
spin_unlock(&host->irq_lock); + pm_request_resume(host->dev); /* no use counter */ + + return IRQ_HANDLED; +} + static void set_sd_bus_power(struct omap_hsmmc_host *host) { unsigned long i; @@ -1639,6 +1690,103 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card) mmc_slot(host).init_card(card); } +static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + u32 irq_mask, con; + unsigned long flags; + + spin_lock_irqsave(&host->irq_lock, flags); + + con = OMAP_HSMMC_READ(host->base, CON); + irq_mask = OMAP_HSMMC_READ(host->base, ISE); + if (enable) { + host->flags |= HSMMC_SDIO_IRQ_ENABLED; + irq_mask |= CIRQ_EN; + con |= CTPL | CLKEXTFREE; + } else { + host->flags &= ~HSMMC_SDIO_IRQ_ENABLED; + irq_mask &= ~CIRQ_EN; + con &= ~(CTPL | CLKEXTFREE); + } + OMAP_HSMMC_WRITE(host->base, CON, con); + OMAP_HSMMC_WRITE(host->base, IE, irq_mask); + + /* + * if enable, piggy back detection on current request + * but always disable immediately + */ + if (!host->req_in_progress || !enable) + OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + + /* flush posted write */ + OMAP_HSMMC_READ(host->base, IE); + + spin_unlock_irqrestore(&host->irq_lock, flags); +} + +static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + int ret; + + /* + * For omaps with wake-up path, wakeirq will be irq from pinctrl and + * for other omaps, wakeirq will be from GPIO (dat line remuxed to + * gpio). wakeirq is needed to detect sdio irq in runtime suspend state + * with functional clock disabled. + */ + if (!host->dev->of_node || !host->wake_irq) + return -ENODEV; + + /* Prevent auto-enabling of IRQ */ + irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN); + ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + mmc_hostname(mmc), host); + if (ret) { + dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n"); + goto err; + } + + /* + * Some omaps don't have wake-up path from deeper idle states + * and need to remux SDIO DAT1 to GPIO for wake-up from idle. 
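For reference, the wake-IRQ handling added above follows a common idiom: request the interrupt with IRQ_NOAUTOEN so it stays masked, enable it only once the controller enters runtime suspend, and let the (level-triggered) handler mask it again before resuming the device. The sketch below is illustrative only and uses hypothetical my_* names rather than the driver's own omap_hsmmc_host state.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

/* Hypothetical driver state; the lock is assumed to be initialized in probe. */
struct my_host {
	struct device *dev;
	int wake_irq;
	bool wake_irq_enabled;
	spinlock_t lock;
};

static irqreturn_t my_wake_handler(int irq, void *dev_id)
{
	struct my_host *host = dev_id;

	/* Level-triggered wake source: mask it before waking the device. */
	spin_lock(&host->lock);
	if (host->wake_irq_enabled) {
		disable_irq_nosync(host->wake_irq);
		host->wake_irq_enabled = false;
	}
	spin_unlock(&host->lock);

	pm_request_resume(host->dev);
	return IRQ_HANDLED;
}

static int my_setup_wake_irq(struct my_host *host)
{
	/* Keep the IRQ masked until runtime suspend actually arms it. */
	irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
	return devm_request_irq(host->dev, host->wake_irq, my_wake_handler,
				IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				dev_name(host->dev), host);
}

/* Called from the runtime_suspend path once the controller is quiesced. */
static void my_arm_wake_irq(struct my_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->wake_irq_enabled) {
		enable_irq(host->wake_irq);
		host->wake_irq_enabled = true;
	}
	spin_unlock_irqrestore(&host->lock, flags);
}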
+ */ + if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) { + struct pinctrl *p = devm_pinctrl_get(host->dev); + if (!p) { + ret = -ENODEV; + goto err_free_irq; + } + if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { + dev_info(host->dev, "missing default pinctrl state\n"); + devm_pinctrl_put(p); + ret = -EINVAL; + goto err_free_irq; + } + + if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) { + dev_info(host->dev, "missing idle pinctrl state\n"); + devm_pinctrl_put(p); + ret = -EINVAL; + goto err_free_irq; + } + devm_pinctrl_put(p); + } + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | IWE); + return 0; + +err_free_irq: + devm_free_irq(host->dev, host->wake_irq, host); +err: + dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n"); + host->wake_irq = 0; + return ret; +} + static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) { u32 hctl, capa, value; @@ -1691,7 +1839,7 @@ static const struct mmc_host_ops omap_hsmmc_ops = { .get_cd = omap_hsmmc_get_cd, .get_ro = omap_hsmmc_get_ro, .init_card = omap_hsmmc_init_card, - /* NYET -- enable_sdio_irq */ + .enable_sdio_irq = omap_hsmmc_enable_sdio_irq, }; #ifdef CONFIG_DEBUG_FS @@ -1701,13 +1849,23 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) struct mmc_host *mmc = s->private; struct omap_hsmmc_host *host = mmc_priv(mmc); - seq_printf(s, "mmc%d:\n ctx_loss:\t%d\n\nregs:\n", - mmc->index, host->context_loss); + seq_printf(s, "mmc%d:\n", mmc->index); + seq_printf(s, "sdio irq mode\t%s\n", + (mmc->caps & MMC_CAP_SDIO_IRQ) ? "interrupt" : "polling"); - pm_runtime_get_sync(host->dev); + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + seq_printf(s, "sdio irq \t%s\n", + (host->flags & HSMMC_SDIO_IRQ_ENABLED) ? "enabled" + : "disabled"); + } + seq_printf(s, "ctx_loss:\t%d\n", host->context_loss); + pm_runtime_get_sync(host->dev); + seq_puts(s, "\nregs:\n"); seq_printf(s, "CON:\t\t0x%08x\n", OMAP_HSMMC_READ(host->base, CON)); + seq_printf(s, "PSTATE:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, PSTATE)); seq_printf(s, "HCTL:\t\t0x%08x\n", OMAP_HSMMC_READ(host->base, HCTL)); seq_printf(s, "SYSCTL:\t\t0x%08x\n", @@ -1761,6 +1919,10 @@ static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = { static const struct omap_mmc_of_data omap4_mmc_of_data = { .reg_offset = 0x100, }; +static const struct omap_mmc_of_data am33xx_mmc_of_data = { + .reg_offset = 0x100, + .controller_flags = OMAP_HSMMC_SWAKEUP_MISSING, +}; static const struct of_device_id omap_mmc_of_match[] = { { @@ -1777,6 +1939,10 @@ static const struct of_device_id omap_mmc_of_match[] = { .compatible = "ti,omap4-hsmmc", .data = &omap4_mmc_of_data, }, + { + .compatible = "ti,am33xx-hsmmc", + .data = &am33xx_mmc_of_data, + }, {}, }; MODULE_DEVICE_TABLE(of, omap_mmc_of_match); @@ -1850,7 +2016,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev) const struct of_device_id *match; dma_cap_mask_t mask; unsigned tx_req, rx_req; - struct pinctrl *pinctrl; const struct omap_mmc_of_data *data; void __iomem *base; @@ -1913,6 +2078,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); + if (pdev->dev.of_node) + host->wake_irq = irq_of_parse_and_map(pdev->dev.of_node, 1); + mmc->ops = &omap_hsmmc_ops; mmc->f_min = OMAP_MMC_MIN_CLOCK; @@ -2061,10 +2229,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_disable_irq(host); - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) - dev_warn(&pdev->dev, - "pins are 
not configured from the driver\n"); + /* + * For now, only support SDIO interrupt if we have a separate + * wake-up interrupt configured from device tree. This is because + * the wake-up interrupt is needed for idle state and some + * platforms need special quirks. And we don't want to add new + * legacy mux platform init code callbacks any longer as we + * are moving to DT based booting anyways. + */ + ret = omap_hsmmc_configure_wake_irq(host); + if (!ret) + mmc->caps |= MMC_CAP_SDIO_IRQ; omap_hsmmc_protect_card(host); @@ -2170,11 +2345,18 @@ static int omap_hsmmc_suspend(struct device *dev) pm_runtime_get_sync(host->dev); if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { - omap_hsmmc_disable_irq(host); + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); } + /* do not wake up due to sdio irq */ + if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && + !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)) + disable_irq(host->wake_irq); + if (host->dbclk) clk_disable_unprepare(host->dbclk); @@ -2200,6 +2382,10 @@ static int omap_hsmmc_resume(struct device *dev) omap_hsmmc_protect_card(host); + if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && + !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)) + enable_irq(host->wake_irq); + pm_runtime_mark_last_busy(host->dev); pm_runtime_put_autosuspend(host->dev); return 0; @@ -2215,22 +2401,77 @@ static int omap_hsmmc_resume(struct device *dev) static int omap_hsmmc_runtime_suspend(struct device *dev) { struct omap_hsmmc_host *host; + unsigned long flags; + int ret = 0; host = platform_get_drvdata(to_platform_device(dev)); omap_hsmmc_context_save(host); dev_dbg(dev, "disabled\n"); - return 0; + spin_lock_irqsave(&host->irq_lock, flags); + if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && + (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { + /* disable sdio irq handling to prevent race */ + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + + if (!(OMAP_HSMMC_READ(host->base, PSTATE) & DLEV_DAT(1))) { + /* + * dat1 line low, pending sdio irq + * race condition: possible irq handler running on + * multi-core, abort + */ + dev_dbg(dev, "pending sdio irq, abort suspend\n"); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); + OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); + pm_runtime_mark_last_busy(dev); + ret = -EBUSY; + goto abort; + } + + pinctrl_pm_select_idle_state(dev); + + WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED); + enable_irq(host->wake_irq); + host->flags |= HSMMC_WAKE_IRQ_ENABLED; + } else { + pinctrl_pm_select_idle_state(dev); + } + +abort: + spin_unlock_irqrestore(&host->irq_lock, flags); + return ret; } static int omap_hsmmc_runtime_resume(struct device *dev) { struct omap_hsmmc_host *host; + unsigned long flags; host = platform_get_drvdata(to_platform_device(dev)); omap_hsmmc_context_restore(host); dev_dbg(dev, "enabled\n"); + spin_lock_irqsave(&host->irq_lock, flags); + if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && + (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { + /* sdio irq flag can't change while in runtime suspend */ + if (host->flags & HSMMC_WAKE_IRQ_ENABLED) { + disable_irq_nosync(host->wake_irq); + host->flags &= ~HSMMC_WAKE_IRQ_ENABLED; + } + + pinctrl_pm_select_default_state(host->dev); + + /* irq lost, if pinmux incorrect */ + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); + OMAP_HSMMC_WRITE(host->base, 
IE, CIRQ_EN); + } else { + pinctrl_pm_select_default_state(host->dev); + } + spin_unlock_irqrestore(&host->irq_lock, flags); return 0; } diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 0d519649b575..dfde4a210238 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -24,6 +24,7 @@ #include <linux/highmem.h> #include <linux/delay.h> #include <linux/platform_device.h> +#include <linux/workqueue.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> @@ -36,7 +37,10 @@ struct realtek_pci_sdmmc { struct rtsx_pcr *pcr; struct mmc_host *mmc; struct mmc_request *mrq; + struct workqueue_struct *workq; +#define SDMMC_WORKQ_NAME "rtsx_pci_sdmmc_workq" + struct work_struct work; struct mutex host_mutex; u8 ssc_depth; @@ -48,6 +52,11 @@ struct realtek_pci_sdmmc { int power_state; #define SDMMC_POWER_ON 1 #define SDMMC_POWER_OFF 0 + + unsigned int sg_count; + s32 cookie; + unsigned int cookie_sg_count; + bool using_cookie; }; static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) @@ -86,6 +95,77 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host) #define sd_print_debug_regs(host) #endif /* DEBUG */ +/* + * sd_pre_dma_transfer - do dma_map_sg() or using cookie + * + * @pre: if called in pre_req() + * return: + * 0 - do dma_map_sg() + * 1 - using cookie + */ +static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host, + struct mmc_data *data, bool pre) +{ + struct rtsx_pcr *pcr = host->pcr; + int read = data->flags & MMC_DATA_READ; + int count = 0; + int using_cookie = 0; + + if (!pre && data->host_cookie && data->host_cookie != host->cookie) { + dev_err(sdmmc_dev(host), + "error: data->host_cookie = %d, host->cookie = %d\n", + data->host_cookie, host->cookie); + data->host_cookie = 0; + } + + if (pre || data->host_cookie != host->cookie) { + count = rtsx_pci_dma_map_sg(pcr, data->sg, data->sg_len, read); + } else { + count = host->cookie_sg_count; + using_cookie = 1; + } + + if (pre) { + host->cookie_sg_count = count; + if (++host->cookie < 0) + host->cookie = 1; + data->host_cookie = host->cookie; + } else { + host->sg_count = count; + } + + return using_cookie; +} + +static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, + bool is_first_req) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (data->host_cookie) { + dev_err(sdmmc_dev(host), + "error: reset data->host_cookie = %d\n", + data->host_cookie); + data->host_cookie = 0; + } + + sd_pre_dma_transfer(host, data, true); + dev_dbg(sdmmc_dev(host), "pre dma sg: %d\n", host->cookie_sg_count); +} + +static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + struct mmc_data *data = mrq->data; + int read = data->flags & MMC_DATA_READ; + + rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read); + data->host_cookie = 0; +} + static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, u8 *buf, int buf_len, int timeout) { @@ -415,7 +495,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) rtsx_pci_send_cmd_no_wait(pcr); - err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000); + err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read, 10000); if (err < 0) { sd_clear_error(host); return err; @@ -640,12 +720,24 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, 
u8 opcode) return 0; } -static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +static inline int sd_rw_cmd(struct mmc_command *cmd) { - struct realtek_pci_sdmmc *host = mmc_priv(mmc); + return mmc_op_multi(cmd->opcode) || + (cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (cmd->opcode == MMC_WRITE_BLOCK); +} + +static void sd_request(struct work_struct *work) +{ + struct realtek_pci_sdmmc *host = container_of(work, + struct realtek_pci_sdmmc, work); struct rtsx_pcr *pcr = host->pcr; + + struct mmc_host *mmc = host->mmc; + struct mmc_request *mrq = host->mrq; struct mmc_command *cmd = mrq->cmd; struct mmc_data *data = mrq->data; + unsigned int data_size = 0; int err; @@ -677,13 +769,13 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) if (mrq->data) data_size = data->blocks * data->blksz; - if (!data_size || mmc_op_multi(cmd->opcode) || - (cmd->opcode == MMC_READ_SINGLE_BLOCK) || - (cmd->opcode == MMC_WRITE_BLOCK)) { + if (!data_size || sd_rw_cmd(cmd)) { sd_send_cmd_get_rsp(host, cmd); if (!cmd->error && data_size) { sd_rw_multi(host, mrq); + if (!host->using_cookie) + sdmmc_post_req(host->mmc, host->mrq, 0); if (mmc_op_multi(cmd->opcode) && mrq->stop) sd_send_cmd_get_rsp(host, mrq->stop); @@ -712,6 +804,21 @@ finish: mmc_request_done(mmc, mrq); } +static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + mutex_lock(&host->host_mutex); + host->mrq = mrq; + mutex_unlock(&host->host_mutex); + + if (sd_rw_cmd(mrq->cmd)) + host->using_cookie = sd_pre_dma_transfer(host, data, false); + + queue_work(host->workq, &host->work); +} + static int sd_set_bus_width(struct realtek_pci_sdmmc *host, unsigned char bus_width) { @@ -1146,6 +1253,8 @@ out: } static const struct mmc_host_ops realtek_pci_sdmmc_ops = { + .pre_req = sdmmc_pre_req, + .post_req = sdmmc_post_req, .request = sdmmc_request, .set_ios = sdmmc_set_ios, .get_ro = sdmmc_get_ro, @@ -1224,10 +1333,16 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) return -ENOMEM; host = mmc_priv(mmc); + host->workq = create_singlethread_workqueue(SDMMC_WORKQ_NAME); + if (!host->workq) { + mmc_free_host(mmc); + return -ENOMEM; + } host->pcr = pcr; host->mmc = mmc; host->pdev = pdev; host->power_state = SDMMC_POWER_OFF; + INIT_WORK(&host->work, sd_request); platform_set_drvdata(pdev, host); pcr->slots[RTSX_SD_CARD].p_dev = pdev; pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; @@ -1255,6 +1370,8 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) pcr->slots[RTSX_SD_CARD].card_event = NULL; mmc = host->mmc; + cancel_work_sync(&host->work); + mutex_lock(&host->host_mutex); if (host->mrq) { dev_dbg(&(pdev->dev), @@ -1273,6 +1390,10 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) mmc_remove_host(mmc); host->eject = true; + flush_workqueue(host->workq); + destroy_workqueue(host->workq); + host->workq = NULL; + mmc_free_host(mmc); dev_dbg(&(pdev->dev), diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index f23782683a7c..e5516a226362 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -12,6 +12,7 @@ */ #include <linux/module.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/mmc/host.h> @@ -27,6 +28,7 @@ #include <mach/dma.h> #include <mach/gpio-samsung.h> +#include <linux/platform_data/dma-s3c24xx.h> #include <linux/platform_data/mmc-s3cmci.h> #include 
"s3cmci.h" @@ -140,10 +142,6 @@ static const int dbgmap_debug = dbg_err | dbg_debug; dev_dbg(&host->pdev->dev, args); \ } while (0) -static struct s3c2410_dma_client s3cmci_dma_client = { - .name = "s3c-mci", -}; - static void finalize_request(struct s3cmci_host *host); static void s3cmci_send_request(struct mmc_host *mmc); static void s3cmci_reset(struct s3cmci_host *host); @@ -256,25 +254,8 @@ static inline bool s3cmci_host_usedma(struct s3cmci_host *host) { #ifdef CONFIG_MMC_S3C_PIO return false; -#elif defined(CONFIG_MMC_S3C_DMA) +#else /* CONFIG_MMC_S3C_DMA */ return true; -#else - return host->dodma; -#endif -} - -/** - * s3cmci_host_canpio - return true if host has pio code available - * - * Return true if the driver has been compiled with the PIO support code - * available. - */ -static inline bool s3cmci_host_canpio(void) -{ -#ifdef CONFIG_MMC_S3C_PIO - return true; -#else - return false; #endif } @@ -841,60 +822,24 @@ static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id) return IRQ_HANDLED; } -static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, - void *buf_id, int size, - enum s3c2410_dma_buffresult result) +static void s3cmci_dma_done_callback(void *arg) { - struct s3cmci_host *host = buf_id; + struct s3cmci_host *host = arg; unsigned long iflags; - u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt; - - mci_csta = readl(host->base + S3C2410_SDICMDSTAT); - mci_dsta = readl(host->base + S3C2410_SDIDSTA); - mci_fsta = readl(host->base + S3C2410_SDIFSTA); - mci_dcnt = readl(host->base + S3C2410_SDIDCNT); BUG_ON(!host->mrq); BUG_ON(!host->mrq->data); - BUG_ON(!host->dmatogo); spin_lock_irqsave(&host->complete_lock, iflags); - if (result != S3C2410_RES_OK) { - dbg(host, dbg_fail, "DMA FAILED: csta=0x%08x dsta=0x%08x " - "fsta=0x%08x dcnt:0x%08x result:0x%08x toGo:%u\n", - mci_csta, mci_dsta, mci_fsta, - mci_dcnt, result, host->dmatogo); - - goto fail_request; - } - - host->dmatogo--; - if (host->dmatogo) { - dbg(host, dbg_dma, "DMA DONE Size:%i DSTA:[%08x] " - "DCNT:[%08x] toGo:%u\n", - size, mci_dsta, mci_dcnt, host->dmatogo); - - goto out; - } - - dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", - size, mci_dsta, mci_dcnt); + dbg(host, dbg_dma, "DMA FINISHED\n"); host->dma_complete = 1; host->complete_what = COMPLETION_FINALIZE; -out: tasklet_schedule(&host->pio_tasklet); spin_unlock_irqrestore(&host->complete_lock, iflags); - return; -fail_request: - host->mrq->data->error = -EINVAL; - host->complete_what = COMPLETION_FINALIZE; - clear_imask(host); - - goto out; } static void finalize_request(struct s3cmci_host *host) @@ -966,7 +911,7 @@ static void finalize_request(struct s3cmci_host *host) * DMA channel and the fifo to clear out any garbage. */ if (mrq->data->error != 0) { if (s3cmci_host_usedma(host)) - s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); + dmaengine_terminate_all(host->dma); if (host->is2440) { /* Clear failure register and reset fifo. 
*/ @@ -992,29 +937,6 @@ request_done: mmc_request_done(host->mmc, mrq); } -static void s3cmci_dma_setup(struct s3cmci_host *host, - enum dma_data_direction source) -{ - static enum dma_data_direction last_source = -1; - static int setup_ok; - - if (last_source == source) - return; - - last_source = source; - - s3c2410_dma_devconfig(host->dma, source, - host->mem->start + host->sdidata); - - if (!setup_ok) { - s3c2410_dma_config(host->dma, 4); - s3c2410_dma_set_buffdone_fn(host->dma, - s3cmci_dma_done_callback); - s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART); - setup_ok = 1; - } -} - static void s3cmci_send_command(struct s3cmci_host *host, struct mmc_command *cmd) { @@ -1162,43 +1084,45 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data) static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) { - int dma_len, i; int rw = data->flags & MMC_DATA_WRITE; + struct dma_async_tx_descriptor *desc; + struct dma_slave_config conf = { + .src_addr = host->mem->start + host->sdidata, + .dst_addr = host->mem->start + host->sdidata, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + }; BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); - s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); - - dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - - if (dma_len == 0) - return -ENOMEM; - - host->dma_complete = 0; - host->dmatogo = dma_len; - - for (i = 0; i < dma_len; i++) { - int res; - - dbg(host, dbg_dma, "enqueue %i: %08x@%u\n", i, - sg_dma_address(&data->sg[i]), - sg_dma_len(&data->sg[i])); + /* Restore prescaler value */ + writel(host->prescaler, host->base + S3C2410_SDIPRE); - res = s3c2410_dma_enqueue(host->dma, host, - sg_dma_address(&data->sg[i]), - sg_dma_len(&data->sg[i])); + if (!rw) + conf.direction = DMA_DEV_TO_MEM; + else + conf.direction = DMA_MEM_TO_DEV; - if (res) { - s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); - return -EBUSY; - } - } + dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_START); + dmaengine_slave_config(host->dma, &conf); + desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len, + conf.direction, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) + goto unmap_exit; + desc->callback = s3cmci_dma_done_callback; + desc->callback_param = host; + dmaengine_submit(desc); + dma_async_issue_pending(host->dma); return 0; + +unmap_exit: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + return -ENOMEM; } static void s3cmci_send_request(struct mmc_host *mmc) @@ -1676,10 +1600,6 @@ static int s3cmci_probe(struct platform_device *pdev) host->complete_what = COMPLETION_NONE; host->pio_active = XFER_NONE; -#ifdef CONFIG_MMC_S3C_PIODMA - host->dodma = host->pdata->use_dma; -#endif - host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!host->mem) { dev_err(&pdev->dev, @@ -1765,17 +1685,17 @@ static int s3cmci_probe(struct platform_device *pdev) /* depending on the dma state, get a dma channel to use. 
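The s3cmci hunks above replace the private s3c2410_dma_* calls with the generic dmaengine slave API. The canonical sequence is: map the scatterlist, dmaengine_slave_config(), dmaengine_prep_slave_sg(), attach a completion callback, dmaengine_submit(), then dma_async_issue_pending(). A minimal receive-direction sketch, assuming the channel was already obtained with dma_request_slave_channel_compat() as in the patch (the names here are generic, not the driver's own):

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Illustrative RX setup: configure the slave side, map the scatterlist,
 * prepare a slave_sg descriptor, hook a callback, submit and kick the
 * engine.  Error handling is reduced to the essentials.
 */
static int example_start_rx(struct dma_chan *chan, struct device *dev,
			    dma_addr_t fifo_addr, struct scatterlist *sg,
			    unsigned int sg_len,
			    dma_async_tx_callback done, void *done_arg)
{
	struct dma_slave_config conf = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;

	if (!dma_map_sg(dev, sg, sg_len, DMA_FROM_DEVICE))
		return -ENOMEM;

	dmaengine_slave_config(chan, &conf);

	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_DEV_TO_MEM,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_unmap_sg(dev, sg, sg_len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	desc->callback = done;
	desc->callback_param = done_arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}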
*/ if (s3cmci_host_usedma(host)) { - host->dma = s3c2410_dma_request(DMACH_SDI, &s3cmci_dma_client, - host); - if (host->dma < 0) { + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma = dma_request_slave_channel_compat(mask, + s3c24xx_dma_filter, (void *)DMACH_SDI, &pdev->dev, "rx-tx"); + if (!host->dma) { dev_err(&pdev->dev, "cannot get DMA channel.\n"); - if (!s3cmci_host_canpio()) { - ret = -EBUSY; - goto probe_free_gpio_wp; - } else { - dev_warn(&pdev->dev, "falling back to PIO.\n"); - host->dodma = 0; - } + ret = -EBUSY; + goto probe_free_gpio_wp; } } @@ -1787,7 +1707,7 @@ static int s3cmci_probe(struct platform_device *pdev) goto probe_free_dma; } - ret = clk_enable(host->clk); + ret = clk_prepare_enable(host->clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock source.\n"); goto clk_free; @@ -1816,7 +1736,7 @@ static int s3cmci_probe(struct platform_device *pdev) mmc->max_segs = 128; dbg(host, dbg_debug, - "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%u.\n", + "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%p.\n", (host->is2440?"2440":""), host->base, host->irq, host->irq_cd, host->dma); @@ -1845,14 +1765,14 @@ static int s3cmci_probe(struct platform_device *pdev) s3cmci_cpufreq_deregister(host); free_dmabuf: - clk_disable(host->clk); + clk_disable_unprepare(host->clk); clk_free: clk_put(host->clk); probe_free_dma: if (s3cmci_host_usedma(host)) - s3c2410_dma_free(host->dma, &s3cmci_dma_client); + dma_release_channel(host->dma); probe_free_gpio_wp: if (!host->pdata->no_wprotect) @@ -1897,7 +1817,7 @@ static void s3cmci_shutdown(struct platform_device *pdev) s3cmci_debugfs_remove(host); s3cmci_cpufreq_deregister(host); mmc_remove_host(mmc); - clk_disable(host->clk); + clk_disable_unprepare(host->clk); } static int s3cmci_remove(struct platform_device *pdev) @@ -1914,7 +1834,7 @@ static int s3cmci_remove(struct platform_device *pdev) tasklet_disable(&host->pio_tasklet); if (s3cmci_host_usedma(host)) - s3c2410_dma_free(host->dma, &s3cmci_dma_client); + dma_release_channel(host->dma); free_irq(host->irq, host); diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h index c76b53dbeb61..cc2e46cb5c64 100644 --- a/drivers/mmc/host/s3cmci.h +++ b/drivers/mmc/host/s3cmci.h @@ -26,7 +26,7 @@ struct s3cmci_host { void __iomem *base; int irq; int irq_cd; - int dma; + struct dma_chan *dma; unsigned long clk_rate; unsigned long clk_div; @@ -36,8 +36,6 @@ struct s3cmci_host { int is2440; unsigned sdiimsk; unsigned sdidata; - int dodma; - int dmatogo; bool irq_disabled; bool irq_enabled; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 8ce3c28cb76e..8c5337002c51 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -124,9 +124,11 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .chip = &sdhci_acpi_chip_int, - .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET, + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR, .caps2 = MMC_CAP2_HC_ERASE_SZ, .flags = SDHCI_ACPI_RUNTIME_PM, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 40573a58486a..1a6661ed6205 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -16,7 +16,6 @@ #include <linux/module.h> #include 
<linux/of_device.h> -#include <linux/regulator/consumer.h> #include <linux/delay.h> #include <linux/mmc/mmc.h> #include <linux/slab.h> diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 52c42fcc284c..c3a1debc9289 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -103,6 +103,10 @@ static const struct sdhci_pci_fixes sdhci_cafe = { SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; +static const struct sdhci_pci_fixes sdhci_intel_qrk = { + .quirks = SDHCI_QUIRK_NO_HISPD_BIT, +}; + static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; @@ -264,7 +268,7 @@ static void sdhci_pci_int_hw_reset(struct sdhci_host *host) static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | - MMC_CAP_HW_RESET; + MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR; slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; slot->hw_reset = sdhci_pci_int_hw_reset; return 0; @@ -279,6 +283,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { .allow_runtime_pm = true, .probe_slot = byt_emmc_probe_slot, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { @@ -753,6 +758,14 @@ static const struct pci_device_id pci_ids[] = { { .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_QRK_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_qrk, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD0, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, @@ -1130,18 +1143,13 @@ static int sdhci_pci_suspend(struct device *dev) goto err_pci_suspend; } - pci_save_state(pdev); if (pm_flags & MMC_PM_KEEP_POWER) { - if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) { - pci_pme_active(pdev, true); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - pci_set_power_state(pdev, PCI_D3hot); - } else { - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); - } + if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) + device_init_wakeup(dev, true); + else + device_init_wakeup(dev, false); + } else + device_init_wakeup(dev, false); return 0; @@ -1162,12 +1170,6 @@ static int sdhci_pci_resume(struct device *dev) if (!chip) return 0; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; - if (chip->fixes && chip->fixes->resume) { ret = chip->fixes->resume(chip); if (ret) diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 6d718719659e..c101477ef3be 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -17,6 +17,7 @@ #define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 +#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 /* * PCI registers diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index f4f128947561..6f842fb8e6b8 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -288,15 +288,13 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) int ret; struct clk *clk; - pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL); + pxa = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_pxa), GFP_KERNEL); if (!pxa) return -ENOMEM; host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0); - if (IS_ERR(host)) { - kfree(pxa); + if (IS_ERR(host)) 
return PTR_ERR(host); - } if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); @@ -308,7 +306,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) pltfm_host = sdhci_priv(host); pltfm_host->priv = pxa; - clk = clk_get(dev, NULL); + clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "failed to get io clock\n"); ret = PTR_ERR(clk); @@ -389,11 +387,9 @@ err_add_host: pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(clk); - clk_put(clk); err_clk_get: err_mbus_win: sdhci_pltfm_free(pdev); - kfree(pxa); return ret; } @@ -401,17 +397,14 @@ static int sdhci_pxav3_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_pxa *pxa = pltfm_host->priv; pm_runtime_get_sync(&pdev->dev); sdhci_remove_host(host, 1); pm_runtime_disable(&pdev->dev); clk_disable_unprepare(pltfm_host->clk); - clk_put(pltfm_host->clk); sdhci_pltfm_free(pdev); - kfree(pxa); return 0; } diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c new file mode 100644 index 000000000000..328f348c7243 --- /dev/null +++ b/drivers/mmc/host/sdhci-st.c @@ -0,0 +1,176 @@ +/* + * Support for SDHCI on STMicroelectronics SoCs + * + * Copyright (C) 2014 STMicroelectronics Ltd + * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> + * Contributors: Peter Griffin <peter.griffin@linaro.org> + * + * Based on sdhci-cns3xxx.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/io.h> +#include <linux/of.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/mmc/host.h> + +#include "sdhci-pltfm.h" + +static u32 sdhci_st_readl(struct sdhci_host *host, int reg) +{ + u32 ret; + + switch (reg) { + case SDHCI_CAPABILITIES: + ret = readl_relaxed(host->ioaddr + reg); + /* Support 3.3V and 1.8V */ + ret &= ~SDHCI_CAN_VDD_300; + break; + default: + ret = readl_relaxed(host->ioaddr + reg); + } + return ret; +} + +static const struct sdhci_ops sdhci_st_ops = { + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .read_l = sdhci_st_readl, + .reset = sdhci_reset, +}; + +static const struct sdhci_pltfm_data sdhci_st_pdata = { + .ops = &sdhci_st_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +}; + + +static int sdhci_st_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct clk *clk; + int ret = 0; + u16 host_version; + + clk = devm_clk_get(&pdev->dev, "mmc"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Peripheral clk not found\n"); + return PTR_ERR(clk); + } + + host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, 0); + if (IS_ERR(host)) { + dev_err(&pdev->dev, "Failed sdhci_pltfm_init\n"); + return PTR_ERR(host); + } + + ret = mmc_of_parse(host->mmc); + + if (ret) { + dev_err(&pdev->dev, "Failed mmc_of_parse\n"); + return ret; + } + + clk_prepare_enable(clk); + + pltfm_host = sdhci_priv(host); + pltfm_host->clk = clk; + + ret = sdhci_add_host(host); + if (ret) { + dev_err(&pdev->dev, "Failed sdhci_add_host\n"); + goto err_out; + } + + platform_set_drvdata(pdev, host); + + host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); + + dev_info(&pdev->dev, "SDHCI ST Initialised: Host Version: 0x%x Vendor Version 0x%x\n", + ((host_version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT), + ((host_version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT)); + + return 0; + +err_out: + clk_disable_unprepare(clk); + sdhci_pltfm_free(pdev); + + return ret; +} + +static int sdhci_st_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + clk_disable_unprepare(pltfm_host->clk); + + return sdhci_pltfm_unregister(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_st_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + int ret = sdhci_suspend_host(host); + + if (ret) + goto out; + + clk_disable_unprepare(pltfm_host->clk); +out: + return ret; +} + +static int sdhci_st_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + clk_prepare_enable(pltfm_host->clk); + + return sdhci_resume_host(host); +} +#endif + +static SIMPLE_DEV_PM_OPS(sdhci_st_pmops, sdhci_st_suspend, sdhci_st_resume); + +static const struct of_device_id st_sdhci_match[] = { + { .compatible = "st,sdhci" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, st_sdhci_match); + +static struct platform_driver sdhci_st_driver = { + .probe = sdhci_st_probe, + .remove = sdhci_st_remove, + .driver = { + .name = "sdhci-st", + .pm = &sdhci_st_pmops, + .of_match_table = of_match_ptr(st_sdhci_match), + }, +}; + +module_platform_driver(sdhci_st_driver); + +MODULE_DESCRIPTION("SDHCI driver for STMicroelectronics SoCs"); +MODULE_AUTHOR("Giuseppe Cavallaro 
<peppe.cavallaro@st.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:st-sdhci"); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index d93a063a36f3..33100d10d176 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -26,8 +26,6 @@ #include <linux/mmc/host.h> #include <linux/mmc/slot-gpio.h> -#include <asm/gpio.h> - #include "sdhci-pltfm.h" /* Tegra SDHOST controller vendor register definitions */ diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 47055f3f01b8..37b2a9ae52ef 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1223,8 +1223,16 @@ EXPORT_SYMBOL_GPL(sdhci_set_clock); static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) { + struct mmc_host *mmc = host->mmc; u8 pwr = 0; + if (!IS_ERR(mmc->supply.vmmc)) { + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); + return; + } + if (mode != MMC_POWER_OFF) { switch (1 << vdd) { case MMC_VDD_165_195: @@ -1283,12 +1291,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) mdelay(10); } - - if (host->vmmc) { - spin_unlock_irq(&host->lock); - mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd); - spin_lock_irq(&host->lock); - } } /*****************************************************************************\ @@ -1440,13 +1442,15 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) { unsigned long flags; u8 ctrl; + struct mmc_host *mmc = host->mmc; spin_lock_irqsave(&host->lock, flags); if (host->flags & SDHCI_DEVICE_DEAD) { spin_unlock_irqrestore(&host->lock, flags); - if (host->vmmc && ios->power_mode == MMC_POWER_OFF) - mmc_regulator_set_ocr(host->mmc, host->vmmc, 0); + if (!IS_ERR(mmc->supply.vmmc) && + ios->power_mode == MMC_POWER_OFF) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); return; } @@ -1530,7 +1534,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) host->ops->set_clock(host, host->clock); } - /* Reset SD Clock Enable */ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); clk &= ~SDHCI_CLOCK_CARD_EN; @@ -1707,6 +1710,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios) { + struct mmc_host *mmc = host->mmc; u16 ctrl; int ret; @@ -1725,11 +1729,12 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, ctrl &= ~SDHCI_CTRL_VDD_180; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - if (host->vqmmc) { - ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000); + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000, + 3600000); if (ret) { pr_warning("%s: Switching to 3.3V signalling voltage " - " failed\n", mmc_hostname(host->mmc)); + " failed\n", mmc_hostname(mmc)); return -EIO; } } @@ -1742,16 +1747,16 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, return 0; pr_warning("%s: 3.3V regulator output did not became stable\n", - mmc_hostname(host->mmc)); + mmc_hostname(mmc)); return -EAGAIN; case MMC_SIGNAL_VOLTAGE_180: - if (host->vqmmc) { - ret = regulator_set_voltage(host->vqmmc, + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_set_voltage(mmc->supply.vqmmc, 1700000, 1950000); if (ret) { pr_warning("%s: Switching to 1.8V signalling voltage " - " failed\n", mmc_hostname(host->mmc)); + " failed\n", 
mmc_hostname(mmc)); return -EIO; } } @@ -1763,24 +1768,22 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, ctrl |= SDHCI_CTRL_VDD_180; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - /* Wait for 5ms */ - usleep_range(5000, 5500); - /* 1.8V regulator output should be stable within 5 ms */ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); if (ctrl & SDHCI_CTRL_VDD_180) return 0; pr_warning("%s: 1.8V regulator output did not became stable\n", - mmc_hostname(host->mmc)); + mmc_hostname(mmc)); return -EAGAIN; case MMC_SIGNAL_VOLTAGE_120: - if (host->vqmmc) { - ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000); + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000, + 1300000); if (ret) { pr_warning("%s: Switching to 1.2V signalling voltage " - " failed\n", mmc_hostname(host->mmc)); + " failed\n", mmc_hostname(mmc)); return -EIO; } } @@ -2643,7 +2646,6 @@ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) int sdhci_runtime_suspend_host(struct sdhci_host *host) { unsigned long flags; - int ret = 0; /* Disable tuning since we are suspending */ if (host->flags & SDHCI_USING_RETUNING_TIMER) { @@ -2663,14 +2665,14 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) host->runtime_suspended = true; spin_unlock_irqrestore(&host->lock, flags); - return ret; + return 0; } EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); int sdhci_runtime_resume_host(struct sdhci_host *host) { unsigned long flags; - int ret = 0, host_flags = host->flags; + int host_flags = host->flags; if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->enable_dma) @@ -2709,7 +2711,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) spin_unlock_irqrestore(&host->lock, flags); - return ret; + return 0; } EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); @@ -2820,12 +2822,12 @@ int sdhci_add_host(struct sdhci_host *host) * (128) and potentially one alignment transfer for * each of those entries. 
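Note on the regulator rework above: the driver-private host->vmmc/host->vqmmc pointers are replaced by the core-managed mmc->supply pair, with absent supplies left as ERR_PTR() values. The fragment below is only an illustrative sketch of that pattern for a generic host driver, not sdhci code; the example_-prefixed helpers are invented for the illustration.

#include <linux/err.h>
#include <linux/mmc/host.h>

static int example_host_get_supplies(struct mmc_host *mmc)
{
	/* Let the core request "vmmc"/"vqmmc" and fill mmc->supply. */
	int ret = mmc_regulator_get_supply(mmc);

	if (ret == -EPROBE_DEFER)
		return ret;

	/* Absent regulators are left as ERR_PTR(), so always test with IS_ERR(). */
	if (!IS_ERR(mmc->supply.vmmc))
		dev_info(mmc_dev(mmc), "vmmc handled by an external regulator\n");

	return 0;
}

static void example_host_set_power(struct mmc_host *mmc, unsigned short vdd, bool on)
{
	if (IS_ERR(mmc->supply.vmmc))
		return;	/* fall back to the controller's own power control */

	/* vdd is an MMC_VDD_* bit number; passing 0 switches the supply off. */
	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, on ? vdd : 0);
}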
*/ - host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc), + host->adma_desc = dma_alloc_coherent(mmc_dev(mmc), ADMA_SIZE, &host->adma_addr, GFP_KERNEL); host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); if (!host->adma_desc || !host->align_buffer) { - dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, host->adma_desc, host->adma_addr); kfree(host->align_buffer); pr_warning("%s: Unable to allocate ADMA " @@ -2838,7 +2840,7 @@ int sdhci_add_host(struct sdhci_host *host) pr_warning("%s: unable to allocate aligned ADMA descriptor\n", mmc_hostname(mmc)); host->flags &= ~SDHCI_USE_ADMA; - dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, host->adma_desc, host->adma_addr); kfree(host->align_buffer); host->adma_desc = NULL; @@ -2853,7 +2855,7 @@ int sdhci_add_host(struct sdhci_host *host) */ if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { host->dma_mask = DMA_BIT_MASK(64); - mmc_dev(host->mmc)->dma_mask = &host->dma_mask; + mmc_dev(mmc)->dma_mask = &host->dma_mask; } if (host->version >= SDHCI_SPEC_300) @@ -2959,28 +2961,25 @@ int sdhci_add_host(struct sdhci_host *host) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && - !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) + !(mmc->caps & MMC_CAP_NONREMOVABLE)) mmc->caps |= MMC_CAP_NEEDS_POLL; + /* If there are external regulators, get them */ + if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ - host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc"); - if (IS_ERR_OR_NULL(host->vqmmc)) { - if (PTR_ERR(host->vqmmc) < 0) { - pr_info("%s: no vqmmc regulator found\n", - mmc_hostname(mmc)); - host->vqmmc = NULL; - } - } else { - ret = regulator_enable(host->vqmmc); - if (!regulator_is_supported_voltage(host->vqmmc, 1700000, - 1950000)) + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_enable(mmc->supply.vqmmc); + if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, + 1950000)) caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50); if (ret) { pr_warn("%s: Failed to enable vqmmc regulator: %d\n", mmc_hostname(mmc), ret); - host->vqmmc = NULL; + mmc->supply.vqmmc = NULL; } } @@ -3041,34 +3040,6 @@ int sdhci_add_host(struct sdhci_host *host) ocr_avail = 0; - host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc"); - if (IS_ERR_OR_NULL(host->vmmc)) { - if (PTR_ERR(host->vmmc) < 0) { - pr_info("%s: no vmmc regulator found\n", - mmc_hostname(mmc)); - host->vmmc = NULL; - } - } - -#ifdef CONFIG_REGULATOR - /* - * Voltage range check makes sense only if regulator reports - * any voltage value. - */ - if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) { - ret = regulator_is_supported_voltage(host->vmmc, 2700000, - 3600000); - if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330))) - caps[0] &= ~SDHCI_CAN_VDD_330; - if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300))) - caps[0] &= ~SDHCI_CAN_VDD_300; - ret = regulator_is_supported_voltage(host->vmmc, 1700000, - 1950000); - if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180))) - caps[0] &= ~SDHCI_CAN_VDD_180; - } -#endif /* CONFIG_REGULATOR */ - /* * According to SD Host Controller spec v3.00, if the Host System * can afford more than 150mA, Host Driver should set XPC to 1. Also @@ -3077,8 +3048,8 @@ int sdhci_add_host(struct sdhci_host *host) * value. 
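The ocr_avail handling changed further down now prefers an OCR mask provided by external regulators and treats host->ocr_mask as a restriction (AND) rather than an override. A minimal sketch of that composition, with invented names and no claim to match the sdhci code line for line:

#include <linux/types.h>

static u32 example_effective_ocr(u32 cap_ocr, u32 regulator_ocr, u32 platform_mask)
{
	u32 ocr = cap_ocr;

	/* An external regulator knows the real voltages better than the caps. */
	if (regulator_ocr)
		ocr = regulator_ocr;

	/* A platform mask may only restrict, never extend, what is advertised. */
	if (platform_mask)
		ocr &= platform_mask;

	return ocr;
}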
*/ max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); - if (!max_current_caps && host->vmmc) { - u32 curr = regulator_get_current_limit(host->vmmc); + if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { + u32 curr = regulator_get_current_limit(mmc->supply.vmmc); if (curr > 0) { /* convert to SDHCI_MAX_CURRENT format */ @@ -3118,8 +3089,12 @@ int sdhci_add_host(struct sdhci_host *host) SDHCI_MAX_CURRENT_MULTIPLIER; } + /* If OCR set by external regulators, use it instead */ + if (mmc->ocr_avail) + ocr_avail = mmc->ocr_avail; + if (host->ocr_mask) - ocr_avail = host->ocr_mask; + ocr_avail &= host->ocr_mask; mmc->ocr_avail = ocr_avail; mmc->ocr_avail_sdio = ocr_avail; @@ -3273,6 +3248,7 @@ EXPORT_SYMBOL_GPL(sdhci_add_host); void sdhci_remove_host(struct sdhci_host *host, int dead) { + struct mmc_host *mmc = host->mmc; unsigned long flags; if (dead) { @@ -3282,7 +3258,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) if (host->mrq) { pr_err("%s: Controller removed during " - " transfer!\n", mmc_hostname(host->mmc)); + " transfer!\n", mmc_hostname(mmc)); host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); @@ -3293,7 +3269,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) sdhci_disable_card_detection(host); - mmc_remove_host(host->mmc); + mmc_remove_host(mmc); #ifdef SDHCI_USE_LEDS_CLASS led_classdev_unregister(&host->led); @@ -3310,18 +3286,14 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) tasklet_kill(&host->finish_tasklet); - if (host->vmmc) { - regulator_disable(host->vmmc); - regulator_put(host->vmmc); - } + if (!IS_ERR(mmc->supply.vmmc)) + regulator_disable(mmc->supply.vmmc); - if (host->vqmmc) { - regulator_disable(host->vqmmc); - regulator_put(host->vqmmc); - } + if (!IS_ERR(mmc->supply.vqmmc)) + regulator_disable(mmc->supply.vqmmc); if (host->adma_desc) - dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, host->adma_desc, host->adma_addr); kfree(host->align_buffer); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 656fbba4c422..d11708c815d7 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -386,7 +386,7 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, struct sh_mmcif_plat_data *pdata, enum dma_transfer_direction direction) { - struct dma_slave_config cfg; + struct dma_slave_config cfg = { 0, }; struct dma_chan *chan; unsigned int slave_id; struct resource *res; @@ -417,8 +417,15 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, /* In the OF case the driver will get the slave ID from the DT */ cfg.slave_id = slave_id; cfg.direction = direction; - cfg.dst_addr = res->start + MMCIF_CE_DATA; - cfg.src_addr = 0; + + if (direction == DMA_DEV_TO_MEM) { + cfg.src_addr = res->start + MMCIF_CE_DATA; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } else { + cfg.dst_addr = res->start + MMCIF_CE_DATA; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } + ret = dmaengine_slave_config(chan, &cfg); if (ret < 0) { dma_release_channel(chan); @@ -1378,26 +1385,19 @@ static int sh_mmcif_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Get irq error\n"); return -ENXIO; } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "platform_get_resource error.\n"); - return -ENXIO; - } - reg = ioremap(res->start, resource_size(res)); - if (!reg) { - dev_err(&pdev->dev, "ioremap error.\n"); - return -ENOMEM; - } + reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg)) + 
return PTR_ERR(reg); mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - goto ealloch; - } + if (!mmc) + return -ENOMEM; ret = mmc_of_parse(mmc); if (ret < 0) - goto eofparse; + goto err_host; host = mmc_priv(mmc); host->mmc = mmc; @@ -1427,19 +1427,19 @@ static int sh_mmcif_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); host->power = false; - host->hclk = clk_get(&pdev->dev, NULL); + host->hclk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(host->hclk)) { ret = PTR_ERR(host->hclk); dev_err(&pdev->dev, "cannot get clock: %d\n", ret); - goto eclkget; + goto err_pm; } ret = sh_mmcif_clk_update(host); if (ret < 0) - goto eclkupdate; + goto err_pm; ret = pm_runtime_resume(&pdev->dev); if (ret < 0) - goto eresume; + goto err_clk; INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); @@ -1447,65 +1447,55 @@ static int sh_mmcif_probe(struct platform_device *pdev) sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error"; - ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host); + ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr, + sh_mmcif_irqt, 0, name, host); if (ret) { dev_err(&pdev->dev, "request_irq error (%s)\n", name); - goto ereqirq0; + goto err_clk; } if (irq[1] >= 0) { - ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, - 0, "sh_mmc:int", host); + ret = devm_request_threaded_irq(&pdev->dev, irq[1], + sh_mmcif_intr, sh_mmcif_irqt, + 0, "sh_mmc:int", host); if (ret) { dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); - goto ereqirq1; + goto err_clk; } } if (pd && pd->use_cd_gpio) { ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0); if (ret < 0) - goto erqcd; + goto err_clk; } mutex_init(&host->thread_lock); - clk_disable_unprepare(host->hclk); ret = mmc_add_host(mmc); if (ret < 0) - goto emmcaddh; + goto err_clk; dev_pm_qos_expose_latency_limit(&pdev->dev, 100); - dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); - dev_dbg(&pdev->dev, "chip ver H'%04x\n", - sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); + dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %luMHz\n", + sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff, + clk_get_rate(host->hclk) / 1000000UL); + + clk_disable_unprepare(host->hclk); return ret; -emmcaddh: -erqcd: - if (irq[1] >= 0) - free_irq(irq[1], host); -ereqirq1: - free_irq(irq[0], host); -ereqirq0: - pm_runtime_suspend(&pdev->dev); -eresume: +err_clk: clk_disable_unprepare(host->hclk); -eclkupdate: - clk_put(host->hclk); -eclkget: +err_pm: pm_runtime_disable(&pdev->dev); -eofparse: +err_host: mmc_free_host(mmc); -ealloch: - iounmap(reg); return ret; } static int sh_mmcif_remove(struct platform_device *pdev) { struct sh_mmcif_host *host = platform_get_drvdata(pdev); - int irq[2]; host->dying = true; clk_prepare_enable(host->hclk); @@ -1523,16 +1513,6 @@ static int sh_mmcif_remove(struct platform_device *pdev) */ cancel_delayed_work_sync(&host->timeout_work); - if (host->addr) - iounmap(host->addr); - - irq[0] = platform_get_irq(pdev, 0); - irq[1] = platform_get_irq(pdev, 1); - - free_irq(irq[0], host); - if (irq[1] >= 0) - free_irq(irq[1], host); - clk_disable_unprepare(host->hclk); mmc_free_host(host->mmc); pm_runtime_put_sync(&pdev->dev); diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 03e7b280cb4c..eb8f1d5c34b1 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -294,6 +294,7 @@ 
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat cfg.slave_id = pdata->dma->slave_id_tx; cfg.direction = DMA_MEM_TO_DEV; cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift); + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; cfg.src_addr = 0; ret = dmaengine_slave_config(host->chan_tx, &cfg); if (ret < 0) @@ -312,6 +313,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat cfg.slave_id = pdata->dma->slave_id_rx; cfg.direction = DMA_DEV_TO_MEM; cfg.src_addr = cfg.dst_addr; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; cfg.dst_addr = 0; ret = dmaengine_slave_config(host->chan_rx, &cfg); if (ret < 0) diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 282891a8e451..54181b4f6e9e 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -72,7 +72,6 @@ #define BM_SPI_CS 0x20 #define BM_SD_POWER 0x40 #define BM_SOFT_RESET 0x80 -#define BM_ONEBIT_MASK 0xFD /* SDMMC_BLKLEN bit fields */ #define BLKL_CRCERR_ABORT 0x0800 @@ -120,6 +119,8 @@ #define STS2_DATARSP_BUSY 0x20 #define STS2_DIS_FORCECLK 0x80 +/* SDMMC_EXTCTRL bit fields */ +#define EXT_EIGHTBIT 0x04 /* MMC/SD DMA Controller Registers */ #define SDDMA_GCR 0x100 @@ -672,7 +673,7 @@ static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req) static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct wmt_mci_priv *priv; - u32 reg_tmp; + u32 busmode, extctrl; priv = mmc_priv(mmc); @@ -687,28 +688,26 @@ static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->clock != 0) clk_set_rate(priv->clk_sdmmc, ios->clock); + busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE); + extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL); + + busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE); + extctrl &= ~EXT_EIGHTBIT; + switch (ios->bus_width) { case MMC_BUS_WIDTH_8: - reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); - writeb(reg_tmp | 0x04, priv->sdmmc_base + SDMMC_EXTCTRL); + busmode |= BM_EIGHTBIT_MODE; + extctrl |= EXT_EIGHTBIT; break; case MMC_BUS_WIDTH_4: - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp | BM_FOURBIT_MODE, priv->sdmmc_base + - SDMMC_BUSMODE); - - reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); - writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL); + busmode |= BM_FOURBIT_MODE; break; case MMC_BUS_WIDTH_1: - reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); - writeb(reg_tmp & BM_ONEBIT_MASK, priv->sdmmc_base + - SDMMC_BUSMODE); - - reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); - writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL); break; } + + writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE); + writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL); } static int wmt_mci_get_ro(struct mmc_host *mmc) @@ -830,7 +829,7 @@ static int wmt_mci_probe(struct platform_device *pdev) goto fail3; } - ret = request_irq(dma_irq, wmt_mci_dma_isr, 32, "sdmmc", priv); + ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv); if (ret) { dev_err(&pdev->dev, "Register DMA IRQ fail\n"); goto fail4; diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index e21fde9d4d7e..46c4643b7a07 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -58,7 +58,18 @@ static void cfi_amdstd_sync (struct mtd_info *); static int cfi_amdstd_suspend (struct mtd_info *); static void cfi_amdstd_resume (struct mtd_info *); static int cfi_amdstd_reboot(struct notifier_block *, 
unsigned long, void *); +static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t, + size_t *, struct otp_info *); +static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t, + size_t *, struct otp_info *); static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); +static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t, + size_t *, u_char *); +static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t, + size_t *, u_char *); +static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t, + size_t *, u_char *); +static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t); static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); @@ -518,6 +529,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) mtd->_sync = cfi_amdstd_sync; mtd->_suspend = cfi_amdstd_suspend; mtd->_resume = cfi_amdstd_resume; + mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg; + mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg; + mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info; + mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info; + mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg; + mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg; mtd->flags = MTD_CAP_NORFLASH; mtd->name = map->name; mtd->writesize = 1; @@ -628,6 +645,23 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; + /* + * First calculate the timeout max according to timeout field + * of struct cfi_ident that probed from chip's CFI aera, if + * available. Specify a minimum of 2000us, in case the CFI data + * is wrong. 
+ */ + if (cfi->cfiq->BufWriteTimeoutTyp && + cfi->cfiq->BufWriteTimeoutMax) + cfi->chips[i].buffer_write_time_max = + 1 << (cfi->cfiq->BufWriteTimeoutTyp + + cfi->cfiq->BufWriteTimeoutMax); + else + cfi->chips[i].buffer_write_time_max = 0; + + cfi->chips[i].buffer_write_time_max = + max(cfi->chips[i].buffer_write_time_max, 2000); + cfi->chips[i].ref_point_counter = 0; init_waitqueue_head(&(cfi->chips[i].wq)); } @@ -1137,12 +1171,48 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_ return ret; } +typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, + loff_t adr, size_t len, u_char *buf, size_t grouplen); + +static inline void otp_enter(struct map_info *map, struct flchip *chip, + loff_t adr, size_t len) +{ + struct cfi_private *cfi = map->fldrv_priv; + + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + + INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); +} + +static inline void otp_exit(struct map_info *map, struct flchip *chip, + loff_t adr, size_t len) +{ + struct cfi_private *cfi = map->fldrv_priv; + + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + + INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); +} -static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) +static inline int do_read_secsi_onechip(struct map_info *map, + struct flchip *chip, loff_t adr, + size_t len, u_char *buf, + size_t grouplen) { DECLARE_WAITQUEUE(wait, current); unsigned long timeo = jiffies + HZ; - struct cfi_private *cfi = map->fldrv_priv; retry: mutex_lock(&chip->mutex); @@ -1164,16 +1234,9 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi chip->state = FL_READY; - cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); - + otp_enter(map, chip, adr, len); map_copy_from(map, buf, adr, len); - - cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); + otp_exit(map, chip, adr, len); wake_up(&chip->wq); mutex_unlock(&chip->mutex); @@ -1205,7 +1268,8 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, else thislen = len; - ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); + ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, + thislen, buf, 0); if (ret) break; @@ -1219,8 +1283,271 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, return ret; } +static int __xipram do_write_oneword(struct map_info 
*map, struct flchip *chip, + unsigned long adr, map_word datum, + int mode); + +static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr, + size_t len, u_char *buf, size_t grouplen) +{ + int ret; + while (len) { + unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1); + int gap = adr - bus_ofs; + int n = min_t(int, len, map_bankwidth(map) - gap); + map_word datum; + + if (n != map_bankwidth(map)) { + /* partial write of a word, load old contents */ + otp_enter(map, chip, bus_ofs, map_bankwidth(map)); + datum = map_read(map, bus_ofs); + otp_exit(map, chip, bus_ofs, map_bankwidth(map)); + } + + datum = map_word_load_partial(map, datum, buf, gap, n); + ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); + if (ret) + return ret; + + adr += n; + buf += n; + len -= n; + } + + return 0; +} + +static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, + size_t len, u_char *buf, size_t grouplen) +{ + struct cfi_private *cfi = map->fldrv_priv; + uint8_t lockreg; + unsigned long timeo; + int ret; + + /* make sure area matches group boundaries */ + if ((adr != 0) || (len != grouplen)) + return -EINVAL; + + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, chip->start, FL_LOCKING); + if (ret) { + mutex_unlock(&chip->mutex); + return ret; + } + chip->state = FL_LOCKING; + + /* Enter lock register command */ + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + + /* read lock register */ + lockreg = cfi_read_query(map, 0); + + /* set bit 0 to protect extended memory block */ + lockreg &= ~0x01; + + /* set bit 0 to protect extended memory block */ + /* write lock register */ + map_write(map, CMD(0xA0), chip->start); + map_write(map, CMD(lockreg), chip->start); + + /* wait for chip to become ready */ + timeo = jiffies + msecs_to_jiffies(2); + for (;;) { + if (chip_ready(map, adr)) + break; + + if (time_after(jiffies, timeo)) { + pr_err("Waiting for chip to be ready timed out.\n"); + ret = -EIO; + break; + } + UDELAY(map, chip, 0, 1); + } + + /* exit protection commands */ + map_write(map, CMD(0x90), chip->start); + map_write(map, CMD(0x00), chip->start); + + chip->state = FL_READY; + put_chip(map, chip, chip->start); + mutex_unlock(&chip->mutex); + + return ret; +} + +static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf, + otp_op_t action, int user_regs) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + int ofs_factor = cfi->interleave * cfi->device_type; + unsigned long base; + int chipnum; + struct flchip *chip; + uint8_t otp, lockreg; + int ret; + + size_t user_size, factory_size, otpsize; + loff_t user_offset, factory_offset, otpoffset; + int user_locked = 0, otplocked; + + *retlen = 0; + + for (chipnum = 0; chipnum < cfi->numchips; chipnum++) { + chip = &cfi->chips[chipnum]; + factory_size = 0; + user_size = 0; + + /* Micron M29EW family */ + if (is_m29ew(cfi)) { + base = chip->start; + + /* check whether secsi area is factory locked + or user lockable */ + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, base, FL_CFI_QUERY); + if (ret) { + mutex_unlock(&chip->mutex); + return ret; + } + cfi_qry_mode_on(base, map, cfi); + otp = cfi_read_query(map, base + 0x3 * ofs_factor); + cfi_qry_mode_off(base, map, cfi); + put_chip(map, 
chip, base); + mutex_unlock(&chip->mutex); + + if (otp & 0x80) { + /* factory locked */ + factory_offset = 0; + factory_size = 0x100; + } else { + /* customer lockable */ + user_offset = 0; + user_size = 0x100; + + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, base, FL_LOCKING); + if (ret) { + mutex_unlock(&chip->mutex); + return ret; + } + + /* Enter lock register command */ + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, + chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, + chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x40, cfi->addr_unlock1, + chip->start, map, cfi, + cfi->device_type, NULL); + /* read lock register */ + lockreg = cfi_read_query(map, 0); + /* exit protection commands */ + map_write(map, CMD(0x90), chip->start); + map_write(map, CMD(0x00), chip->start); + put_chip(map, chip, chip->start); + mutex_unlock(&chip->mutex); + + user_locked = ((lockreg & 0x01) == 0x00); + } + } + + otpsize = user_regs ? user_size : factory_size; + if (!otpsize) + continue; + otpoffset = user_regs ? user_offset : factory_offset; + otplocked = user_regs ? user_locked : 1; + + if (!action) { + /* return otpinfo */ + struct otp_info *otpinfo; + len -= sizeof(*otpinfo); + if (len <= 0) + return -ENOSPC; + otpinfo = (struct otp_info *)buf; + otpinfo->start = from; + otpinfo->length = otpsize; + otpinfo->locked = otplocked; + buf += sizeof(*otpinfo); + *retlen += sizeof(*otpinfo); + from += otpsize; + } else if ((from < otpsize) && (len > 0)) { + size_t size; + size = (len < otpsize - from) ? len : otpsize - from; + ret = action(map, chip, otpoffset + from, size, buf, + otpsize); + if (ret < 0) + return ret; + + buf += size; + len -= size; + *retlen += size; + from = 0; + } else { + from -= otpsize; + } + } + return 0; +} + +static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf) +{ + return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, + NULL, 0); +} + +static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf) +{ + return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, + NULL, 1); +} + +static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, + u_char *buf) +{ + return cfi_amdstd_otp_walk(mtd, from, len, retlen, + buf, do_read_secsi_onechip, 0); +} + +static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, + u_char *buf) +{ + return cfi_amdstd_otp_walk(mtd, from, len, retlen, + buf, do_read_secsi_onechip, 1); +} -static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) +static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, + u_char *buf) +{ + return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf, + do_otp_write, 1); +} + +static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, + size_t len) +{ + size_t retlen; + return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL, + do_otp_lock, 1); +} + +static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, + unsigned long adr, map_word datum, + int mode) { struct cfi_private *cfi = map->fldrv_priv; unsigned long timeo = jiffies + HZ; @@ -1241,7 +1568,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, adr += chip->start; mutex_lock(&chip->mutex); - ret = get_chip(map, chip, 
adr, FL_WRITING); + ret = get_chip(map, chip, adr, mode); if (ret) { mutex_unlock(&chip->mutex); return ret; @@ -1250,6 +1577,9 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, datum.x[0] ); + if (mode == FL_OTP_WRITE) + otp_enter(map, chip, adr, map_bankwidth(map)); + /* * Check for a NOP for the case when the datum to write is already * present - it saves time and works around buggy chips that corrupt @@ -1266,12 +1596,13 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); ENABLE_VPP(map); xip_disable(map, chip, adr); + retry: cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map_write(map, datum, adr); - chip->state = FL_WRITING; + chip->state = mode; INVALIDATE_CACHE_UDELAY(map, chip, adr, map_bankwidth(map), @@ -1280,7 +1611,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, /* See comment above for timeout value. */ timeo = jiffies + uWriteTimeout; for (;;) { - if (chip->state != FL_WRITING) { + if (chip->state != mode) { /* Someone's suspended the write. Sleep */ DECLARE_WAITQUEUE(wait, current); @@ -1320,6 +1651,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, } xip_enable(map, chip, adr); op_done: + if (mode == FL_OTP_WRITE) + otp_exit(map, chip, adr, map_bankwidth(map)); chip->state = FL_READY; DISABLE_VPP(map); put_chip(map, chip, adr); @@ -1375,7 +1708,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); ret = do_write_oneword(map, &cfi->chips[chipnum], - bus_ofs, tmp_buf); + bus_ofs, tmp_buf, FL_WRITING); if (ret) return ret; @@ -1399,7 +1732,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, datum = map_word_load(map, buf); ret = do_write_oneword(map, &cfi->chips[chipnum], - ofs, datum); + ofs, datum, FL_WRITING); if (ret) return ret; @@ -1442,7 +1775,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); ret = do_write_oneword(map, &cfi->chips[chipnum], - ofs, tmp_buf); + ofs, tmp_buf, FL_WRITING); if (ret) return ret; @@ -1462,8 +1795,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, { struct cfi_private *cfi = map->fldrv_priv; unsigned long timeo = jiffies + HZ; - /* see comments in do_write_oneword() regarding uWriteTimeo. */ - unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; + /* + * Timeout is calculated according to CFI data, if available. + * See more comments in cfi_cmdset_0002(). 
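The uWriteTimeout change that follows relies on the buffer_write_time_max value computed earlier in cfi_cmdset_0002(): 1 << (BufWriteTimeoutTyp + BufWriteTimeoutMax) microseconds when both CFI fields are present, clamped to at least 2000 us. A standalone restatement of that derivation (illustrative only, invented function name):

#include <linux/kernel.h>
#include <linux/jiffies.h>

static unsigned long example_buffer_write_timeout(unsigned int timeout_typ,
						  unsigned int timeout_max)
{
	unsigned int us = 0;

	/* Both fields come from the CFI query data; the result is in us. */
	if (timeout_typ && timeout_max)
		us = 1U << (timeout_typ + timeout_max);

	/* Guard against bogus CFI data: never wait less than 2 ms. */
	us = max(us, 2000U);

	return usecs_to_jiffies(us);
}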
+ */ + unsigned long uWriteTimeout = + usecs_to_jiffies(chip->buffer_write_time_max); int ret = -EIO; unsigned long cmd_adr; int z, words; diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 3e829b37af8d..c8503006f17a 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -26,7 +26,7 @@ * <mtd-id> := unique name used in mapping driver/device (mtd->name) * <size> := standard linux memsize OR "-" to denote all remaining space * size is automatically truncated at end of device - * if specified or trucated size is 0 the part is skipped + * if specified or truncated size is 0 the part is skipped * <offset> := standard linux memsize * if omitted the part will immediately follow the previous part * or 0 if the first part diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 2cceebfb251e..effd9a4ef7ee 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -181,12 +181,10 @@ static int parse_name(char **pname, const char *token) if (len > 64) return -ENOSPC; - name = kmalloc(len, GFP_KERNEL); + name = kstrdup(token, GFP_KERNEL); if (!name) return -ENOMEM; - strcpy(name, token); - *pname = name; return 0; } @@ -195,6 +193,7 @@ static int parse_name(char **pname, const char *token) static inline void kill_final_newline(char *str) { char *newline = strrchr(str, '\n'); + if (newline && !newline[1]) *newline = 0; } @@ -233,7 +232,7 @@ static int phram_setup(const char *val) strcpy(str, val); kill_final_newline(str); - for (i=0; i<3; i++) + for (i = 0; i < 3; i++) token[i] = strsep(&str, ","); if (str) diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index 19d637266fcd..dabf08450d0b 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c @@ -111,7 +111,6 @@ typedef struct partition_t { struct mtd_blktrans_dev mbd; uint32_t state; uint32_t *VirtualBlockMap; - uint32_t *VirtualPageMap; uint32_t FreeTotal; struct eun_info_t { uint32_t Offset; @@ -1035,8 +1034,6 @@ static void ftl_freepart(partition_t *part) { vfree(part->VirtualBlockMap); part->VirtualBlockMap = NULL; - kfree(part->VirtualPageMap); - part->VirtualPageMap = NULL; kfree(part->EUNInfo); part->EUNInfo = NULL; kfree(part->XferInfo); @@ -1075,7 +1072,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) return; } - ftl_freepart(partition); kfree(partition); } diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c index 146b6047ed2b..a84fdfb10518 100644 --- a/drivers/mtd/maps/rbtx4939-flash.c +++ b/drivers/mtd/maps/rbtx4939-flash.c @@ -35,8 +35,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev) return 0; if (info->mtd) { - struct rbtx4939_flash_data *pdata = dev_get_platdata(&dev->dev); - mtd_device_unregister(info->mtd); map_destroy(info->mtd); } diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index d201feeb3ca6..e4831b4159db 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -298,6 +298,47 @@ static ssize_t mtd_ecc_step_size_show(struct device *dev, } static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL); +static ssize_t mtd_ecc_stats_corrected_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; + + return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected); +} +static DEVICE_ATTR(corrected_bits, S_IRUGO, + mtd_ecc_stats_corrected_show, NULL); + +static ssize_t mtd_ecc_stats_errors_show(struct device *dev, + struct 
device_attribute *attr, char *buf) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; + + return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed); +} +static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL); + +static ssize_t mtd_badblocks_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; + + return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks); +} +static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL); + +static ssize_t mtd_bbtblocks_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; + + return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks); +} +static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL); + static struct attribute *mtd_attrs[] = { &dev_attr_type.attr, &dev_attr_flags.attr, @@ -310,6 +351,10 @@ static struct attribute *mtd_attrs[] = { &dev_attr_name.attr, &dev_attr_ecc_strength.attr, &dev_attr_ecc_step_size.attr, + &dev_attr_corrected_bits.attr, + &dev_attr_ecc_failures.attr, + &dev_attr_bad_blocks.attr, + &dev_attr_bbt_blocks.attr, &dev_attr_bitflip_threshold.attr, NULL, }; @@ -998,12 +1043,22 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) } EXPORT_SYMBOL_GPL(mtd_is_locked); -int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) +int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) { - if (!mtd->_block_isbad) + if (ofs < 0 || ofs > mtd->size) + return -EINVAL; + if (!mtd->_block_isreserved) return 0; + return mtd->_block_isreserved(mtd, ofs); +} +EXPORT_SYMBOL_GPL(mtd_block_isreserved); + +int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) +{ if (ofs < 0 || ofs > mtd->size) return -EINVAL; + if (!mtd->_block_isbad) + return 0; return mtd->_block_isbad(mtd, ofs); } EXPORT_SYMBOL_GPL(mtd_block_isbad); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 1ca9aec141ff..a3e3a7d074d5 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -290,6 +290,13 @@ static void part_resume(struct mtd_info *mtd) part->master->_resume(part->master); } +static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs) +{ + struct mtd_part *part = PART(mtd); + ofs += part->offset; + return part->master->_block_isreserved(part->master, ofs); +} + static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_part *part = PART(mtd); @@ -422,6 +429,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, slave->mtd._unlock = part_unlock; if (master->_is_locked) slave->mtd._is_locked = part_is_locked; + if (master->_block_isreserved) + slave->mtd._block_isreserved = part_block_isreserved; if (master->_block_isbad) slave->mtd._block_isbad = part_block_isbad; if (master->_block_markbad) @@ -526,7 +535,9 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, uint64_t offs = 0; while (offs < slave->mtd.size) { - if (mtd_block_isbad(master, offs + slave->offset)) + if (mtd_block_isreserved(master, offs + slave->offset)) + slave->mtd.ecc_stats.bbtblocks++; + else if (mtd_block_isbad(master, offs + slave->offset)) slave->mtd.ecc_stats.badblocks++; offs += slave->mtd.erasesize; } diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 542b5689eb63..a035e7cc6d46 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -50,4 +50,4 
@@ obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/ -nand-objs := nand_base.o nand_bbt.o +nand-objs := nand_base.o nand_bbt.o nand_timings.o diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 4ce181a35bcd..e321c564ff05 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -97,7 +97,9 @@ struct atmel_nfc { bool write_by_sram; bool is_initialized; - struct completion comp_nfc; + struct completion comp_ready; + struct completion comp_cmd_done; + struct completion comp_xfer_done; /* Point to the sram bank which include readed data via NFC */ void __iomem *data_in_sram; @@ -861,12 +863,11 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf, { struct nand_chip *nand_chip = mtd->priv; struct atmel_nand_host *host = nand_chip->priv; - int i, err_nbr, eccbytes; + int i, err_nbr; uint8_t *buf_pos; int total_err = 0; - eccbytes = nand_chip->ecc.bytes; - for (i = 0; i < eccbytes; i++) + for (i = 0; i < nand_chip->ecc.total; i++) if (ecc[i] != 0xff) goto normal_check; /* Erased page, return OK */ @@ -928,7 +929,7 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct atmel_nand_host *host = chip->priv; - int eccsize = chip->ecc.size; + int eccsize = chip->ecc.size * chip->ecc.steps; uint8_t *oob = chip->oob_poi; uint32_t *eccpos = chip->ecc.layout->eccpos; uint32_t stat; @@ -1169,8 +1170,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev, goto err; } - /* ECC is calculated for the whole page (1 step) */ - nand_chip->ecc.size = mtd->writesize; + nand_chip->ecc.size = sector_size; /* set ECC page size and oob layout */ switch (mtd->writesize) { @@ -1185,18 +1185,20 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev, host->pmecc_index_of = host->pmecc_rom_base + host->pmecc_lookup_table_offset; - nand_chip->ecc.steps = 1; + nand_chip->ecc.steps = host->pmecc_sector_number; nand_chip->ecc.strength = cap; - nand_chip->ecc.bytes = host->pmecc_bytes_per_sector * + nand_chip->ecc.bytes = host->pmecc_bytes_per_sector; + nand_chip->ecc.total = host->pmecc_bytes_per_sector * host->pmecc_sector_number; - if (nand_chip->ecc.bytes > mtd->oobsize - 2) { + if (nand_chip->ecc.total > mtd->oobsize - 2) { dev_err(host->dev, "No room for ECC bytes\n"); err_no = -EINVAL; goto err; } pmecc_config_ecc_layout(&atmel_pmecc_oobinfo, mtd->oobsize, - nand_chip->ecc.bytes); + nand_chip->ecc.total); + nand_chip->ecc.layout = &atmel_pmecc_oobinfo; break; case 512: @@ -1572,49 +1574,104 @@ static int atmel_hw_nand_init_params(struct platform_device *pdev, return 0; } +static inline u32 nfc_read_status(struct atmel_nand_host *host) +{ + u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE; + u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR); + + if (unlikely(nfc_status & err_flags)) { + if (nfc_status & NFC_SR_DTOE) + dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n"); + else if (nfc_status & NFC_SR_UNDEF) + dev_err(host->dev, "NFC: Access Undefined Area Error\n"); + else if (nfc_status & NFC_SR_AWB) + dev_err(host->dev, "NFC: Access memory While NFC is busy\n"); + else if (nfc_status & NFC_SR_ASE) + dev_err(host->dev, "NFC: Access memory Size Error\n"); + } + + return nfc_status; +} + /* SMC interrupt service routine */ static irqreturn_t hsmc_interrupt(int irq, void *dev_id) { struct atmel_nand_host 
*host = dev_id; u32 status, mask, pending; - irqreturn_t ret = IRQ_HANDLED; + irqreturn_t ret = IRQ_NONE; - status = nfc_readl(host->nfc->hsmc_regs, SR); + status = nfc_read_status(host); mask = nfc_readl(host->nfc->hsmc_regs, IMR); pending = status & mask; if (pending & NFC_SR_XFR_DONE) { - complete(&host->nfc->comp_nfc); + complete(&host->nfc->comp_xfer_done); nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE); - } else if (pending & NFC_SR_RB_EDGE) { - complete(&host->nfc->comp_nfc); + ret = IRQ_HANDLED; + } + if (pending & NFC_SR_RB_EDGE) { + complete(&host->nfc->comp_ready); nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE); - } else if (pending & NFC_SR_CMD_DONE) { - complete(&host->nfc->comp_nfc); + ret = IRQ_HANDLED; + } + if (pending & NFC_SR_CMD_DONE) { + complete(&host->nfc->comp_cmd_done); nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE); - } else { - ret = IRQ_NONE; + ret = IRQ_HANDLED; } return ret; } /* NFC(Nand Flash Controller) related functions */ -static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag) +static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag) { - unsigned long timeout; - init_completion(&host->nfc->comp_nfc); + if (flag & NFC_SR_XFR_DONE) + init_completion(&host->nfc->comp_xfer_done); + + if (flag & NFC_SR_RB_EDGE) + init_completion(&host->nfc->comp_ready); + + if (flag & NFC_SR_CMD_DONE) + init_completion(&host->nfc->comp_cmd_done); /* Enable interrupt that need to wait for */ nfc_writel(host->nfc->hsmc_regs, IER, flag); +} - timeout = wait_for_completion_timeout(&host->nfc->comp_nfc, - msecs_to_jiffies(NFC_TIME_OUT_MS)); - if (timeout) - return 0; +static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag) +{ + int i, index = 0; + struct completion *comp[3]; /* Support 3 interrupt completion */ - /* Time out to wait for the interrupt */ + if (flag & NFC_SR_XFR_DONE) + comp[index++] = &host->nfc->comp_xfer_done; + + if (flag & NFC_SR_RB_EDGE) + comp[index++] = &host->nfc->comp_ready; + + if (flag & NFC_SR_CMD_DONE) + comp[index++] = &host->nfc->comp_cmd_done; + + if (index == 0) { + dev_err(host->dev, "Unkown interrupt flag: 0x%08x\n", flag); + return -EINVAL; + } + + for (i = 0; i < index; i++) { + if (wait_for_completion_timeout(comp[i], + msecs_to_jiffies(NFC_TIME_OUT_MS))) + continue; /* wait for next completion */ + else + goto err_timeout; + } + + return 0; + +err_timeout: dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag); + /* Disable the interrupt as it is not handled by interrupt handler */ + nfc_writel(host->nfc->hsmc_regs, IDR, flag); return -ETIMEDOUT; } @@ -1622,6 +1679,9 @@ static int nfc_send_command(struct atmel_nand_host *host, unsigned int cmd, unsigned int addr, unsigned char cycle0) { unsigned long timeout; + u32 flag = NFC_SR_CMD_DONE; + flag |= cmd & NFCADDR_CMD_DATAEN ? 
NFC_SR_XFR_DONE : 0; + dev_dbg(host->dev, "nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n", cmd, addr, cycle0); @@ -1635,18 +1695,28 @@ static int nfc_send_command(struct atmel_nand_host *host, return -ETIMEDOUT; } } + + nfc_prepare_interrupt(host, flag); nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0); nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs); - return nfc_wait_interrupt(host, NFC_SR_CMD_DONE); + return nfc_wait_interrupt(host, flag); } static int nfc_device_ready(struct mtd_info *mtd) { + u32 status, mask; struct nand_chip *nand_chip = mtd->priv; struct atmel_nand_host *host = nand_chip->priv; - if (!nfc_wait_interrupt(host, NFC_SR_RB_EDGE)) - return 1; - return 0; + + status = nfc_read_status(host); + mask = nfc_readl(host->nfc->hsmc_regs, IMR); + + /* The mask should be 0. If not we may lost interrupts */ + if (unlikely(mask & status)) + dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n", + mask & status); + + return status & NFC_SR_RB_EDGE; } static void nfc_select_chip(struct mtd_info *mtd, int chip) @@ -1795,10 +1865,6 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command, nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr; nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0); - if (dataen == NFCADDR_CMD_DATAEN) - if (nfc_wait_interrupt(host, NFC_SR_XFR_DONE)) - dev_err(host->dev, "something wrong, No XFR_DONE interrupt comes.\n"); - /* * Program and erase have their own busy handlers status, sequential * in, and deplete1 need no delay. @@ -1823,6 +1889,7 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command, } /* fall through */ default: + nfc_prepare_interrupt(host, NFC_SR_RB_EDGE); nfc_wait_interrupt(host, NFC_SR_RB_EDGE); } } @@ -2209,6 +2276,9 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev) } } + nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff); + nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */ + nfc->is_initialized = true; dev_info(&pdev->dev, "NFC is probed.\n"); return 0; diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h index 4efd117cd3a3..85b8ca6af7d2 100644 --- a/drivers/mtd/nand/atmel_nand_nfc.h +++ b/drivers/mtd/nand/atmel_nand_nfc.h @@ -37,6 +37,10 @@ #define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */ #define NFC_SR_XFR_DONE (1 << 16) #define NFC_SR_CMD_DONE (1 << 17) +#define NFC_SR_DTOE (1 << 20) +#define NFC_SR_UNDEF (1 << 21) +#define NFC_SR_AWB (1 << 22) +#define NFC_SR_ASE (1 << 23) #define NFC_SR_RB_EDGE (1 << 24) #define ATMEL_HSMC_NFC_IER 0x0c diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index bc5c518828d2..77d6c17b38c2 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -41,7 +41,7 @@ static u_char au_read_byte(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; u_char ret = readb(this->IO_ADDR_R); - au_sync(); + wmb(); /* drain writebuffer */ return ret; } @@ -56,7 +56,7 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte) { struct nand_chip *this = mtd->priv; writeb(byte, this->IO_ADDR_W); - au_sync(); + wmb(); /* drain writebuffer */ } /** @@ -69,7 +69,7 @@ static u_char au_read_byte16(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R)); - au_sync(); + wmb(); /* drain writebuffer */ return ret; } @@ -84,7 +84,7 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte) { struct nand_chip *this = mtd->priv; writew(le16_to_cpu((u16) byte), this->IO_ADDR_W); - au_sync(); 
+ wmb(); /* drain writebuffer */ } /** @@ -97,7 +97,7 @@ static u16 au_read_word(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; u16 ret = readw(this->IO_ADDR_R); - au_sync(); + wmb(); /* drain writebuffer */ return ret; } @@ -116,7 +116,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) for (i = 0; i < len; i++) { writeb(buf[i], this->IO_ADDR_W); - au_sync(); + wmb(); /* drain writebuffer */ } } @@ -135,7 +135,7 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) for (i = 0; i < len; i++) { buf[i] = readb(this->IO_ADDR_R); - au_sync(); + wmb(); /* drain writebuffer */ } } @@ -156,7 +156,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) for (i = 0; i < len; i++) { writew(p[i], this->IO_ADDR_W); - au_sync(); + wmb(); /* drain writebuffer */ } } @@ -178,7 +178,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) for (i = 0; i < len; i++) { p[i] = readw(this->IO_ADDR_R); - au_sync(); + wmb(); /* drain writebuffer */ } } @@ -223,26 +223,23 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd) case NAND_CTL_SETNCE: /* assert (force assert) chip enable */ - au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL); + alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL); break; case NAND_CTL_CLRNCE: /* deassert chip enable */ - au_writel(0, MEM_STNDCTL); + alchemy_wrsmem(0, AU1000_MEM_STNDCTL); break; } this->IO_ADDR_R = this->IO_ADDR_W; - /* Drain the writebuffer */ - au_sync(); + wmb(); /* Drain the writebuffer */ } int au1550_device_ready(struct mtd_info *mtd) { - int ret = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0; - au_sync(); - return ret; + return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0; } /** diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index 722898aea7a6..871c4f712654 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c @@ -830,34 +830,10 @@ out_err: return err; } -/* PM Support */ -#ifdef CONFIG_PM - -static int bf5xx_nand_suspend(struct platform_device *dev, pm_message_t pm) -{ - struct bf5xx_nand_info *info = platform_get_drvdata(dev); - - return 0; -} - -static int bf5xx_nand_resume(struct platform_device *dev) -{ - struct bf5xx_nand_info *info = platform_get_drvdata(dev); - - return 0; -} - -#else -#define bf5xx_nand_suspend NULL -#define bf5xx_nand_resume NULL -#endif - /* driver device registration */ static struct platform_driver bf5xx_nand_driver = { .probe = bf5xx_nand_probe, .remove = bf5xx_nand_remove, - .suspend = bf5xx_nand_suspend, - .resume = bf5xx_nand_resume, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 9f2012a3e764..0b071a3136a2 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -473,7 +473,7 @@ static void detect_partition_feature(struct denali_nand_info *denali) static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) { uint16_t status = PASS; - uint32_t id_bytes[5], addr; + uint32_t id_bytes[8], addr; uint8_t i, maf_id, device_id; dev_dbg(denali->dev, @@ -488,7 +488,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); index_addr(denali, (uint32_t)addr | 0, 0x90); index_addr(denali, (uint32_t)addr | 1, 0); - for (i = 0; i < 5; i++) + for (i = 0; i < 8; i++) index_addr_read_data(denali, addr | 2, &id_bytes[i]); maf_id = id_bytes[0]; device_id = id_bytes[1]; @@ -1276,7 +1276,7 @@ static void 
denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, addr = (uint32_t)MODE_11 | BANK(denali->flash_bank); index_addr(denali, (uint32_t)addr | 0, 0x90); index_addr(denali, (uint32_t)addr | 1, 0); - for (i = 0; i < 5; i++) { + for (i = 0; i < 8; i++) { index_addr_read_data(denali, (uint32_t)addr | 2, &id); diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index f638cd8077ca..959cb9b70310 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -285,9 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this) geo->ecc_strength = get_ecc_strength(this); if (!gpmi_check_ecc(this)) { dev_err(this->dev, - "We can not support this nand chip." - " Its required ecc strength(%d) is beyond our" - " capability(%d).\n", geo->ecc_strength, + "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n", + geo->ecc_strength, this->devdata->bch_max_ecc_strength); return -EINVAL; } @@ -1082,6 +1081,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, int first, last, marker_pos; int ecc_parity_size; int col = 0; + int old_swap_block_mark = this->swap_block_mark; /* The size of ECC parity */ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8; @@ -1090,17 +1090,21 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, first = offs / size; last = (offs + len - 1) / size; - /* - * Find the chunk which contains the Block Marker. If this chunk is - * in the range of [first, last], we have to read out the whole page. - * Why? since we had swapped the data at the position of Block Marker - * to the metadata which is bound with the chunk 0. - */ - marker_pos = geo->block_mark_byte_offset / size; - if (last >= marker_pos && first <= marker_pos) { - dev_dbg(this->dev, "page:%d, first:%d, last:%d, marker at:%d\n", + if (this->swap_block_mark) { + /* + * Find the chunk which contains the Block Marker. + * If this chunk is in the range of [first, last], + * we have to read out the whole page. + * Why? since we had swapped the data at the position of Block + * Marker to the metadata which is bound with the chunk 0. + */ + marker_pos = geo->block_mark_byte_offset / size; + if (last >= marker_pos && first <= marker_pos) { + dev_dbg(this->dev, + "page:%d, first:%d, last:%d, marker at:%d\n", page, first, last, marker_pos); - return gpmi_ecc_read_page(mtd, chip, buf, 0, page); + return gpmi_ecc_read_page(mtd, chip, buf, 0, page); + } } meta = geo->metadata_size; @@ -1146,7 +1150,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0); writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1); this->bch_geometry = old_geo; - this->swap_block_mark = true; + this->swap_block_mark = old_swap_block_mark; return max_bitflips; } @@ -1180,7 +1184,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, /* Handle block mark swapping. */ block_mark_swapping(this, - (void *) payload_virt, (void *) auxiliary_virt); + (void *)payload_virt, (void *)auxiliary_virt); } else { /* * If control arrives here, we're not doing block mark swapping, @@ -1310,10 +1314,10 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, /* * Now, we want to make sure the block mark is correct. In the - * Swapping/Raw case, we already have it. Otherwise, we need to - * explicitly read it. + * non-transcribing case (!GPMI_IS_MX23()), we already have it. 
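For reference, the condition now guarded by swap_block_mark in gpmi_ecc_read_subpage() boils down to a chunk-overlap test: if the chunk holding the swapped block marker lies within [first, last], the subpage read must fall back to a full-page read. Restated as a standalone helper (names are illustrative):

#include <linux/types.h>

static bool example_subpage_covers_marker(unsigned int first_chunk,
					  unsigned int last_chunk,
					  unsigned int marker_byte_offset,
					  unsigned int chunk_size)
{
	/* Index of the ECC chunk that holds the swapped bad-block marker. */
	unsigned int marker_chunk = marker_byte_offset / chunk_size;

	return first_chunk <= marker_chunk && marker_chunk <= last_chunk;
}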
+ * Otherwise, we need to explicitly read it. */ - if (!this->swap_block_mark) { + if (GPMI_IS_MX23(this)) { /* Read the block mark into the first byte of the OOB buffer. */ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); chip->oob_poi[0] = chip->read_byte(mtd); @@ -1354,7 +1358,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs) chipnr = (int)(ofs >> chip->chip_shift); chip->select_chip(mtd, chipnr); - column = this->swap_block_mark ? mtd->writesize : 0; + column = !GPMI_IS_MX23(this) ? mtd->writesize : 0; /* Write the block mark. */ block_mark = this->data_buffer_dma; @@ -1597,8 +1601,9 @@ static int mx23_boot_init(struct gpmi_nand_data *this) dev_dbg(dev, "Transcribing mark in block %u\n", block); ret = chip->block_markbad(mtd, byte); if (ret) - dev_err(dev, "Failed to mark block bad with " - "ret %d\n", ret); + dev_err(dev, + "Failed to mark block bad with ret %d\n", + ret); } } @@ -1649,9 +1654,6 @@ static int gpmi_init_last(struct gpmi_nand_data *this) struct bch_geometry *bch_geo = &this->bch_geometry; int ret; - /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ - this->swap_block_mark = !GPMI_IS_MX23(this); - /* Set up the medium geometry */ ret = gpmi_set_geometry(this); if (ret) @@ -1715,9 +1717,20 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) chip->badblock_pattern = &gpmi_bbt_descr; chip->block_markbad = gpmi_block_markbad; chip->options |= NAND_NO_SUBPAGE_WRITE; - if (of_get_nand_on_flash_bbt(this->dev->of_node)) + + /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ + this->swap_block_mark = !GPMI_IS_MX23(this); + + if (of_get_nand_on_flash_bbt(this->dev->of_node)) { chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; + if (of_property_read_bool(this->dev->of_node, + "fsl,no-blockmark-swap")) + this->swap_block_mark = false; + } + dev_dbg(this->dev, "Blockmark swapping %sabled\n", + this->swap_block_mark ? "en" : "dis"); + /* * Allocate a temporary DMA buffer for reading ID in the * nand_scan_ident(). @@ -1760,16 +1773,16 @@ err_out: static const struct of_device_id gpmi_nand_id_table[] = { { .compatible = "fsl,imx23-gpmi-nand", - .data = (void *)&gpmi_devdata_imx23, + .data = &gpmi_devdata_imx23, }, { .compatible = "fsl,imx28-gpmi-nand", - .data = (void *)&gpmi_devdata_imx28, + .data = &gpmi_devdata_imx28, }, { .compatible = "fsl,imx6q-gpmi-nand", - .data = (void *)&gpmi_devdata_imx6q, + .data = &gpmi_devdata_imx6q, }, { .compatible = "fsl,imx6sx-gpmi-nand", - .data = (void *)&gpmi_devdata_imx6sx, + .data = &gpmi_devdata_imx6sx, }, {} }; MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 687478c9f09c..7335346dc126 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -721,12 +721,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) nand_chip->bbt_td = &lpc32xx_nand_bbt; nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror; - /* bitflip_threshold's default is defined as ecc_strength anyway. - * Unfortunately, it is set only later at add_mtd_device(). Meanwhile - * being 0, it causes bad block table scanning errors in - * nand_scan_tail(), so preparing it here. 
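The bitflip_threshold initialisation removed from the two lpc32xx drivers below is redundant because, as the deleted comments themselves note, the MTD core applies the ecc_strength default when the field is still zero at registration time. Roughly, the core-side fallback amounts to (sketch, not the exact mtdcore code):

#include <linux/mtd/mtd.h>

static void example_default_bitflip_threshold(struct mtd_info *mtd)
{
	/*
	 * Mirrors the behaviour the deleted comments refer to: if a driver
	 * leaves bitflip_threshold at 0, fall back to the ECC strength.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = mtd->ecc_strength;
}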
*/ - mtd->bitflip_threshold = nand_chip->ecc.strength; - if (use_dma) { res = lpc32xx_dma_setup(host); if (res) { diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c index 53a6742e3da3..8caef28e0756 100644 --- a/drivers/mtd/nand/lpc32xx_slc.c +++ b/drivers/mtd/nand/lpc32xx_slc.c @@ -840,12 +840,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) chip->ecc.strength = 1; chip->ecc.hwctl = lpc32xx_nand_ecc_enable; - /* bitflip_threshold's default is defined as ecc_strength anyway. - * Unfortunately, it is set only later at add_mtd_device(). Meanwhile - * being 0, it causes bad block table scanning errors in - * nand_scan_tail(), so preparing it here already. */ - mtd->bitflip_threshold = chip->ecc.strength; - /* * Allocate a large enough buffer for a single huge page plus * extra space for the spare area and ECC storage area diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 4f3e80c68a26..d8cdf06343fb 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -488,6 +488,23 @@ static int nand_check_wp(struct mtd_info *mtd) * nand_block_checkbad - [GENERIC] Check if a block is marked bad * @mtd: MTD device structure * @ofs: offset from device start + * + * Check if the block is mark as reserved. + */ +static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs) +{ + struct nand_chip *chip = mtd->priv; + + if (!chip->bbt) + return 0; + /* Return info from the table */ + return nand_isreserved_bbt(mtd, ofs); +} + +/** + * nand_block_checkbad - [GENERIC] Check if a block is marked bad + * @mtd: MTD device structure + * @ofs: offset from device start * @getchip: 0, if the chip is already selected * @allowbbt: 1, if its allowed to access the bbt area * @@ -4113,6 +4130,7 @@ int nand_scan_tail(struct mtd_info *mtd) mtd->_unlock = NULL; mtd->_suspend = nand_suspend; mtd->_resume = nand_resume; + mtd->_block_isreserved = nand_block_isreserved; mtd->_block_isbad = nand_block_isbad; mtd->_block_markbad = nand_block_markbad; mtd->writebufsize = mtd->writesize; diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 7f0c3b4c2a4f..443fa82cde6a 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -1311,6 +1311,20 @@ int nand_default_bbt(struct mtd_info *mtd) } /** + * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved + * @mtd: MTD device structure + * @offs: offset in the device + */ +int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs) +{ + struct nand_chip *this = mtd->priv; + int block; + + block = (int)(offs >> this->bbt_erase_shift); + return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED; +} + +/** * nand_isbad_bbt - [NAND Interface] Check if a block is bad * @mtd: MTD device structure * @offs: offset in the device diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c new file mode 100644 index 000000000000..8b36253420fa --- /dev/null +++ b/drivers/mtd/nand/nand_timings.c @@ -0,0 +1,253 @@ +/* + * Copyright (C) 2014 Free Electrons + * + * Author: Boris BREZILLON <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
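The new nand_block_isreserved()/nand_isreserved_bbt() pair introduced above maps a byte offset to a block number with bbt_erase_shift and then compares the in-RAM BBT entry against BBT_BLOCK_RESERVED. A self-contained sketch of that lookup follows; the two-bit packing and the demo geometry are assumptions for illustration, not the mtd code:

#include <stdint.h>
#include <stdio.h>

#define BBT_BLOCK_GOOD		0x0
#define BBT_BLOCK_WORN		0x1
#define BBT_BLOCK_RESERVED	0x2
#define BBT_BLOCK_FACTORY_BAD	0x3

/* two status bits per block, four blocks packed per byte */
static int bbt_get_entry(const uint8_t *bbt, int block)
{
	return (bbt[block >> 2] >> ((block & 0x3) * 2)) & 0x3;
}

/* offset -> block number -> table entry, as nand_isreserved_bbt() does */
static int block_isreserved(const uint8_t *bbt, uint64_t offs,
			    unsigned int bbt_erase_shift)
{
	int block = (int)(offs >> bbt_erase_shift);

	return bbt_get_entry(bbt, block) == BBT_BLOCK_RESERVED;
}

int main(void)
{
	uint8_t bbt[2] = { 0 };	/* 8 blocks, 128 KiB erase size (shift 17) */

	/* reserve block 5, e.g. because it holds the on-flash BBT itself */
	bbt[5 >> 2] |= BBT_BLOCK_RESERVED << ((5 & 0x3) * 2);

	printf("%d\n", block_isreserved(bbt, 5ULL << 17, 17));	/* 1 */
	printf("%d\n", block_isreserved(bbt, 2ULL << 17, 17));	/* 0 */
	return 0;
}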
+ * + */ +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/mtd/nand.h> + +static const struct nand_sdr_timings onfi_sdr_timings[] = { + /* Mode 0 */ + { + .tADL_min = 200000, + .tALH_min = 20000, + .tALS_min = 50000, + .tAR_min = 25000, + .tCEA_max = 100000, + .tCEH_min = 20000, + .tCH_min = 20000, + .tCHZ_max = 100000, + .tCLH_min = 20000, + .tCLR_min = 20000, + .tCLS_min = 50000, + .tCOH_min = 0, + .tCS_min = 70000, + .tDH_min = 20000, + .tDS_min = 40000, + .tFEAT_max = 1000000, + .tIR_min = 10000, + .tITC_max = 1000000, + .tRC_min = 100000, + .tREA_max = 40000, + .tREH_min = 30000, + .tRHOH_min = 0, + .tRHW_min = 200000, + .tRHZ_max = 200000, + .tRLOH_min = 0, + .tRP_min = 50000, + .tRST_max = 250000000000, + .tWB_max = 200000, + .tRR_min = 40000, + .tWC_min = 100000, + .tWH_min = 30000, + .tWHR_min = 120000, + .tWP_min = 50000, + .tWW_min = 100000, + }, + /* Mode 1 */ + { + .tADL_min = 100000, + .tALH_min = 10000, + .tALS_min = 25000, + .tAR_min = 10000, + .tCEA_max = 45000, + .tCEH_min = 20000, + .tCH_min = 10000, + .tCHZ_max = 50000, + .tCLH_min = 10000, + .tCLR_min = 10000, + .tCLS_min = 25000, + .tCOH_min = 15000, + .tCS_min = 35000, + .tDH_min = 10000, + .tDS_min = 20000, + .tFEAT_max = 1000000, + .tIR_min = 0, + .tITC_max = 1000000, + .tRC_min = 50000, + .tREA_max = 30000, + .tREH_min = 15000, + .tRHOH_min = 15000, + .tRHW_min = 100000, + .tRHZ_max = 100000, + .tRLOH_min = 0, + .tRP_min = 25000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWC_min = 45000, + .tWH_min = 15000, + .tWHR_min = 80000, + .tWP_min = 25000, + .tWW_min = 100000, + }, + /* Mode 2 */ + { + .tADL_min = 100000, + .tALH_min = 10000, + .tALS_min = 15000, + .tAR_min = 10000, + .tCEA_max = 30000, + .tCEH_min = 20000, + .tCH_min = 10000, + .tCHZ_max = 50000, + .tCLH_min = 10000, + .tCLR_min = 10000, + .tCLS_min = 15000, + .tCOH_min = 15000, + .tCS_min = 25000, + .tDH_min = 5000, + .tDS_min = 15000, + .tFEAT_max = 1000000, + .tIR_min = 0, + .tITC_max = 1000000, + .tRC_min = 35000, + .tREA_max = 25000, + .tREH_min = 15000, + .tRHOH_min = 15000, + .tRHW_min = 100000, + .tRHZ_max = 100000, + .tRLOH_min = 0, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tRP_min = 17000, + .tWC_min = 35000, + .tWH_min = 15000, + .tWHR_min = 80000, + .tWP_min = 17000, + .tWW_min = 100000, + }, + /* Mode 3 */ + { + .tADL_min = 100000, + .tALH_min = 5000, + .tALS_min = 10000, + .tAR_min = 10000, + .tCEA_max = 25000, + .tCEH_min = 20000, + .tCH_min = 5000, + .tCHZ_max = 50000, + .tCLH_min = 5000, + .tCLR_min = 10000, + .tCLS_min = 10000, + .tCOH_min = 15000, + .tCS_min = 25000, + .tDH_min = 5000, + .tDS_min = 10000, + .tFEAT_max = 1000000, + .tIR_min = 0, + .tITC_max = 1000000, + .tRC_min = 30000, + .tREA_max = 20000, + .tREH_min = 10000, + .tRHOH_min = 15000, + .tRHW_min = 100000, + .tRHZ_max = 100000, + .tRLOH_min = 0, + .tRP_min = 15000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWC_min = 30000, + .tWH_min = 10000, + .tWHR_min = 80000, + .tWP_min = 15000, + .tWW_min = 100000, + }, + /* Mode 4 */ + { + .tADL_min = 70000, + .tALH_min = 5000, + .tALS_min = 10000, + .tAR_min = 10000, + .tCEA_max = 25000, + .tCEH_min = 20000, + .tCH_min = 5000, + .tCHZ_max = 30000, + .tCLH_min = 5000, + .tCLR_min = 10000, + .tCLS_min = 10000, + .tCOH_min = 15000, + .tCS_min = 20000, + .tDH_min = 5000, + .tDS_min = 10000, + .tFEAT_max = 1000000, + .tIR_min = 0, + .tITC_max = 1000000, + .tRC_min = 25000, + .tREA_max = 20000, + .tREH_min = 
10000, + .tRHOH_min = 15000, + .tRHW_min = 100000, + .tRHZ_max = 100000, + .tRLOH_min = 5000, + .tRP_min = 12000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWC_min = 25000, + .tWH_min = 10000, + .tWHR_min = 80000, + .tWP_min = 12000, + .tWW_min = 100000, + }, + /* Mode 5 */ + { + .tADL_min = 70000, + .tALH_min = 5000, + .tALS_min = 10000, + .tAR_min = 10000, + .tCEA_max = 25000, + .tCEH_min = 20000, + .tCH_min = 5000, + .tCHZ_max = 30000, + .tCLH_min = 5000, + .tCLR_min = 10000, + .tCLS_min = 10000, + .tCOH_min = 15000, + .tCS_min = 15000, + .tDH_min = 5000, + .tDS_min = 7000, + .tFEAT_max = 1000000, + .tIR_min = 0, + .tITC_max = 1000000, + .tRC_min = 20000, + .tREA_max = 16000, + .tREH_min = 7000, + .tRHOH_min = 15000, + .tRHW_min = 100000, + .tRHZ_max = 100000, + .tRLOH_min = 5000, + .tRP_min = 10000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWC_min = 20000, + .tWH_min = 7000, + .tWHR_min = 80000, + .tWP_min = 10000, + .tWW_min = 100000, + }, +}; + +/** + * onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND + * timings according to the given ONFI timing mode + * @mode: ONFI timing mode + */ +const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode) +{ + if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings)) + return ERR_PTR(-EINVAL); + + return &onfi_sdr_timings[mode]; +} +EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index f0ed92e210a1..5967b385141b 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -931,7 +931,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u32 val; val = readl(info->reg.gpmc_ecc_config); - if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs) + if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs) return -EINVAL; /* read ecc result */ @@ -1794,9 +1794,12 @@ static int omap_nand_probe(struct platform_device *pdev) } /* populate MTD interface based on ECC scheme */ - nand_chip->ecc.layout = &omap_oobinfo; ecclayout = &omap_oobinfo; switch (info->ecc_opt) { + case OMAP_ECC_HAM1_CODE_SW: + nand_chip->ecc.mode = NAND_ECC_SOFT; + break; + case OMAP_ECC_HAM1_CODE_HW: pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n"); nand_chip->ecc.mode = NAND_ECC_HW; @@ -1848,7 +1851,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.priv = nand_bch_init(mtd, nand_chip->ecc.size, nand_chip->ecc.bytes, - &nand_chip->ecc.layout); + &ecclayout); if (!nand_chip->ecc.priv) { pr_err("nand: error: unable to use s/w BCH library\n"); err = -EINVAL; @@ -1923,7 +1926,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.priv = nand_bch_init(mtd, nand_chip->ecc.size, nand_chip->ecc.bytes, - &nand_chip->ecc.layout); + &ecclayout); if (!nand_chip->ecc.priv) { pr_err("nand: error: unable to use s/w BCH library\n"); err = -EINVAL; @@ -2012,6 +2015,9 @@ static int omap_nand_probe(struct platform_device *pdev) goto return_error; } + if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) + goto scan_tail; + /* all OOB bytes from oobfree->offset till end off OOB are free */ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset; /* check if NAND device's OOB is enough to store ECC signatures */ @@ -2021,7 +2027,9 @@ static int omap_nand_probe(struct platform_device *pdev) err = -EINVAL; goto return_error; } + nand_chip->ecc.layout = ecclayout; +scan_tail: /* second phase scan */ if (nand_scan_tail(mtd)) { err = -ENXIO; diff 
--git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 79acbb8691b5..6b97bf17ce5d 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -208,10 +208,10 @@ static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info, if (info->clk_state == CLOCK_ENABLE) { if (new_state != CLOCK_ENABLE) - clk_disable(info->clk); + clk_disable_unprepare(info->clk); } else { if (new_state == CLOCK_ENABLE) - clk_enable(info->clk); + clk_prepare_enable(info->clk); } info->clk_state = new_state; diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index ab2607273e80..dcae2f6a2b11 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -32,10 +32,10 @@ config MTD_ONENAND_OMAP2 config MTD_ONENAND_SAMSUNG tristate "OneNAND on Samsung SOC controller support" - depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4 + depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4 help Support for a OneNAND flash device connected to an Samsung SOC. - S3C64XX/S5PC100 use command mapping method. + S3C64XX uses command mapping method. S5PC110/S5PC210 use generic OneNAND method. config MTD_ONENAND_OTP diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c index efb819c3df2f..19cfb97adbc0 100644 --- a/drivers/mtd/onenand/samsung.c +++ b/drivers/mtd/onenand/samsung.c @@ -10,7 +10,7 @@ * published by the Free Software Foundation. * * Implementation: - * S3C64XX and S5PC100: emulate the pseudo BufferRAM + * S3C64XX: emulate the pseudo BufferRAM * S5PC110: use DMA */ @@ -32,7 +32,6 @@ enum soc_type { TYPE_S3C6400, TYPE_S3C6410, - TYPE_S5PC100, TYPE_S5PC110, }; @@ -59,7 +58,6 @@ enum soc_type { #define MAP_11 (0x3) #define S3C64XX_CMD_MAP_SHIFT 24 -#define S5PC100_CMD_MAP_SHIFT 26 #define S3C6400_FBA_SHIFT 10 #define S3C6400_FPA_SHIFT 4 @@ -69,10 +67,6 @@ enum soc_type { #define S3C6410_FPA_SHIFT 6 #define S3C6410_FSA_SHIFT 4 -#define S5PC100_FBA_SHIFT 13 -#define S5PC100_FPA_SHIFT 7 -#define S5PC100_FSA_SHIFT 5 - /* S5PC110 specific definitions */ #define S5PC110_DMA_SRC_ADDR 0x400 #define S5PC110_DMA_SRC_CFG 0x404 @@ -195,11 +189,6 @@ static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val) return (type << S3C64XX_CMD_MAP_SHIFT) | val; } -static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val) -{ - return (type << S5PC100_CMD_MAP_SHIFT) | val; -} - static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa) { return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) | @@ -212,12 +201,6 @@ static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa) (fsa << S3C6410_FSA_SHIFT); } -static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa) -{ - return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) | - (fsa << S5PC100_FSA_SHIFT); -} - static void s3c_onenand_reset(void) { unsigned long timeout = 0x10000; @@ -835,9 +818,6 @@ static void s3c_onenand_setup(struct mtd_info *mtd) } else if (onenand->type == TYPE_S3C6410) { onenand->mem_addr = s3c6410_mem_addr; onenand->cmd_map = s3c64xx_cmd_map; - } else if (onenand->type == TYPE_S5PC100) { - onenand->mem_addr = s5pc100_mem_addr; - onenand->cmd_map = s5pc1xx_cmd_map; } else if (onenand->type == TYPE_S5PC110) { /* Use generic onenand functions */ this->read_bufferram = s5pc110_read_bufferram; @@ -1111,9 +1091,6 @@ static struct platform_device_id s3c_onenand_driver_ids[] = { .name = "s3c6410-onenand", .driver_data = TYPE_S3C6410, }, { - .name = "s5pc100-onenand", - .driver_data = TYPE_S5PC100, - }, { .name = "s5pc110-onenand", 
.driver_data = TYPE_S5PC110, }, { }, diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index c713c8656710..b5ad6bebf5e7 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -48,6 +48,25 @@ static int read_sr(struct spi_nor *nor) } /* + * Read the flag status register, returning its value in the location + * Return the status register value. + * Returns negative if error occurred. + */ +static int read_fsr(struct spi_nor *nor) +{ + int ret; + u8 val; + + ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1); + if (ret < 0) { + pr_err("error %d reading FSR\n", ret); + return ret; + } + + return val; +} + +/* * Read configuration register, returning its value in the * location. Return the configuration register value. * Returns negative if error occured. @@ -165,6 +184,32 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor) return -ETIMEDOUT; } +static int spi_nor_wait_till_fsr_ready(struct spi_nor *nor) +{ + unsigned long deadline; + int sr; + int fsr; + + deadline = jiffies + MAX_READY_WAIT_JIFFIES; + + do { + cond_resched(); + + sr = read_sr(nor); + if (sr < 0) { + break; + } else if (!(sr & SR_WIP)) { + fsr = read_fsr(nor); + if (fsr < 0) + break; + if (fsr & FSR_READY) + return 0; + } + } while (!time_after_eq(jiffies, deadline)); + + return -ETIMEDOUT; +} + /* * Service routine to read status register until ready, or timeout occurs. * Returns non-zero if error. @@ -402,6 +447,7 @@ struct flash_info { #define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */ #define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */ #define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */ +#define USE_FSR 0x80 /* use flag status register */ }; #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ @@ -449,6 +495,7 @@ const struct spi_device_id spi_nor_ids[] = { { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, + { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, /* ESMT */ @@ -488,6 +535,8 @@ const struct spi_device_id spi_nor_ids[] = { { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) }, + { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, USE_FSR) }, + { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, USE_FSR) }, /* PMC */ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, @@ -965,6 +1014,10 @@ int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, else mtd->_write = spi_nor_write; + if ((info->flags & USE_FSR) && + nor->wait_till_ready == spi_nor_wait_till_ready) + nor->wait_till_ready = spi_nor_wait_till_fsr_ready; + /* prefer "small sector" erase if possible */ if (info->flags & SECT_4K) { nor->erase_opcode = SPINOR_OP_BE_4K; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 8457df7ec5af..33c64955d4d7 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -378,9 +378,11 @@ int ubiblock_create(struct ubi_volume_info *vi) { struct ubiblock *dev; struct gendisk *gd; - int disk_capacity; + u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9; int ret; + if ((sector_t)disk_capacity != disk_capacity) + return -EFBIG; /* Check that the volume isn't already handled */ mutex_lock(&devices_mutex); if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { @@ -412,7 +414,6 @@ int 
ubiblock_create(struct ubi_volume_info *vi) gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id; gd->private_data = dev; sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id); - disk_capacity = (vi->size * vi->usable_leb_size) >> 9; set_capacity(gd, disk_capacity); dev->gd = gd; @@ -498,11 +499,16 @@ int ubiblock_remove(struct ubi_volume_info *vi) return 0; } -static void ubiblock_resize(struct ubi_volume_info *vi) +static int ubiblock_resize(struct ubi_volume_info *vi) { struct ubiblock *dev; - int disk_capacity; + u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9; + if ((sector_t)disk_capacity != disk_capacity) { + ubi_warn("%s: the volume is too big, cannot resize (%d LEBs)", + dev->gd->disk_name, vi->size); + return -EFBIG; + } /* * Need to lock the device list until we stop using the device, * otherwise the device struct might get released in @@ -512,15 +518,15 @@ static void ubiblock_resize(struct ubi_volume_info *vi) dev = find_dev_nolock(vi->ubi_num, vi->vol_id); if (!dev) { mutex_unlock(&devices_mutex); - return; + return -ENODEV; } mutex_lock(&dev->dev_mutex); - disk_capacity = (vi->size * vi->usable_leb_size) >> 9; set_capacity(dev->gd, disk_capacity); ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size); mutex_unlock(&dev->dev_mutex); mutex_unlock(&devices_mutex); + return 0; } static int ubiblock_notify(struct notifier_block *nb, diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index d77b1c1d7c72..07cac5f9ffb8 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -591,7 +591,7 @@ static int init_volumes(struct ubi_device *ubi, /* Static volumes only */ av = ubi_find_av(ai, i); - if (!av) { + if (!av || !av->leb_count) { /* * No eraseblocks belonging to this volume found. 
We * don't actually know whether this static volume is diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 0f3425dac910..20f491713145 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1718,12 +1718,12 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) vol_id, lnum, ubi->works_count); while (found) { - struct ubi_work *wrk; + struct ubi_work *wrk, *tmp; found = 0; down_read(&ubi->work_sem); spin_lock(&ubi->wl_lock); - list_for_each_entry(wrk, &ubi->works, list) { + list_for_each_entry_safe(wrk, tmp, &ubi->works, list) { if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && (lnum == UBI_ALL || wrk->lnum == lnum)) { list_del(&wrk->list); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index cbc44f53755a..7bb292e59559 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -144,7 +144,7 @@ static void com20020pci_remove(struct pci_dev *pdev) free_netdev(dev); } -static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = { +static const struct pci_device_id com20020pci_id_table[] = { { 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index 326a612a2730..1a790a20210d 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -112,20 +112,20 @@ static void com20020_detach(struct pcmcia_device *p_dev); /*====================================================================*/ -typedef struct com20020_dev_t { +struct com20020_dev { struct net_device *dev; -} com20020_dev_t; +}; static int com20020_probe(struct pcmcia_device *p_dev) { - com20020_dev_t *info; + struct com20020_dev *info; struct net_device *dev; struct arcnet_local *lp; dev_dbg(&p_dev->dev, "com20020_attach()\n"); /* Create new network device */ - info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) goto fail_alloc_info; @@ -160,7 +160,7 @@ fail_alloc_info: static void com20020_detach(struct pcmcia_device *link) { - struct com20020_dev_t *info = link->priv; + struct com20020_dev *info = link->priv; struct net_device *dev = info->dev; dev_dbg(&link->dev, "detach...\n"); @@ -199,7 +199,7 @@ static void com20020_detach(struct pcmcia_device *link) static int com20020_config(struct pcmcia_device *link) { struct arcnet_local *lp; - com20020_dev_t *info; + struct com20020_dev *info; struct net_device *dev; int i, ret; int ioaddr; @@ -291,7 +291,7 @@ static void com20020_release(struct pcmcia_device *link) static int com20020_suspend(struct pcmcia_device *link) { - com20020_dev_t *info = link->priv; + struct com20020_dev *info = link->priv; struct net_device *dev = info->dev; if (link->open) @@ -302,7 +302,7 @@ static int com20020_suspend(struct pcmcia_device *link) static int com20020_resume(struct pcmcia_device *link) { - com20020_dev_t *info = link->priv; + struct com20020_dev *info = link->priv; struct net_device *dev = info->dev; if (link->open) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f0f5eab0fab1..798ae69fb63c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " "the same MAC; 0 for none (default), " "1 for active, 2 for follow"); module_param(all_slaves_active, int, 0); 
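The ubiblock_create()/ubiblock_resize() hunks above compute the capacity in 64 bits and reject it with -EFBIG when casting it to sector_t would truncate the value. The same round-trip cast check in standalone form; demo_sector_t and the sizes used here are assumptions for the demo:

#include <stdint.h>
#include <stdio.h>

/* stand-in for a 32-bit sector_t (kernel built without CONFIG_LBDAF) */
typedef uint32_t demo_sector_t;

/* returns 0 on success, -1 when the capacity would be silently truncated */
static int set_capacity_checked(uint64_t bytes, demo_sector_t *out)
{
	uint64_t sectors = bytes >> 9;	/* 512-byte sectors */

	if ((demo_sector_t)sectors != sectors)
		return -1;		/* does not fit, refuse like -EFBIG */

	*out = (demo_sector_t)sectors;
	return 0;
}

int main(void)
{
	demo_sector_t cap;

	/* 1 TiB is 2^31 sectors and fits; 4 TiB is 2^33 sectors and does not */
	printf("%d\n", set_capacity_checked(1ULL << 40, &cap));	/*  0 */
	printf("%d\n", set_capacity_checked(1ULL << 42, &cap));	/* -1 */
	return 0;
}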
-MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" +MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface " "by setting active flag for all slaves; " "0 for never (default), 1 for always."); module_param(resend_igmp, int, 0); @@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev else bond_xmit_slave_id(bond, skb, 0); } else { - slave_id = bond_rr_gen_slave_id(bond); - bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt); + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); + + if (likely(slave_cnt)) { + slave_id = bond_rr_gen_slave_id(bond); + bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); + } else { + dev_kfree_skb_any(skb); + } } return NETDEV_TX_OK; @@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); - bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt); + if (likely(slave_cnt)) + bond_xmit_slave_id(bond, skb, + bond_xmit_hash(bond, skb) % slave_cnt); + else + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f07fa89b5fd5..05e1aa090add 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev) struct at91_priv *priv = netdev_priv(dev); int err; - clk_enable(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err) + return err; /* check or determine and set bittime */ err = open_candev(dev); @@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev) out_close: close_candev(dev); out: - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); return err; } @@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev) at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); close_candev(dev); diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 5d11e0e4225b..7be393c96b1a 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -270,7 +270,8 @@ static struct c_can_pci_data c_can_pch = { PCI_DEVICE(_vend, _dev), \ .driver_data = (unsigned long)&_driverdata, \ } -static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = { + +static const struct pci_device_id c_can_pci_tbl[] = { C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN, c_can_sta2x11), C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN, diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 5dede6e64376..fb279d6ae484 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable) ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); writel(ctrl, priv->raminit_ctrlreg); ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, ctrl, mask); + c_can_hw_raminit_wait_ti(priv, mask, ctrl); if (enable) { /* Set start bit and wait for the done bit. 
*/ ctrl |= CAN_RAMINIT_START_MASK(priv->instance); writel(ctrl, priv->raminit_ctrlreg); ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, ctrl, mask); + c_can_hw_raminit_wait_ti(priv, mask, ctrl); } spin_unlock(&raminit_lock); } @@ -280,7 +280,7 @@ static int c_can_plat_probe(struct platform_device *pdev) priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) + if (!priv->raminit_ctrlreg || priv->instance < 0) dev_info(&pdev->dev, "control memory is not used for raminit\n"); else priv->raminit = c_can_hw_raminit_ti; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index f425ec2c7839..6586309329e6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -62,7 +62,7 @@ #define FLEXCAN_MCR_BCC BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) -#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f) +#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) #define FLEXCAN_MCR_IDAM_A (0 << 8) #define FLEXCAN_MCR_IDAM_B (1 << 8) #define FLEXCAN_MCR_IDAM_C (2 << 8) @@ -125,7 +125,9 @@ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) /* FLEXCAN interrupt flag register (IFLAG) bits */ -#define FLEXCAN_TX_BUF_ID 8 +/* Errata ERR005829 step7: Reserve first valid MB */ +#define FLEXCAN_TX_BUF_RESERVED 8 +#define FLEXCAN_TX_BUF_ID 9 #define FLEXCAN_IFLAG_BUF(x) BIT(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) @@ -136,6 +138,17 @@ /* FLEXCAN message buffers */ #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) +#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) +#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) +#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) +#define FLEXCAN_MB_CODE_RX_OVERRRUN (0x6 << 24) +#define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) + +#define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) +#define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) +#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) +#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) + #define FLEXCAN_MB_CNT_SRR BIT(22) #define FLEXCAN_MB_CNT_IDE BIT(21) #define FLEXCAN_MB_CNT_RTR BIT(20) @@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - usleep_range(10, 20); + udelay(10); if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) return -ETIMEDOUT; @@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - usleep_range(10, 20); + udelay(10); if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) return -ETIMEDOUT; @@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) - usleep_range(100, 200); + udelay(100); if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) return -ETIMEDOUT; @@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) - usleep_range(10, 20); + udelay(10); if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) return -ETIMEDOUT; @@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) - usleep_range(10, 20); + udelay(10); if (flexcan_read(&regs->mcr) &
FLEXCAN_MCR_SOFTRST) return -ETIMEDOUT; @@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id); flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); + /* Errata ERR005829 step8: + * Write twice INACTIVE(0x8) code to first MB. + */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + return NETDEV_TX_OK; } @@ -549,6 +570,13 @@ static void do_state(struct net_device *dev, /* process state changes depending on the new state */ switch (new_state) { + case CAN_STATE_ERROR_WARNING: + netdev_dbg(dev, "Error Warning\n"); + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + break; case CAN_STATE_ERROR_ACTIVE: netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; @@ -737,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); + /* after sending a RTR frame mailbox is in RX mode */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1); netif_wake_queue(dev); } @@ -794,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev) struct flexcan_regs __iomem *regs = priv->base; int err; u32 reg_mcr, reg_ctrl; + int i; /* enable module */ err = flexcan_chip_enable(priv); @@ -852,14 +884,26 @@ static int flexcan_chip_start(struct net_device *dev) if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE || priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; + else + reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK; /* save for later use */ priv->reg_ctrl_default = reg_ctrl; netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); flexcan_write(reg_ctrl, &regs->ctrl); - /* Abort any pending TX, mark Mailbox as INACTIVE */ - flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), + /* clear and invalidate all mailboxes first */ + for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) { + flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, + &regs->cantxfg[i].can_ctrl); + } + + /* Errata ERR005829: mark first TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + + /* mark TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); /* acceptance mask/acceptance code (accept everything) */ diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 6472562efedc..a67eb01f3028 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -194,7 +194,7 @@ static const struct can_bittiming_const pch_can_bittiming_const = { .brp_inc = 1, }; -static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = { +static const struct pci_device_id pch_pci_tbl[] = { {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,}, {0,} }; diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c index fd13dbf07d9c..7481c324a476 100644 --- a/drivers/net/can/sja1000/ems_pci.c +++ b/drivers/net/can/sja1000/ems_pci.c @@ -101,7 +101,7 @@ struct ems_pci_card { #define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ -static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = { +static const struct pci_device_id ems_pci_tbl[] = { /* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, /* CPC-PCI v2 */ diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 23b8e1324e25..8ff3424d5147 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -107,7 +107,7 @@ struct kvaser_pci { #define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */ #define KVASER_PCI_DEVICE_ID2 0x0008 -static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = { +static const struct pci_device_id kvaser_pci_tbl[] = { {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,}, {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,}, { 0,} diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 564933ae218c..e5fac368068a 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -70,6 +70,8 @@ struct peak_pci_chan { #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ +#define PEAK_PCIE_OEM_ID 0x0009 /* PCAN-PCI Express OEM */ +#define PEAK_PCIEC34_DEVICE_ID 0x000A /* PCAN-PCI Express 34 (one channel) */ #define PEAK_PCI_CHAN_MAX 4 @@ -77,7 +79,7 @@ static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = { 0x02, 0x01, 0x40, 0x80 }; -static DEFINE_PCI_DEVICE_TABLE(peak_pci_tbl) = { +static const struct pci_device_id peak_pci_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, @@ -87,6 +89,7 @@ static DEFINE_PCI_DEVICE_TABLE(peak_pci_tbl) = { {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #endif {0,} }; @@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * This must be done *before* register_sja1000dev() but * *after* devices linkage */ - if (pdev->device == PEAK_PCIEC_DEVICE_ID) { + if (pdev->device == PEAK_PCIEC_DEVICE_ID || + pdev->device == PEAK_PCIEC34_DEVICE_ID) { err = peak_pciec_probe(pdev, dev); if (err) { dev_err(&pdev->dev, diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index ec39b7cb2287..8836a7485c81 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -247,7 +247,7 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = { /* based on PLX9030 */ }; -static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = { +static const struct pci_device_id plx_pci_tbl[] = { { /* Adlink PCI-7841/cPCI-7841 */ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID, diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index d1692154ed1b..b27ac6074afb 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -172,6 +172,35 @@ static void set_normal_mode(struct net_device *dev) netdev_err(dev, "setting SJA1000 into normal mode failed!\n"); } +/* + * initialize SJA1000 chip: + * - reset chip + * - set output mode + * - set baudrate + * - enable interrupts + * - start operating mode + */ +static void chipset_init(struct net_device *dev) +{ + struct sja1000_priv *priv = netdev_priv(dev); + + /* set clock divider and output 
control register */ + priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); + + /* set acceptance filter (accept all) */ + priv->write_reg(priv, SJA1000_ACCC0, 0x00); + priv->write_reg(priv, SJA1000_ACCC1, 0x00); + priv->write_reg(priv, SJA1000_ACCC2, 0x00); + priv->write_reg(priv, SJA1000_ACCC3, 0x00); + + priv->write_reg(priv, SJA1000_ACCM0, 0xFF); + priv->write_reg(priv, SJA1000_ACCM1, 0xFF); + priv->write_reg(priv, SJA1000_ACCM2, 0xFF); + priv->write_reg(priv, SJA1000_ACCM3, 0xFF); + + priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL); +} + static void sja1000_start(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); @@ -180,6 +209,10 @@ static void sja1000_start(struct net_device *dev) if (priv->can.state != CAN_STATE_STOPPED) set_reset_mode(dev); + /* Initialize chip if uninitialized at this stage */ + if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN)) + chipset_init(dev); + /* Clear error counters and error code capture */ priv->write_reg(priv, SJA1000_TXERR, 0x0); priv->write_reg(priv, SJA1000_RXERR, 0x0); @@ -237,35 +270,6 @@ static int sja1000_get_berr_counter(const struct net_device *dev, } /* - * initialize SJA1000 chip: - * - reset chip - * - set output mode - * - set baudrate - * - enable interrupts - * - start operating mode - */ -static void chipset_init(struct net_device *dev) -{ - struct sja1000_priv *priv = netdev_priv(dev); - - /* set clock divider and output control register */ - priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); - - /* set acceptance filter (accept all) */ - priv->write_reg(priv, SJA1000_ACCC0, 0x00); - priv->write_reg(priv, SJA1000_ACCC1, 0x00); - priv->write_reg(priv, SJA1000_ACCC2, 0x00); - priv->write_reg(priv, SJA1000_ACCC3, 0x00); - - priv->write_reg(priv, SJA1000_ACCM0, 0xFF); - priv->write_reg(priv, SJA1000_ACCM1, 0xFF); - priv->write_reg(priv, SJA1000_ACCM2, 0xFF); - priv->write_reg(priv, SJA1000_ACCM3, 0xFF); - - priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL); -} - -/* * transmit a CAN message * message layout in the sk_buff should be like this: * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 61477b8e8d24..8ca49f04acec 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -375,7 +375,7 @@ static struct vortex_chip_info { }; -static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = { +static const struct pci_device_id vortex_pci_tbl[] = { { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, @@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) int entry = vp->cur_tx % TX_RING_SIZE; struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; unsigned long flags; + dma_addr_t dma_addr; if (vortex_debug > 6) { pr_debug("boomerang_start_xmit()\n"); @@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); if (!skb_shinfo(skb)->nr_frags) { - vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, - skb->len, PCI_DMA_TODEVICE)); + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + + vp->tx_ring[entry].frag[0].addr = 
cpu_to_le32(dma_addr); vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); } else { int i; - vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE)); + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, + 0, + frag->size, + DMA_TO_DEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { + for(i = i-1; i >= 0; i--) + dma_unmap_page(&VORTEX_PCI(vp)->dev, + le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), + le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), + DMA_TO_DEVICE); + + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[entry].frag[0].addr), + le32_to_cpu(vp->tx_ring[entry].frag[0].length), + PCI_DMA_TODEVICE); + + goto out_dma_err; + } + vp->tx_ring[entry].frag[i+1].addr = - cpu_to_le32(pci_map_single( - VORTEX_PCI(vp), - (void *)skb_frag_address(frag), - skb_frag_size(frag), PCI_DMA_TODEVICE)); + cpu_to_le32(dma_addr); if (i == skb_shinfo(skb)->nr_frags-1) vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); @@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #else - vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); + dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); #endif @@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); iowrite16(DownUnstall, ioaddr + EL3_CMD); spin_unlock_irqrestore(&vp->lock, flags); +out: return NETDEV_TX_OK; +out_dma_err: + dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); + goto out; } /* The interrupt handler does all of the Rx thread work and cleans up diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index e13b04624ded..48775b88bac7 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -203,7 +203,7 @@ static struct typhoon_card_info typhoon_card_info[] = { * bit 8 indicates if this is a (0) copper or (1) fiber card * bits 12-16 indicate card type: (0) client and (1) server */ -static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = { +static const struct pci_device_id typhoon_pci_tbl[] = { { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990, PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX }, { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95, diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 0988811f4e40..2d89bd00de61 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -91,7 +91,8 @@ config MCF8390 config NE2000 tristate "NE2000/NE1000 support" - depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX) + depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX || \ + ATARI_ETHERNEC) select CRC32 ---help--- If you have a network 
(Ethernet) card of this type, say Y and read diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 73c57a4a7b9e..7769c05543f1 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -108,7 +108,7 @@ static u32 axnet_msg_enable; /*====================================================================*/ -typedef struct axnet_dev_t { +struct axnet_dev { struct pcmcia_device *p_dev; caddr_t base; struct timer_list watchdog; @@ -118,9 +118,9 @@ typedef struct axnet_dev_t { int phy_id; int flags; int active_low; -} axnet_dev_t; +}; -static inline axnet_dev_t *PRIV(struct net_device *dev) +static inline struct axnet_dev *PRIV(struct net_device *dev) { void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device); return p; @@ -141,13 +141,13 @@ static const struct net_device_ops axnet_netdev_ops = { static int axnet_probe(struct pcmcia_device *link) { - axnet_dev_t *info; + struct axnet_dev *info; struct net_device *dev; struct ei_device *ei_local; dev_dbg(&link->dev, "axnet_attach()\n"); - dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t)); + dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(struct axnet_dev)); if (!dev) return -ENOMEM; @@ -274,7 +274,7 @@ static int axnet_configcheck(struct pcmcia_device *p_dev, void *priv_data) static int axnet_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; - axnet_dev_t *info = PRIV(dev); + struct axnet_dev *info = PRIV(dev); int i, j, j2, ret; dev_dbg(&link->dev, "axnet_config(0x%p)\n", link); @@ -389,7 +389,7 @@ static int axnet_suspend(struct pcmcia_device *link) static int axnet_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; - axnet_dev_t *info = PRIV(dev); + struct axnet_dev *info = PRIV(dev); if (link->open) { if (info->active_low == 1) @@ -467,7 +467,7 @@ static void mdio_write(unsigned int addr, int phy_id, int loc, int value) static int axnet_open(struct net_device *dev) { int ret; - axnet_dev_t *info = PRIV(dev); + struct axnet_dev *info = PRIV(dev); struct pcmcia_device *link = info->p_dev; unsigned int nic_base = dev->base_addr; @@ -497,7 +497,7 @@ static int axnet_open(struct net_device *dev) static int axnet_close(struct net_device *dev) { - axnet_dev_t *info = PRIV(dev); + struct axnet_dev *info = PRIV(dev); struct pcmcia_device *link = info->p_dev; dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name); @@ -554,7 +554,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) static void ei_watchdog(u_long arg) { struct net_device *dev = (struct net_device *)(arg); - axnet_dev_t *info = PRIV(dev); + struct axnet_dev *info = PRIV(dev); unsigned int nic_base = dev->base_addr; unsigned int mii_addr = nic_base + AXNET_MII_EEP; u_short link; @@ -610,7 +610,7 @@ reschedule: static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - axnet_dev_t *info = PRIV(dev); + struct axnet_dev *info = PRIV(dev); struct mii_ioctl_data *data = if_mii(rq); unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP; switch (cmd) { @@ -1452,7 +1452,7 @@ static void ei_receive(struct net_device *dev) static void ei_rx_overrun(struct net_device *dev) { - axnet_dev_t *info = PRIV(dev); + struct axnet_dev *info = PRIV(dev); long e8390_base = dev->base_addr; unsigned char was_txing, must_resend = 0; struct ei_device *ei_local = netdev_priv(dev); @@ -1624,7 +1624,7 @@ static void set_multicast_list(struct net_device *dev) static void AX88190_init(struct net_device *dev, int startp) { - axnet_dev_t *info 
= PRIV(dev); + struct axnet_dev *info = PRIV(dev); long e8390_base = dev->base_addr; struct ei_device *ei_local = netdev_priv(dev); int i; diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 58eaa8f34942..de566fb6e0f7 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -169,6 +169,8 @@ bad_clone_list[] __initdata = { #elif defined(CONFIG_PLAT_OAKS32R) || \ defined(CONFIG_MACH_TX49XX) # define DCR_VAL 0x48 /* 8-bit mode */ +#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ +# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49) #else # define DCR_VAL 0x49 #endif diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index f395c967262e..89c8d9fc97de 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -135,7 +135,7 @@ static struct { }; -static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = { +static const struct pci_device_id ne2k_pci_tbl[] = { { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 }, { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 }, { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 }, diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index ca3c2b921cf6..9fb7b9d4fd6c 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -111,11 +111,11 @@ static void pcnet_detach(struct pcmcia_device *p_dev); /*====================================================================*/ -typedef struct hw_info_t { +struct hw_info { u_int offset; u_char a0, a1, a2; u_int flags; -} hw_info_t; +}; #define DELAY_OUTPUT 0x01 #define HAS_MISC_REG 0x02 @@ -132,7 +132,7 @@ typedef struct hw_info_t { #define MII_PHYID_REG1 0x02 #define MII_PHYID_REG2 0x03 -static hw_info_t hw_info[] = { +static struct hw_info hw_info[] = { { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 }, { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 }, @@ -196,11 +196,11 @@ static hw_info_t hw_info[] = { #define NR_INFO ARRAY_SIZE(hw_info) -static hw_info_t default_info = { 0, 0, 0, 0, 0 }; -static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; -static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII }; +static struct hw_info default_info = { 0, 0, 0, 0, 0 }; +static struct hw_info dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; +static struct hw_info dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII }; -typedef struct pcnet_dev_t { +struct pcnet_dev { struct pcmcia_device *p_dev; u_int flags; void __iomem *base; @@ -210,12 +210,12 @@ typedef struct pcnet_dev_t { u_char eth_phy, pna_phy; u_short link_status; u_long mii_reset; -} pcnet_dev_t; +}; -static inline pcnet_dev_t *PRIV(struct net_device *dev) +static inline struct pcnet_dev *PRIV(struct net_device *dev) { char *p = netdev_priv(dev); - return (pcnet_dev_t *)(p + sizeof(struct ei_device)); + return (struct pcnet_dev *)(p + sizeof(struct ei_device)); } static const struct net_device_ops pcnet_netdev_ops = { @@ -237,13 +237,13 @@ static const struct net_device_ops pcnet_netdev_ops = { static int pcnet_probe(struct pcmcia_device *link) { - pcnet_dev_t *info; + struct pcnet_dev *info; struct net_device *dev; dev_dbg(&link->dev, "pcnet_attach()\n"); /* Create new ethernet device */ - dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); + dev = __alloc_ei_netdev(sizeof(struct pcnet_dev)); if (!dev) return -ENOMEM; info = 
PRIV(dev); info->p_dev = link; @@ -276,7 +276,7 @@ static void pcnet_detach(struct pcmcia_device *link) ======================================================================*/ -static hw_info_t *get_hwinfo(struct pcmcia_device *link) +static struct hw_info *get_hwinfo(struct pcmcia_device *link) { struct net_device *dev = link->priv; u_char __iomem *base, *virt; @@ -317,7 +317,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link) ======================================================================*/ -static hw_info_t *get_prom(struct pcmcia_device *link) +static struct hw_info *get_prom(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; @@ -371,7 +371,7 @@ static hw_info_t *get_prom(struct pcmcia_device *link) ======================================================================*/ -static hw_info_t *get_dl10019(struct pcmcia_device *link) +static struct hw_info *get_dl10019(struct pcmcia_device *link) { struct net_device *dev = link->priv; int i; @@ -393,7 +393,7 @@ static hw_info_t *get_dl10019(struct pcmcia_device *link) ======================================================================*/ -static hw_info_t *get_ax88190(struct pcmcia_device *link) +static struct hw_info *get_ax88190(struct pcmcia_device *link) { struct net_device *dev = link->priv; unsigned int ioaddr = dev->base_addr; @@ -424,7 +424,7 @@ static hw_info_t *get_ax88190(struct pcmcia_device *link) ======================================================================*/ -static hw_info_t *get_hwired(struct pcmcia_device *link) +static struct hw_info *get_hwired(struct pcmcia_device *link) { struct net_device *dev = link->priv; int i; @@ -489,12 +489,12 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev, void *priv_data) return try_io_port(p_dev); } -static hw_info_t *pcnet_try_config(struct pcmcia_device *link, - int *has_shmem, int try) +static struct hw_info *pcnet_try_config(struct pcmcia_device *link, + int *has_shmem, int try) { struct net_device *dev = link->priv; - hw_info_t *local_hw_info; - pcnet_dev_t *info = PRIV(dev); + struct hw_info *local_hw_info; + struct pcnet_dev *info = PRIV(dev); int priv = try; int ret; @@ -553,10 +553,10 @@ static hw_info_t *pcnet_try_config(struct pcmcia_device *link, static int pcnet_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); int start_pg, stop_pg, cm_offset; int has_shmem = 0; - hw_info_t *local_hw_info; + struct hw_info *local_hw_info; struct ei_device *ei_local; dev_dbg(&link->dev, "pcnet_config\n"); @@ -639,7 +639,7 @@ failed: static void pcnet_release(struct pcmcia_device *link) { - pcnet_dev_t *info = PRIV(link->priv); + struct pcnet_dev *info = PRIV(link->priv); dev_dbg(&link->dev, "pcnet_release\n"); @@ -836,7 +836,7 @@ static void write_asic(unsigned int ioaddr, int location, short asic_data) static void set_misc_reg(struct net_device *dev) { unsigned int nic_base = dev->base_addr; - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); u_char tmp; if (info->flags & HAS_MISC_REG) { @@ -873,7 +873,7 @@ static void set_misc_reg(struct net_device *dev) static void mii_phy_probe(struct net_device *dev) { - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); unsigned int mii_addr = dev->base_addr + DLINK_GPIO; int i; u_int tmp, phyid; @@ -898,7 +898,7 @@ static void mii_phy_probe(struct net_device *dev) static int pcnet_open(struct net_device *dev) { int ret; - pcnet_dev_t 
*info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); struct pcmcia_device *link = info->p_dev; unsigned int nic_base = dev->base_addr; @@ -931,7 +931,7 @@ static int pcnet_open(struct net_device *dev) static int pcnet_close(struct net_device *dev) { - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); struct pcmcia_device *link = info->p_dev; dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name); @@ -982,7 +982,7 @@ static void pcnet_reset_8390(struct net_device *dev) static int set_config(struct net_device *dev, struct ifmap *map) { - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { if (!(info->flags & HAS_MISC_REG)) return -EOPNOTSUPP; @@ -1000,7 +1000,7 @@ static int set_config(struct net_device *dev, struct ifmap *map) static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) { struct net_device *dev = dev_id; - pcnet_dev_t *info; + struct pcnet_dev *info; irqreturn_t ret = ei_interrupt(irq, dev_id); if (ret == IRQ_HANDLED) { @@ -1013,7 +1013,7 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) static void ei_watchdog(u_long arg) { struct net_device *dev = (struct net_device *)arg; - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); unsigned int nic_base = dev->base_addr; unsigned int mii_addr = nic_base + DLINK_GPIO; u_short link; @@ -1101,7 +1101,7 @@ reschedule: static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); struct mii_ioctl_data *data = if_mii(rq); unsigned int mii_addr = dev->base_addr + DLINK_GPIO; @@ -1214,7 +1214,7 @@ static void dma_block_output(struct net_device *dev, int count, const u_char *buf, const int start_page) { unsigned int nic_base = dev->base_addr; - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); #ifdef PCMCIA_DEBUG int retries = 0; struct ei_device *ei_local = netdev_priv(dev); @@ -1403,7 +1403,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, int stop_pg, int cm_offset) { struct net_device *dev = link->priv; - pcnet_dev_t *info = PRIV(dev); + struct pcnet_dev *info = PRIV(dev); int i, window_size, offset, ret; window_size = (stop_pg - start_pg) << 8; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index edb718661850..dc7406c81c45 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -24,6 +24,7 @@ source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/altera/Kconfig" source "drivers/net/ethernet/amd/Kconfig" +source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 58de3339ab3c..224a01877149 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_ALTERA_TSE) += altera/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ +obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 40dbbf740331..ac7288240d55 100644 --- 
a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -285,7 +285,7 @@ enum chipset { CH_6915 = 0, }; -static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = { +static const struct pci_device_id starfire_pci_tbl[] = { { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 }, { 0, } }; diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 23578dfee249..3005155e412b 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth) GRETH_REGORIN(greth->regs->control, GRETH_TXEN); } +static inline void greth_enable_tx_and_irq(struct greth_private *greth) +{ + wmb(); /* BDs must been written to memory before enabling TX */ + GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI); +} + static inline void greth_disable_tx(struct greth_private *greth) { GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN); @@ -447,29 +453,30 @@ out: return err; } +static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next) +{ + if (tx_next < tx_last) + return (tx_last - tx_next) - 1; + else + return GRETH_TXBD_NUM - (tx_next - tx_last) - 1; +} static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; - u32 status = 0, dma_addr, ctrl; + u32 status, dma_addr; int curr_tx, nr_frags, i, err = NETDEV_TX_OK; unsigned long flags; + u16 tx_last; nr_frags = skb_shinfo(skb)->nr_frags; + tx_last = greth->tx_last; + rmb(); /* tx_last is updated by the poll task */ - /* Clean TX Ring */ - greth_clean_tx_gbit(dev); - - if (greth->tx_free < nr_frags + 1) { - spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ - ctrl = GRETH_REGLOAD(greth->regs->control); - /* Enable TX IRQ only if not already in poll() routine */ - if (ctrl & GRETH_RXI) - GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); + if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) { netif_stop_queue(dev); - spin_unlock_irqrestore(&greth->devlock, flags); err = NETDEV_TX_BUSY; goto out; } @@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) /* Linear buf */ if (nr_frags != 0) status = GRETH_TXBD_MORE; + else + status = GRETH_BD_IE; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= GRETH_TXBD_CSALL; @@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) /* Enable the descriptor chain by enabling the first descriptor */ bdp = greth->tx_bd_base + greth->tx_next; - greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); - greth->tx_next = curr_tx; - greth->tx_free -= nr_frags + 1; - - wmb(); + greth_write_bd(&bdp->stat, + greth_read_bd(&bdp->stat) | GRETH_BD_EN); spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ - greth_enable_tx(greth); + greth->tx_next = curr_tx; + greth_enable_tx_and_irq(greth); spin_unlock_irqrestore(&greth->devlock, flags); return NETDEV_TX_OK; @@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev) if (greth->tx_free > 0) { netif_wake_queue(dev); } - } static inline void greth_update_tx_stats(struct net_device *dev, u32 stat) @@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev) { struct greth_private *greth; struct greth_bd *bdp, *bdp_last_frag; - struct sk_buff *skb; + struct sk_buff *skb = NULL; u32 stat; int nr_frags, i; + u16 tx_last; greth = netdev_priv(dev); + tx_last = greth->tx_last; - while 
(greth->tx_free < GRETH_TXBD_NUM) { + while (tx_last != greth->tx_next) { - skb = greth->tx_skbuff[greth->tx_last]; + skb = greth->tx_skbuff[tx_last]; nr_frags = skb_shinfo(skb)->nr_frags; /* We only clean fully completed SKBs */ - bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); + bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags); GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); mb(); @@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev) if (stat & GRETH_BD_EN) break; - greth->tx_skbuff[greth->tx_last] = NULL; + greth->tx_skbuff[tx_last] = NULL; greth_update_tx_stats(dev, stat); dev->stats.tx_bytes += skb->len; - bdp = greth->tx_bd_base + greth->tx_last; + bdp = greth->tx_bd_base + tx_last; - greth->tx_last = NEXT_TX(greth->tx_last); + tx_last = NEXT_TX(tx_last); dma_unmap_single(greth->dev, greth_read_bd(&bdp->addr), @@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev) for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - bdp = greth->tx_bd_base + greth->tx_last; + bdp = greth->tx_bd_base + tx_last; dma_unmap_page(greth->dev, greth_read_bd(&bdp->addr), skb_frag_size(frag), DMA_TO_DEVICE); - greth->tx_last = NEXT_TX(greth->tx_last); + tx_last = NEXT_TX(tx_last); } - greth->tx_free += nr_frags+1; dev_kfree_skb(skb); } + if (skb) { /* skb is set only if the above while loop was entered */ + wmb(); + greth->tx_last = tx_last; - if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) - netif_wake_queue(dev); + if (netif_queue_stopped(dev) && + (greth_num_free_bds(tx_last, greth->tx_next) > + (MAX_SKB_FRAGS+1))) + netif_wake_queue(dev); + } } static int greth_rx(struct net_device *dev, int limit) @@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget) greth = container_of(napi, struct greth_private, napi); restart_txrx_poll: - if (netif_queue_stopped(greth->netdev)) { - if (greth->gbit_mac) - greth_clean_tx_gbit(greth->netdev); - else - greth_clean_tx(greth->netdev); - } - if (greth->gbit_mac) { + greth_clean_tx_gbit(greth->netdev); work_done += greth_rx_gbit(greth->netdev, budget - work_done); } else { + if (netif_queue_stopped(greth->netdev)) + greth_clean_tx(greth->netdev); work_done += greth_rx(greth->netdev, budget - work_done); } @@ -983,7 +992,8 @@ restart_txrx_poll: spin_lock_irqsave(&greth->devlock, flags); ctrl = GRETH_REGLOAD(greth->regs->control); - if (netif_queue_stopped(greth->netdev)) { + if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) || + (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) { GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI | GRETH_RXI); mask = GRETH_INT_RX | GRETH_INT_RE | diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index 232a622a85b7..ae16ac94daf8 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h @@ -107,7 +107,7 @@ struct greth_private { u16 tx_next; u16 tx_last; - u16 tx_free; + u16 tx_free; /* only used on 10/100Mbit */ u16 rx_cur; struct greth_regs *regs; /* Address of controller registers. 
*/ diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 9a6991be9749..b68074803de3 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -131,7 +131,7 @@ #define PCI_DEVICE_ID_SGI_ACENIC 0x0009 #endif -static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = { +static const struct pci_device_id acenic_pci_tbl[] = { { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER, diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index a78e4c136959..31c48a7ac2b6 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -89,6 +89,124 @@ MODULE_DESCRIPTION(DRV_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +/* AU1000 MAC registers and bits */ +#define MAC_CONTROL 0x0 +# define MAC_RX_ENABLE (1 << 2) +# define MAC_TX_ENABLE (1 << 3) +# define MAC_DEF_CHECK (1 << 5) +# define MAC_SET_BL(X) (((X) & 0x3) << 6) +# define MAC_AUTO_PAD (1 << 8) +# define MAC_DISABLE_RETRY (1 << 10) +# define MAC_DISABLE_BCAST (1 << 11) +# define MAC_LATE_COL (1 << 12) +# define MAC_HASH_MODE (1 << 13) +# define MAC_HASH_ONLY (1 << 15) +# define MAC_PASS_ALL (1 << 16) +# define MAC_INVERSE_FILTER (1 << 17) +# define MAC_PROMISCUOUS (1 << 18) +# define MAC_PASS_ALL_MULTI (1 << 19) +# define MAC_FULL_DUPLEX (1 << 20) +# define MAC_NORMAL_MODE 0 +# define MAC_INT_LOOPBACK (1 << 21) +# define MAC_EXT_LOOPBACK (1 << 22) +# define MAC_DISABLE_RX_OWN (1 << 23) +# define MAC_BIG_ENDIAN (1 << 30) +# define MAC_RX_ALL (1 << 31) +#define MAC_ADDRESS_HIGH 0x4 +#define MAC_ADDRESS_LOW 0x8 +#define MAC_MCAST_HIGH 0xC +#define MAC_MCAST_LOW 0x10 +#define MAC_MII_CNTRL 0x14 +# define MAC_MII_BUSY (1 << 0) +# define MAC_MII_READ 0 +# define MAC_MII_WRITE (1 << 1) +# define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6) +# define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11) +#define MAC_MII_DATA 0x18 +#define MAC_FLOW_CNTRL 0x1C +# define MAC_FLOW_CNTRL_BUSY (1 << 0) +# define MAC_FLOW_CNTRL_ENABLE (1 << 1) +# define MAC_PASS_CONTROL (1 << 2) +# define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16) +#define MAC_VLAN1_TAG 0x20 +#define MAC_VLAN2_TAG 0x24 + +/* Ethernet Controller Enable */ +# define MAC_EN_CLOCK_ENABLE (1 << 0) +# define MAC_EN_RESET0 (1 << 1) +# define MAC_EN_TOSS (0 << 2) +# define MAC_EN_CACHEABLE (1 << 3) +# define MAC_EN_RESET1 (1 << 4) +# define MAC_EN_RESET2 (1 << 5) +# define MAC_DMA_RESET (1 << 6) + +/* Ethernet Controller DMA Channels */ +/* offsets from MAC_TX_RING_ADDR address */ +#define MAC_TX_BUFF0_STATUS 0x0 +# define TX_FRAME_ABORTED (1 << 0) +# define TX_JAB_TIMEOUT (1 << 1) +# define TX_NO_CARRIER (1 << 2) +# define TX_LOSS_CARRIER (1 << 3) +# define TX_EXC_DEF (1 << 4) +# define TX_LATE_COLL_ABORT (1 << 5) +# define TX_EXC_COLL (1 << 6) +# define TX_UNDERRUN (1 << 7) +# define TX_DEFERRED (1 << 8) +# define TX_LATE_COLL (1 << 9) +# define TX_COLL_CNT_MASK (0xF << 10) +# define TX_PKT_RETRY (1 << 31) +#define MAC_TX_BUFF0_ADDR 0x4 +# define TX_DMA_ENABLE (1 << 0) +# define TX_T_DONE (1 << 1) +# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) +#define MAC_TX_BUFF0_LEN 0x8 +#define MAC_TX_BUFF1_STATUS 0x10 +#define MAC_TX_BUFF1_ADDR 0x14 +#define MAC_TX_BUFF1_LEN 0x18 +#define MAC_TX_BUFF2_STATUS 0x20 +#define MAC_TX_BUFF2_ADDR 0x24 +#define MAC_TX_BUFF2_LEN 0x28 +#define MAC_TX_BUFF3_STATUS 0x30 +#define MAC_TX_BUFF3_ADDR 
0x34 +#define MAC_TX_BUFF3_LEN 0x38 + +/* offsets from MAC_RX_RING_ADDR */ +#define MAC_RX_BUFF0_STATUS 0x0 +# define RX_FRAME_LEN_MASK 0x3fff +# define RX_WDOG_TIMER (1 << 14) +# define RX_RUNT (1 << 15) +# define RX_OVERLEN (1 << 16) +# define RX_COLL (1 << 17) +# define RX_ETHER (1 << 18) +# define RX_MII_ERROR (1 << 19) +# define RX_DRIBBLING (1 << 20) +# define RX_CRC_ERROR (1 << 21) +# define RX_VLAN1 (1 << 22) +# define RX_VLAN2 (1 << 23) +# define RX_LEN_ERROR (1 << 24) +# define RX_CNTRL_FRAME (1 << 25) +# define RX_U_CNTRL_FRAME (1 << 26) +# define RX_MCAST_FRAME (1 << 27) +# define RX_BCAST_FRAME (1 << 28) +# define RX_FILTER_FAIL (1 << 29) +# define RX_PACKET_FILTER (1 << 30) +# define RX_MISSED_FRAME (1 << 31) + +# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \ + RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \ + RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME) +#define MAC_RX_BUFF0_ADDR 0x4 +# define RX_DMA_ENABLE (1 << 0) +# define RX_T_DONE (1 << 1) +# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) +# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0) +#define MAC_RX_BUFF1_STATUS 0x10 +#define MAC_RX_BUFF1_ADDR 0x14 +#define MAC_RX_BUFF2_STATUS 0x20 +#define MAC_RX_BUFF2_ADDR 0x24 +#define MAC_RX_BUFF3_STATUS 0x30 +#define MAC_RX_BUFF3_ADDR 0x34 + /* * Theory of operation * @@ -152,10 +270,12 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset) if (force_reset || (!aup->mac_enabled)) { writel(MAC_EN_CLOCK_ENABLE, aup->enable); - au_sync_delay(2); + wmb(); /* drain writebuffer */ + mdelay(2); writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE), aup->enable); - au_sync_delay(2); + wmb(); /* drain writebuffer */ + mdelay(2); aup->mac_enabled = 1; } @@ -273,7 +393,8 @@ static void au1000_hard_stop(struct net_device *dev) reg = readl(&aup->mac->control); reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); writel(reg, &aup->mac->control); - au_sync_delay(10); + wmb(); /* drain writebuffer */ + mdelay(10); } static void au1000_enable_rx_tx(struct net_device *dev) @@ -286,7 +407,8 @@ static void au1000_enable_rx_tx(struct net_device *dev) reg = readl(&aup->mac->control); reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE); writel(reg, &aup->mac->control); - au_sync_delay(10); + wmb(); /* drain writebuffer */ + mdelay(10); } static void @@ -336,7 +458,8 @@ au1000_adjust_link(struct net_device *dev) reg |= MAC_DISABLE_RX_OWN; } writel(reg, &aup->mac->control); - au_sync_delay(1); + wmb(); /* drain writebuffer */ + mdelay(1); au1000_enable_rx_tx(dev); aup->old_duplex = phydev->duplex; @@ -500,9 +623,11 @@ static void au1000_reset_mac_unlocked(struct net_device *dev) au1000_hard_stop(dev); writel(MAC_EN_CLOCK_ENABLE, aup->enable); - au_sync_delay(2); + wmb(); /* drain writebuffer */ + mdelay(2); writel(0, aup->enable); - au_sync_delay(2); + wmb(); /* drain writebuffer */ + mdelay(2); aup->tx_full = 0; for (i = 0; i < NUM_RX_DMA; i++) { @@ -652,7 +777,7 @@ static int au1000_init(struct net_device *dev) for (i = 0; i < NUM_RX_DMA; i++) aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; - au_sync(); + wmb(); /* drain writebuffer */ control = MAC_RX_ENABLE | MAC_TX_ENABLE; #ifndef CONFIG_CPU_LITTLE_ENDIAN @@ -669,7 +794,7 @@ static int au1000_init(struct net_device *dev) writel(control, &aup->mac->control); writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */ - au_sync(); + wmb(); /* drain writebuffer */ spin_unlock_irqrestore(&aup->lock, flags); return 0; @@ -760,7 +885,7 @@ static int au1000_rx(struct net_device *dev) } prxd->buff_stat = 
(u32)(pDB->dma_addr | RX_DMA_ENABLE); aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); - au_sync(); + wmb(); /* drain writebuffer */ /* next descriptor */ prxd = aup->rx_dma_ring[aup->rx_head]; @@ -808,7 +933,7 @@ static void au1000_tx_ack(struct net_device *dev) au1000_update_tx_stats(dev, ptxd->status); ptxd->buff_stat &= ~TX_T_DONE; ptxd->len = 0; - au_sync(); + wmb(); /* drain writebuffer */ aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); ptxd = aup->tx_dma_ring[aup->tx_tail]; @@ -939,7 +1064,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) ps->tx_bytes += ptxd->len; ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; - au_sync(); + wmb(); /* drain writebuffer */ dev_kfree_skb(skb); aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index e7cc9174e364..e2e3aaf501a2 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -61,7 +61,7 @@ static const char *const version = /* * PCI device identifiers for "new style" Linux PCI Device Drivers */ -static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = { +static const struct pci_device_id pcnet32_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, @@ -481,37 +481,32 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, dma_addr_t *new_dma_addr_list; struct pcnet32_tx_head *new_tx_ring; struct sk_buff **new_skb_list; + unsigned int entries = BIT(size); pcnet32_purge_tx_ring(dev); - new_tx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * - (1 << size), - &new_ring_dma_addr); - if (new_tx_ring == NULL) { - netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + new_tx_ring = + pci_zalloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * entries, + &new_ring_dma_addr); + if (new_tx_ring == NULL) return; - } - memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); - new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), - GFP_ATOMIC); + new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC); if (!new_dma_addr_list) goto free_new_tx_ring; - new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *), - GFP_ATOMIC); + new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC); if (!new_skb_list) goto free_new_lists; kfree(lp->tx_skbuff); kfree(lp->tx_dma_addr); pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * - lp->tx_ring_size, lp->tx_ring, - lp->tx_ring_dma_addr); + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, + lp->tx_ring, lp->tx_ring_dma_addr); - lp->tx_ring_size = (1 << size); + lp->tx_ring_size = entries; lp->tx_mod_mask = lp->tx_ring_size - 1; lp->tx_len_bits = (size << 12); lp->tx_ring = new_tx_ring; @@ -524,8 +519,7 @@ free_new_lists: kfree(new_dma_addr_list); free_new_tx_ring: pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * - (1 << size), + sizeof(struct pcnet32_tx_head) * entries, new_tx_ring, new_ring_dma_addr); } @@ -549,17 +543,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, struct pcnet32_rx_head *new_rx_ring; struct sk_buff **new_skb_list; int new, overlap; - unsigned int entries = 1 << size; + unsigned int entries = BIT(size); - new_rx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * - entries, - &new_ring_dma_addr); - if (new_rx_ring == NULL) { - netif_err(lp, drv, dev, "Consistent 
memory allocation failed\n"); + new_rx_ring = + pci_zalloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * entries, + &new_ring_dma_addr); + if (new_rx_ring == NULL) return; - } - memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries); new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC); if (!new_dma_addr_list) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 346592dca33c..a3c11355a34d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer, struct xgbe_prv_data *pdata = filp->private_data; unsigned int value; - value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, - pdata->debugfs_xpcs_reg); + value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd, + pdata->debugfs_xpcs_reg); return xgbe_common_read(buffer, count, ppos, value); } @@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp, if (len < 0) return len; - pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, - pdata->debugfs_xpcs_reg, value); + XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg, + value); return len; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index edaca4496264..ea273836d999 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) /* Clear MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); @@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); @@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); /* Enable all counter interrupts */ - XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); - XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff); + XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); + XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); } static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) @@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) { unsigned int i, count; + if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) + return 0; + for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); @@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); } -static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, - unsigned char queue_count) +static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size, + unsigned int queue_count) { unsigned int q_fifo_size = 0; enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; @@ -1748,6 +1751,10 @@ static unsigned int 
xgbe_calculate_per_queue_fifo(unsigned long fifo_size, q_fifo_size = XGBE_FIFO_SIZE_KB(256); break; } + + /* The configured value is not the actual amount of fifo RAM */ + q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size); + q_fifo_size = q_fifo_size / queue_count; /* Set the queue fifo size programmable value */ @@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) xgbe_disable_rx_vlan_stripping(pdata); } +static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) +{ + bool read_hi; + u64 val; + + switch (reg_lo) { + /* These registers are always 64 bit */ + case MMC_TXOCTETCOUNT_GB_LO: + case MMC_TXOCTETCOUNT_G_LO: + case MMC_RXOCTETCOUNT_GB_LO: + case MMC_RXOCTETCOUNT_G_LO: + read_hi = true; + break; + + default: + read_hi = false; + }; + + val = XGMAC_IOREAD(pdata, reg_lo); + + if (read_hi) + val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); + + return val; +} + static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; @@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) stats->txoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) stats->txframecount_gb += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) stats->txmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) stats->tx64octets_gb += - XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) stats->tx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) stats->tx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) stats->tx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) stats->tx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) stats->tx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) stats->txunicastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) stats->txmulticastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + 
xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) stats->txunderflowerror += - XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); + xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) stats->txoctetcount_g += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) stats->txframecount_g += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) stats->txpauseframes += - XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) stats->txvlanframes_g += - XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); } static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) @@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) stats->rxframecount_gb += - XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) stats->rxoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) stats->rxoctetcount_g += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) stats->rxbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) stats->rxmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) stats->rxcrcerror += - XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) stats->rxrunterror += - XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) stats->rxjabbererror += - XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) stats->rxundersize_g += - XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) stats->rxoversize_g += - XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) stats->rx64octets_gb += - XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) stats->rx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) stats->rx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) stats->rx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, 
RX512TO1023OCTETS_GB)) stats->rx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) stats->rx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) stats->rxunicastframes_g += - XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) stats->rxlengtherror += - XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) stats->rxoutofrangetype += - XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) stats->rxpauseframes += - XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) stats->rxfifooverflow += - XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) stats->rxvlanframes_gb += - XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) stats->rxwatchdogerror += - XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); } static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) @@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); stats->txoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); stats->txframecount_gb += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); stats->txmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); stats->tx64octets_gb += - XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); stats->tx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); stats->tx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); stats->tx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); stats->tx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); stats->tx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); stats->txunicastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); stats->txmulticastframes_gb += - XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); stats->txbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); stats->txunderflowerror += - XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); + 
xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); stats->txoctetcount_g += - XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); stats->txframecount_g += - XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); stats->txpauseframes += - XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); stats->txvlanframes_g += - XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); stats->rxframecount_gb += - XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); stats->rxoctetcount_gb += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); stats->rxoctetcount_g += - XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); stats->rxbroadcastframes_g += - XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); stats->rxmulticastframes_g += - XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); stats->rxcrcerror += - XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); stats->rxrunterror += - XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); stats->rxjabbererror += - XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); stats->rxundersize_g += - XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); stats->rxoversize_g += - XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); stats->rx64octets_gb += - XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); stats->rx65to127octets_gb += - XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); stats->rx128to255octets_gb += - XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); stats->rx256to511octets_gb += - XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); stats->rx512to1023octets_gb += - XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); stats->rx1024tomaxoctets_gb += - XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); stats->rxunicastframes_g += - XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); stats->rxlengtherror += - XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); stats->rxoutofrangetype += - XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); stats->rxpauseframes += - XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); stats->rxfifooverflow += - XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); stats->rxvlanframes_gb += - XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); stats->rxwatchdogerror += - XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); /* Un-freeze counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 1f5487f4888c..b26d75856553 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ 
b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -117,7 +117,6 @@ #include <linux/spinlock.h> #include <linux/tcp.h> #include <linux/if_vlan.h> -#include <linux/phy.h> #include <net/busy_poll.h> #include <linux/clk.h> #include <linux/if_ether.h> @@ -362,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) memset(hw_feat, 0, sizeof(*hw_feat)); + hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); + /* Hardware feature register 0 */ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index a076aca138a1..46f613028e9c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_features *hw_feat = &pdata->hw_feat; strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, dev_name(pdata->dev), sizeof(drvinfo->bus_info)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", - XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER), - XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID), - XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER)); + XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); drvinfo->n_stats = XGBE_STATS_COUNT; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 8aa6a9353f7b..bdf9cfa70e88 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata) } if (i < pdata->rx_ring_count) { - spin_lock_init(&tx_ring->lock); + spin_lock_init(&rx_ring->lock); channel->rx_ring = rx_ring++; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 07bf70a82908..e9fe6e6ddcc3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -183,6 +183,7 @@ #define XGMAC_DRIVER_CONTEXT 1 #define XGMAC_IOCTL_CONTEXT 2 +#define XGBE_FIFO_MAX 81920 #define XGBE_FIFO_SIZE_B(x) (x) #define XGBE_FIFO_SIZE_KB(x) (x * 1024) @@ -526,6 +527,9 @@ struct xgbe_desc_if { * or configurations are present in the device. */ struct xgbe_hw_features { + /* HW Version */ + unsigned int version; + /* HW Feature Register0 */ unsigned int gmii; /* 1000 Mbps support */ unsigned int vlhash; /* VLAN Hash Filter */ diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig new file mode 100644 index 000000000000..ec63d706d464 --- /dev/null +++ b/drivers/net/ethernet/apm/Kconfig @@ -0,0 +1 @@ +source "drivers/net/ethernet/apm/xgene/Kconfig" diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile new file mode 100644 index 000000000000..65ce32ad1b2c --- /dev/null +++ b/drivers/net/ethernet/apm/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for APM X-GENE Ethernet driver. 
+# + +obj-$(CONFIG_NET_XGENE) += xgene/ diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig new file mode 100644 index 000000000000..f4054d242f3c --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -0,0 +1,10 @@ +config NET_XGENE + tristate "APM X-Gene SoC Ethernet Driver" + depends on HAS_DMA + select PHYLIB + help + This is the Ethernet driver for the on-chip ethernet interface on the + APM X-Gene SoC. + + To compile this driver as a module, choose M here. This module will + be called xgene_enet. diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile new file mode 100644 index 000000000000..c643e8a0a0dc --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for APM X-Gene Ethernet Driver. +# + +xgene-enet-objs := xgene_enet_hw.o xgene_enet_main.o xgene_enet_ethtool.o +obj-$(CONFIG_NET_XGENE) += xgene-enet.o diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c new file mode 100644 index 000000000000..63f2aa54a594 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -0,0 +1,125 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian <isubramanian@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/ethtool.h> +#include "xgene_enet_main.h" + +struct xgene_gstrings_stats { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define XGENE_STAT(m) { #m, offsetof(struct xgene_enet_pdata, stats.m) } + +static const struct xgene_gstrings_stats gstrings_stats[] = { + XGENE_STAT(rx_packets), + XGENE_STAT(tx_packets), + XGENE_STAT(rx_bytes), + XGENE_STAT(tx_bytes), + XGENE_STAT(rx_errors), + XGENE_STAT(tx_errors), + XGENE_STAT(rx_length_errors), + XGENE_STAT(rx_crc_errors), + XGENE_STAT(rx_frame_errors), + XGENE_STAT(rx_fifo_errors) +}; + +#define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats) + +static void xgene_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct platform_device *pdev = pdata->pdev; + + strcpy(info->driver, "xgene_enet"); + strcpy(info->version, XGENE_DRV_VERSION); + snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A"); + sprintf(info->bus_info, "%s", pdev->name); +} + +static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < XGENE_STATS_LEN; i++) { + memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +} + +static int xgene_get_sset_count(struct net_device *ndev, int sset) +{ + if (sset != ETH_SS_STATS) + return -EINVAL; + + return XGENE_STATS_LEN; +} + +static void xgene_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *dummy, + u64 *data) +{ + void *pdata = netdev_priv(ndev); + int i; + + for (i = 0; i < XGENE_STATS_LEN; i++) + *data++ = *(u64 *)(pdata + gstrings_stats[i].offset); +} + +static const struct ethtool_ops xgene_ethtool_ops = { + .get_drvinfo = xgene_get_drvinfo, + .get_settings = xgene_get_settings, + .set_settings = xgene_set_settings, + .get_link = ethtool_op_get_link, + .get_strings = xgene_get_strings, + .get_sset_count = xgene_get_sset_count, + .get_ethtool_stats = xgene_get_ethtool_stats +}; + +void xgene_enet_set_ethtool_ops(struct net_device *ndev) +{ + ndev->ethtool_ops = &xgene_ethtool_ops; +} diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c new file mode 100644 index 000000000000..812d8d65159b --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -0,0 +1,728 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian <isubramanian@apm.com> + * Ravi Patel <rapatel@apm.com> + * Keyur Chudgar <kchudgar@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
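
[Editor's note] The new xgene ethtool code above builds its statistics list from a table of {name, offset} pairs: XGENE_STAT() stringifies the field name and records its offsetof() within the driver's private data, so xgene_get_ethtool_stats() can copy every counter generically. A stand-alone sketch of the same idiom follows; struct demo_stats and its fields are hypothetical.

/* Sketch of the #field + offsetof() stats-table idiom used above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_errors;
};

struct demo_stat_desc {
	const char *name;
	size_t offset;
};

#define DEMO_STAT(m) { #m, offsetof(struct demo_stats, m) }

static const struct demo_stat_desc demo_stat_descs[] = {
	DEMO_STAT(rx_packets),
	DEMO_STAT(tx_packets),
	DEMO_STAT(rx_errors),
};

#define DEMO_STATS_LEN (sizeof(demo_stat_descs) / sizeof(demo_stat_descs[0]))

/* Walk the table the same way xgene_get_ethtool_stats() walks its own:
 * each counter is read by adding the recorded offset to the stats base. */
static void demo_dump_stats(const struct demo_stats *stats)
{
	const char *base = (const char *)stats;
	size_t i;

	for (i = 0; i < DEMO_STATS_LEN; i++)
		printf("%-12s %llu\n", demo_stat_descs[i].name,
		       (unsigned long long)*(const uint64_t *)
		       (base + demo_stat_descs[i].offset));
}

int main(void)
{
	struct demo_stats stats = { .rx_packets = 42, .tx_packets = 7, .rx_errors = 1 };

	demo_dump_stats(&stats);
	return 0;
}
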
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" + +static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + u64 addr = ring->dma; + enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; + + ring_cfg[4] |= (1 << SELTHRSH_POS) & + CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN); + ring_cfg[3] |= ACCEPTLERR; + ring_cfg[2] |= QCOHERENT; + + addr >>= 8; + ring_cfg[2] |= (addr << RINGADDRL_POS) & + CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN); + addr >>= RINGADDRL_LEN; + ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN); + ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) & + CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN); +} + +static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + bool is_bufpool; + u32 val; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR; + ring_cfg[4] |= (val << RINGTYPE_POS) & + CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN); + + if (is_bufpool) { + ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) & + CREATE_MASK(RINGMODE_POS, RINGMODE_LEN); + } +} + +static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + + ring_cfg[3] |= RECOMBBUF; + ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) & + CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN); + ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN); +} + +static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, + u32 offset, u32 data) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + iowrite32(data, pdata->ring_csr_addr + offset); +} + +static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring, + u32 offset, u32 *data) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + *data = ioread32(pdata->ring_csr_addr + offset); +} + +static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) +{ + int i; + + xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num); + for (i = 0; i < NUM_RING_CONFIG; i++) { + xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4), + ring->state[i]); + } +} + +static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) +{ + memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG); + xgene_enet_write_ring_state(ring); +} + +static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring) +{ + xgene_enet_ring_set_type(ring); + + if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0) + xgene_enet_ring_set_recombbuf(ring); + + xgene_enet_ring_init(ring); + xgene_enet_write_ring_state(ring); +} + +static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) +{ + u32 ring_id_val, ring_id_buf; + bool is_bufpool; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + + ring_id_val = ring->id & GENMASK(9, 0); + ring_id_val |= OVERWRITE; + + ring_id_buf = (ring->num << 9) & GENMASK(18, 9); + ring_id_buf |= PREFETCH_BUF_EN; + if (is_bufpool) + ring_id_buf |= IS_BUFFER_POOL; + + xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val); + xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf); +} + +static void 
xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) +{ + u32 ring_id; + + ring_id = ring->id | OVERWRITE; + xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id); + xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0); +} + +struct xgene_enet_desc_ring *xgene_enet_setup_ring( + struct xgene_enet_desc_ring *ring) +{ + u32 size = ring->size; + u32 i, data; + bool is_bufpool; + + xgene_enet_clr_ring_state(ring); + xgene_enet_set_ring_state(ring); + xgene_enet_set_ring_id(ring); + + ring->slots = xgene_enet_get_numslots(ring->id, size); + + is_bufpool = xgene_enet_is_bufpool(ring->id); + if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) + return ring; + + for (i = 0; i < ring->slots; i++) + xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]); + + xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); + data |= BIT(31 - xgene_enet_ring_bufnum(ring->id)); + xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); + + return ring; +} + +void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring) +{ + u32 data; + bool is_bufpool; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) + goto out; + + xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); + data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id)); + xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); + +out: + xgene_enet_clr_desc_ring_id(ring); + xgene_enet_clr_ring_state(ring); +} + +void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, + struct xgene_enet_pdata *pdata, + enum xgene_enet_err_code status) +{ + struct rtnl_link_stats64 *stats = &pdata->stats; + + switch (status) { + case INGRESS_CRC: + stats->rx_crc_errors++; + break; + case INGRESS_CHECKSUM: + case INGRESS_CHECKSUM_COMPUTE: + stats->rx_errors++; + break; + case INGRESS_TRUNC_FRAME: + stats->rx_frame_errors++; + break; + case INGRESS_PKT_LEN: + stats->rx_length_errors++; + break; + case INGRESS_PKT_UNDER: + stats->rx_frame_errors++; + break; + case INGRESS_FIFO_OVERRUN: + stats->rx_fifo_errors++; + break; + default: + break; + } +} + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_ring_if_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->mcx_mac_csr_addr + offset; + + iowrite32(val, addr); +} + +static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, + void __iomem *cmd, void __iomem *cmd_done, + u32 wr_addr, u32 wr_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(wr_addr, addr); + iowrite32(wr_data, wr); + iowrite32(XGENE_ENET_WR_CMD, cmd); + + /* wait for write command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata, + u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; + cmd = pdata->mcx_mac_addr + 
MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) + netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n", + wr_addr); +} + +static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->mcx_mac_csr_addr + offset; + + *val = ioread32(addr); +} + +static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, + void __iomem *cmd, void __iomem *cmd_done, + u32 rd_addr, u32 *rd_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(rd_addr, addr); + iowrite32(XGENE_ENET_RD_CMD, cmd); + + /* wait for read command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + *rd_data = ioread32(rd); + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata, + u32 rd_addr, u32 *rd_data) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data)) + netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n", + rd_addr); +} + +static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id, + u32 reg, u16 data) +{ + u32 addr = 0, wr_data = 0; + u32 done; + u8 wait = 10; + + PHY_ADDR_SET(&addr, phy_id); + REG_ADDR_SET(&addr, reg); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); + + PHY_CONTROL_SET(&wr_data, data); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data); + do { + usleep_range(5, 10); + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + netdev_err(pdata->ndev, "MII_MGMT write failed\n"); + return -EBUSY; + } + + return 0; +} + +static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata, + u8 phy_id, u32 reg) +{ + u32 addr = 0; + u32 data, done; + u8 wait = 10; + + PHY_ADDR_SET(&addr, phy_id); + REG_ADDR_SET(&addr, reg); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); + do { + usleep_range(5, 10); + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + netdev_err(pdata->ndev, "MII_MGMT read failed\n"); + return -EBUSY; + } + + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0); + + return data; +} + +void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) +{ + u32 addr0, addr1; + u8 *dev_addr = pdata->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); + addr1 |= pdata->phy_addr & 0xFFFF; + + xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0); + xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1); +} + 
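
[Editor's note] The MCX MAC registers above are not memory-mapped directly; xgene_enet_wr_indirect() and xgene_enet_rd_indirect() write an address (and, for writes, a data) register, kick a command register, then poll a command-done register a bounded number of times before reporting failure. A minimal sketch of the write side follows; mmio_read32(), mmio_write32() and delay_us() are assumed stand-ins for ioread32(), iowrite32() and udelay(), and the command bit value is illustrative.

/* Sketch only: models the address/data/command/poll-for-done sequence. */
#include <stdbool.h>
#include <stdint.h>

#define WR_CMD     0x80000000u   /* hypothetical "start write" command bit   */
#define MAX_POLLS  10            /* bounded wait, like the driver's wait = 10 */

extern void     mmio_write32(volatile uint32_t *reg, uint32_t val);  /* assumed */
extern uint32_t mmio_read32(volatile uint32_t *reg);                 /* assumed */
extern void     delay_us(unsigned int us);                           /* assumed */

static bool indirect_write(volatile uint32_t *addr_reg,
			   volatile uint32_t *data_reg,
			   volatile uint32_t *cmd_reg,
			   volatile uint32_t *done_reg,
			   uint32_t target, uint32_t value)
{
	unsigned int tries;

	mmio_write32(addr_reg, target);     /* which indirect register        */
	mmio_write32(data_reg, value);      /* payload for that register      */
	mmio_write32(cmd_reg, WR_CMD);      /* start the indirect transaction */

	/* Poll the completion flag; give up after MAX_POLLS microseconds so a
	 * wedged block cannot stall the caller forever. */
	for (tries = 0; tries < MAX_POLLS; tries++) {
		if (mmio_read32(done_reg))
			break;
		delay_us(1);
	}
	if (tries == MAX_POLLS)
		return false;

	mmio_write32(cmd_reg, 0);           /* rearm for the next transaction */
	return true;
}
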
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + u32 data; + u8 wait = 10; + + xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); + do { + usleep_range(100, 110); + xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); + } while ((data != 0xffffffff) && wait--); + + if (data != 0xffffffff) { + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; + } + + return 0; +} + +void xgene_gmac_reset(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0); +} + +void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed) +{ + u32 value, mc2; + u32 intf_ctl, rgmii; + u32 icm0, icm2; + + xgene_gmac_reset(pdata); + + xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0); + xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2); + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2); + xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl); + xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii); + + switch (speed) { + case SPEED_10: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + CFG_MACMODE_SET(&icm0, 0); + CFG_WAITASYNCRD_SET(&icm2, 500); + rgmii &= ~CFG_SPEED_1250; + break; + case SPEED_100: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + intf_ctl |= ENET_LHD_MODE; + CFG_MACMODE_SET(&icm0, 1); + CFG_WAITASYNCRD_SET(&icm2, 80); + rgmii &= ~CFG_SPEED_1250; + break; + default: + ENET_INTERFACE_MODE2_SET(&mc2, 2); + intf_ctl |= ENET_GHD_MODE; + CFG_TXCLK_MUXSEL0_SET(&rgmii, 4); + xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value); + value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value); + break; + } + + mc2 |= FULL_DUPLEX2; + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); + xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); + + xgene_gmac_set_mac_addr(pdata); + + /* Adjust MDC clock frequency */ + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value); + MGMT_CLOCK_SEL_SET(&value, 7); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value); + + /* Enable drop if bufpool not available */ + xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value); + value |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value); + + /* Rtype should be copied from FP */ + xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); + xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); + + /* Rx-Tx traffic resume */ + xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); + + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); + + xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value); + value &= ~TX_DV_GATE_EN0; + value &= ~RX_DV_GATE_EN0; + value |= RESUME_RX0; + xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value); + + xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX); +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) +{ + u32 val = 0xffffffff; + + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val); +} + +void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 cb; + u32 fpsel; + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + + 
xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); + cb |= CFG_CLE_BYPASS_EN0; + CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); + + xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); + CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); + CFG_CLE_FPSEL0_SET(&cb, fpsel); + xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb); +} + +void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN); +} + +void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN); +} + +void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN); +} + +void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); +} + +void xgene_enet_reset(struct xgene_enet_pdata *pdata) +{ + u32 val; + + clk_prepare_enable(pdata->clk); + clk_disable_unprepare(pdata->clk); + clk_prepare_enable(pdata->clk); + xgene_enet_ecc_init(pdata); + xgene_enet_config_ring_if_assoc(pdata); + + /* Enable auto-incr for scanning */ + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val); + val |= SCAN_AUTO_INCR; + MGMT_CLOCK_SEL_SET(&val, 1); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); +} + +void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) +{ + clk_disable_unprepare(pdata->clk); +} + +static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct xgene_enet_pdata *pdata = bus->priv; + u32 val; + + val = xgene_mii_phy_read(pdata, mii_id, regnum); + netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n", + mii_id, regnum, val); + + return val; +} + +static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 val) +{ + struct xgene_enet_pdata *pdata = bus->priv; + + netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n", + mii_id, regnum, val); + return xgene_mii_phy_write(pdata, mii_id, regnum, val); +} + +static void xgene_enet_adjust_link(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (phydev->link) { + if (pdata->phy_speed != phydev->speed) { + xgene_gmac_init(pdata, phydev->speed); + xgene_gmac_rx_enable(pdata); + xgene_gmac_tx_enable(pdata); + pdata->phy_speed = phydev->speed; + phy_print_status(phydev); + } + } else { + xgene_gmac_rx_disable(pdata); + xgene_gmac_tx_disable(pdata); + pdata->phy_speed = SPEED_UNKNOWN; + phy_print_status(phydev); + } +} + +static int xgene_enet_phy_connect(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device_node *phy_np; + struct phy_device *phy_dev; + struct device *dev = &pdata->pdev->dev; + + phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); + if (!phy_np) { + netdev_dbg(ndev, "No phy-handle found\n"); + return -ENODEV; + } + + phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, + 0, pdata->phy_mode); + if (!phy_dev) { + netdev_err(ndev, "Could not connect to PHY\n"); + return -ENODEV; + } + + pdata->phy_speed = SPEED_UNKNOWN; + phy_dev->supported &= ~SUPPORTED_10baseT_Half & + 
~SUPPORTED_100baseT_Half & + ~SUPPORTED_1000baseT_Half; + phy_dev->advertising = phy_dev->supported; + pdata->phy_dev = phy_dev; + + return 0; +} + +int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct device *dev = &pdata->pdev->dev; + struct device_node *child_np; + struct device_node *mdio_np = NULL; + struct mii_bus *mdio_bus; + int ret; + + for_each_child_of_node(dev->of_node, child_np) { + if (of_device_is_compatible(child_np, "apm,xgene-mdio")) { + mdio_np = child_np; + break; + } + } + + if (!mdio_np) { + netdev_dbg(ndev, "No mdio node in the dts\n"); + return -ENXIO; + } + + mdio_bus = mdiobus_alloc(); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "APM X-Gene MDIO bus"; + mdio_bus->read = xgene_enet_mdio_read; + mdio_bus->write = xgene_enet_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii", + ndev->name); + + mdio_bus->priv = pdata; + mdio_bus->parent = &ndev->dev; + + ret = of_mdiobus_register(mdio_bus, mdio_np); + if (ret) { + netdev_err(ndev, "Failed to register MDIO bus\n"); + mdiobus_free(mdio_bus); + return ret; + } + pdata->mdio_bus = mdio_bus; + + ret = xgene_enet_phy_connect(ndev); + if (ret) + xgene_enet_mdio_remove(pdata); + + return ret; +} + +void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) +{ + mdiobus_unregister(pdata->mdio_bus); + mdiobus_free(pdata->mdio_bus); + pdata->mdio_bus = NULL; +} diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h new file mode 100644 index 000000000000..371e7a5b2507 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -0,0 +1,337 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian <isubramanian@apm.com> + * Ravi Patel <rapatel@apm.com> + * Keyur Chudgar <kchudgar@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __XGENE_ENET_HW_H__ +#define __XGENE_ENET_HW_H__ + +#include "xgene_enet_main.h" + +struct xgene_enet_pdata; +struct xgene_enet_stats; + +/* clears and then set bits */ +static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len) +{ + u32 end = start + len - 1; + u32 mask = GENMASK(end, start); + + *dst &= ~mask; + *dst |= (val << start) & mask; +} + +static inline u32 xgene_get_bits(u32 val, u32 start, u32 end) +{ + return (val & GENMASK(end, start)) >> start; +} + +#define CSR_RING_ID 0x0008 +#define OVERWRITE BIT(31) +#define IS_BUFFER_POOL BIT(20) +#define PREFETCH_BUF_EN BIT(21) +#define CSR_RING_ID_BUF 0x000c +#define CSR_RING_NE_INT_MODE 0x017c +#define CSR_RING_CONFIG 0x006c +#define CSR_RING_WR_BASE 0x0070 +#define NUM_RING_CONFIG 5 +#define BUFPOOL_MODE 3 +#define RM3 3 +#define INC_DEC_CMD_ADDR 0x002c +#define UDP_HDR_SIZE 2 +#define BUF_LEN_CODE_2K 0x5000 + +#define CREATE_MASK(pos, len) GENMASK((pos)+(len)-1, (pos)) +#define CREATE_MASK_ULL(pos, len) GENMASK_ULL((pos)+(len)-1, (pos)) + +/* Empty slot soft signature */ +#define EMPTY_SLOT_INDEX 1 +#define EMPTY_SLOT ~0ULL + +#define WORK_DESC_SIZE 32 +#define BUFPOOL_DESC_SIZE 16 + +#define RING_OWNER_MASK GENMASK(9, 6) +#define RING_BUFNUM_MASK GENMASK(5, 0) + +#define SELTHRSH_POS 3 +#define SELTHRSH_LEN 3 +#define RINGADDRL_POS 5 +#define RINGADDRL_LEN 27 +#define RINGADDRH_POS 0 +#define RINGADDRH_LEN 6 +#define RINGSIZE_POS 23 +#define RINGSIZE_LEN 3 +#define RINGTYPE_POS 19 +#define RINGTYPE_LEN 2 +#define RINGMODE_POS 20 +#define RINGMODE_LEN 3 +#define RECOMTIMEOUTL_POS 28 +#define RECOMTIMEOUTL_LEN 3 +#define RECOMTIMEOUTH_POS 0 +#define RECOMTIMEOUTH_LEN 2 +#define NUMMSGSINQ_POS 1 +#define NUMMSGSINQ_LEN 16 +#define ACCEPTLERR BIT(19) +#define QCOHERENT BIT(4) +#define RECOMBBUF BIT(27) + +#define BLOCK_ETH_CSR_OFFSET 0x2000 +#define BLOCK_ETH_RING_IF_OFFSET 0x9000 +#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xC000 +#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 + +#define BLOCK_ETH_MAC_OFFSET 0x0000 +#define BLOCK_ETH_STATS_OFFSET 0x0014 +#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 + +#define MAC_ADDR_REG_OFFSET 0x00 +#define MAC_COMMAND_REG_OFFSET 0x04 +#define MAC_WRITE_REG_OFFSET 0x08 +#define MAC_READ_REG_OFFSET 0x0c +#define MAC_COMMAND_DONE_REG_OFFSET 0x10 + +#define STAT_ADDR_REG_OFFSET 0x00 +#define STAT_COMMAND_REG_OFFSET 0x04 +#define STAT_WRITE_REG_OFFSET 0x08 +#define STAT_READ_REG_OFFSET 0x0c +#define STAT_COMMAND_DONE_REG_OFFSET 0x10 + +#define MII_MGMT_CONFIG_ADDR 0x20 +#define MII_MGMT_COMMAND_ADDR 0x24 +#define MII_MGMT_ADDRESS_ADDR 0x28 +#define MII_MGMT_CONTROL_ADDR 0x2c +#define MII_MGMT_STATUS_ADDR 0x30 +#define MII_MGMT_INDICATORS_ADDR 0x34 + +#define BUSY_MASK BIT(0) +#define READ_CYCLE_MASK BIT(0) +#define PHY_CONTROL_SET(dst, val) xgene_set_bits(dst, val, 0, 16) + +#define ENET_SPARE_CFG_REG_ADDR 0x0750 +#define RSIF_CONFIG_REG_ADDR 0x0010 +#define RSIF_RAM_DBG_REG0_ADDR 0x0048 +#define RGMII_REG_0_ADDR 0x07e0 +#define CFG_LINK_AGGR_RESUME_0_ADDR 0x07c8 +#define DEBUG_REG_ADDR 0x0700 +#define CFG_BYPASS_ADDR 0x0294 +#define CLE_BYPASS_REG0_0_ADDR 0x0490 +#define CLE_BYPASS_REG1_0_ADDR 0x0494 +#define CFG_RSIF_FPBUFF_TIMEOUT_EN BIT(31) +#define RESUME_TX BIT(0) +#define CFG_SPEED_1250 BIT(24) +#define TX_PORT0 BIT(0) +#define CFG_BYPASS_UNISEC_TX BIT(2) +#define CFG_BYPASS_UNISEC_RX BIT(1) +#define CFG_CLE_BYPASS_EN0 BIT(31) +#define CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3) + +#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) 
+#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) +#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) +#define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) +#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16) +#define ICM_CONFIG0_REG_0_ADDR 0x0400 +#define ICM_CONFIG2_REG_0_ADDR 0x0410 +#define RX_DV_GATE_REG_0_ADDR 0x05fc +#define TX_DV_GATE_EN0 BIT(2) +#define RX_DV_GATE_EN0 BIT(1) +#define RESUME_RX0 BIT(0) +#define ENET_CFGSSQMIWQASSOC_ADDR 0xe0 +#define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc +#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0 +#define ENET_CFGSSQMIQMLITEWQASSOC_ADDR 0xf4 +#define ENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70 +#define ENET_BLOCK_MEM_RDY_ADDR 0x74 +#define MAC_CONFIG_1_ADDR 0x00 +#define MAC_CONFIG_2_ADDR 0x04 +#define MAX_FRAME_LEN_ADDR 0x10 +#define INTERFACE_CONTROL_ADDR 0x38 +#define STATION_ADDR0_ADDR 0x40 +#define STATION_ADDR1_ADDR 0x44 +#define PHY_ADDR_SET(dst, val) xgene_set_bits(dst, val, 8, 5) +#define REG_ADDR_SET(dst, val) xgene_set_bits(dst, val, 0, 5) +#define ENET_INTERFACE_MODE2_SET(dst, val) xgene_set_bits(dst, val, 8, 2) +#define MGMT_CLOCK_SEL_SET(dst, val) xgene_set_bits(dst, val, 0, 3) +#define SOFT_RESET1 BIT(31) +#define TX_EN BIT(0) +#define RX_EN BIT(2) +#define ENET_LHD_MODE BIT(25) +#define ENET_GHD_MODE BIT(26) +#define FULL_DUPLEX2 BIT(0) +#define SCAN_AUTO_INCR BIT(5) +#define TBYT_ADDR 0x38 +#define TPKT_ADDR 0x39 +#define TDRP_ADDR 0x45 +#define TFCS_ADDR 0x47 +#define TUND_ADDR 0x4a + +#define TSO_IPPROTO_TCP 1 +#define FULL_DUPLEX 2 + +#define USERINFO_POS 0 +#define USERINFO_LEN 32 +#define FPQNUM_POS 32 +#define FPQNUM_LEN 12 +#define LERR_POS 60 +#define LERR_LEN 3 +#define STASH_POS 52 +#define STASH_LEN 2 +#define BUFDATALEN_POS 48 +#define BUFDATALEN_LEN 12 +#define DATAADDR_POS 0 +#define DATAADDR_LEN 42 +#define COHERENT_POS 63 +#define HENQNUM_POS 48 +#define HENQNUM_LEN 12 +#define TYPESEL_POS 44 +#define TYPESEL_LEN 4 +#define ETHHDR_POS 12 +#define ETHHDR_LEN 8 +#define IC_POS 35 /* Insert CRC */ +#define TCPHDR_POS 0 +#define TCPHDR_LEN 6 +#define IPHDR_POS 6 +#define IPHDR_LEN 6 +#define EC_POS 22 /* Enable checksum */ +#define EC_LEN 1 +#define IS_POS 24 /* IP protocol select */ +#define IS_LEN 1 +#define TYPE_ETH_WORK_MESSAGE_POS 44 + +struct xgene_enet_raw_desc { + __le64 m0; + __le64 m1; + __le64 m2; + __le64 m3; +}; + +struct xgene_enet_raw_desc16 { + __le64 m0; + __le64 m1; +}; + +static inline void xgene_enet_mark_desc_slot_empty(void *desc_slot_ptr) +{ + __le64 *desc_slot = desc_slot_ptr; + + desc_slot[EMPTY_SLOT_INDEX] = cpu_to_le64(EMPTY_SLOT); +} + +static inline bool xgene_enet_is_desc_slot_empty(void *desc_slot_ptr) +{ + __le64 *desc_slot = desc_slot_ptr; + + return (desc_slot[EMPTY_SLOT_INDEX] == cpu_to_le64(EMPTY_SLOT)); +} + +enum xgene_enet_ring_cfgsize { + RING_CFGSIZE_512B, + RING_CFGSIZE_2KB, + RING_CFGSIZE_16KB, + RING_CFGSIZE_64KB, + RING_CFGSIZE_512KB, + RING_CFGSIZE_INVALID +}; + +enum xgene_enet_ring_type { + RING_DISABLED, + RING_REGULAR, + RING_BUFPOOL +}; + +enum xgene_ring_owner { + RING_OWNER_ETH0, + RING_OWNER_CPU = 15, + RING_OWNER_INVALID +}; + +enum xgene_enet_ring_bufnum { + RING_BUFNUM_REGULAR = 0x0, + RING_BUFNUM_BUFPOOL = 0x20, + RING_BUFNUM_INVALID +}; + +enum xgene_enet_cmd { + XGENE_ENET_WR_CMD = BIT(31), + XGENE_ENET_RD_CMD = BIT(30) +}; + +enum xgene_enet_err_code { + HBF_READ_DATA = 3, + HBF_LL_READ = 4, + BAD_WORK_MSG = 6, + BUFPOOL_TIMEOUT = 15, + INGRESS_CRC = 16, + INGRESS_CHECKSUM = 17, + INGRESS_TRUNC_FRAME 
= 18, + INGRESS_PKT_LEN = 19, + INGRESS_PKT_UNDER = 20, + INGRESS_FIFO_OVERRUN = 21, + INGRESS_CHECKSUM_COMPUTE = 26, + ERR_CODE_INVALID +}; + +static inline enum xgene_ring_owner xgene_enet_ring_owner(u16 id) +{ + return (id & RING_OWNER_MASK) >> 6; +} + +static inline u8 xgene_enet_ring_bufnum(u16 id) +{ + return id & RING_BUFNUM_MASK; +} + +static inline bool xgene_enet_is_bufpool(u16 id) +{ + return ((id & RING_BUFNUM_MASK) >= 0x20) ? true : false; +} + +static inline u16 xgene_enet_get_numslots(u16 id, u32 size) +{ + bool is_bufpool = xgene_enet_is_bufpool(id); + + return (is_bufpool) ? size / BUFPOOL_DESC_SIZE : + size / WORK_DESC_SIZE; +} + +struct xgene_enet_desc_ring *xgene_enet_setup_ring( + struct xgene_enet_desc_ring *ring); +void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring); +void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, + struct xgene_enet_pdata *pdata, + enum xgene_enet_err_code status); + +void xgene_enet_reset(struct xgene_enet_pdata *priv); +void xgene_gmac_reset(struct xgene_enet_pdata *priv); +void xgene_gmac_init(struct xgene_enet_pdata *priv, int speed); +void xgene_gmac_tx_enable(struct xgene_enet_pdata *priv); +void xgene_gmac_rx_enable(struct xgene_enet_pdata *priv); +void xgene_gmac_tx_disable(struct xgene_enet_pdata *priv); +void xgene_gmac_rx_disable(struct xgene_enet_pdata *priv); +void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata); +void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id); +void xgene_gport_shutdown(struct xgene_enet_pdata *priv); +void xgene_gmac_get_tx_stats(struct xgene_enet_pdata *pdata); + +int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); +void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); + +#endif /* __XGENE_ENET_HW_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c new file mode 100644 index 000000000000..e4222af2baa6 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -0,0 +1,960 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian <isubramanian@apm.com> + * Ravi Patel <rapatel@apm.com> + * Keyur Chudgar <kchudgar@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" + +static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) +{ + struct xgene_enet_raw_desc16 *raw_desc; + int i; + + for (i = 0; i < buf_pool->slots; i++) { + raw_desc = &buf_pool->raw_desc16[i]; + + /* Hardware expects descriptor in little endian format */ + raw_desc->m0 = cpu_to_le64(i | + SET_VAL(FPQNUM, buf_pool->dst_ring_num) | + SET_VAL(STASH, 3)); + } +} + +static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, + u32 nbuf) +{ + struct sk_buff *skb; + struct xgene_enet_raw_desc16 *raw_desc; + struct net_device *ndev; + struct device *dev; + dma_addr_t dma_addr; + u32 tail = buf_pool->tail; + u32 slots = buf_pool->slots - 1; + u16 bufdatalen, len; + int i; + + ndev = buf_pool->ndev; + dev = ndev_to_dev(buf_pool->ndev); + bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0)); + len = XGENE_ENET_MAX_MTU; + + for (i = 0; i < nbuf; i++) { + raw_desc = &buf_pool->raw_desc16[tail]; + + skb = netdev_alloc_skb_ip_align(ndev, len); + if (unlikely(!skb)) + return -ENOMEM; + buf_pool->rx_skb[tail] = skb; + + dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + netdev_err(ndev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | + SET_VAL(BUFDATALEN, bufdatalen) | + SET_BIT(COHERENT)); + tail = (tail + 1) & slots; + } + + iowrite32(nbuf, buf_pool->cmd); + buf_pool->tail = tail; + + return 0; +} + +static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + return ((u16)pdata->rm << 10) | ring->num; +} + +static u8 xgene_enet_hdr_len(const void *data) +{ + const struct ethhdr *eth = data; + + return (eth->h_proto == htons(ETH_P_8021Q)) ? 
VLAN_ETH_HLEN : ETH_HLEN; +} + +static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) +{ + u32 __iomem *cmd_base = ring->cmd_base; + u32 ring_state, num_msgs; + + ring_state = ioread32(&cmd_base[1]); + num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN); + + return num_msgs >> NUMMSGSINQ_POS; +} + +static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) +{ + struct xgene_enet_raw_desc16 *raw_desc; + u32 slots = buf_pool->slots - 1; + u32 tail = buf_pool->tail; + u32 userinfo; + int i, len; + + len = xgene_enet_ring_len(buf_pool); + for (i = 0; i < len; i++) { + tail = (tail - 1) & slots; + raw_desc = &buf_pool->raw_desc16[tail]; + + /* Hardware stores descriptor in little endian format */ + userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + dev_kfree_skb_any(buf_pool->rx_skb[userinfo]); + } + + iowrite32(-len, buf_pool->cmd); + buf_pool->tail = tail; +} + +static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) +{ + struct xgene_enet_desc_ring *rx_ring = data; + + if (napi_schedule_prep(&rx_ring->napi)) { + disable_irq_nosync(irq); + __napi_schedule(&rx_ring->napi); + } + + return IRQ_HANDLED; +} + +static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, + struct xgene_enet_raw_desc *raw_desc) +{ + struct sk_buff *skb; + struct device *dev; + u16 skb_index; + u8 status; + int ret = 0; + + skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + skb = cp_ring->cp_skb[skb_index]; + + dev = ndev_to_dev(cp_ring->ndev); + dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), + GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)), + DMA_TO_DEVICE); + + /* Checking for error */ + status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); + if (unlikely(status > 2)) { + xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev), + status); + ret = -EIO; + } + + if (likely(skb)) { + dev_kfree_skb_any(skb); + } else { + netdev_err(cp_ring->ndev, "completion skb is NULL\n"); + ret = -EIO; + } + + return ret; +} + +static u64 xgene_enet_work_msg(struct sk_buff *skb) +{ + struct iphdr *iph; + u8 l3hlen, l4hlen = 0; + u8 csum_enable = 0; + u8 proto = 0; + u8 ethhdr; + u64 hopinfo; + + if (unlikely(skb->protocol != htons(ETH_P_IP)) && + unlikely(skb->protocol != htons(ETH_P_8021Q))) + goto out; + + if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM))) + goto out; + + iph = ip_hdr(skb); + if (unlikely(ip_is_fragment(iph))) + goto out; + + if (likely(iph->protocol == IPPROTO_TCP)) { + l4hlen = tcp_hdrlen(skb) >> 2; + csum_enable = 1; + proto = TSO_IPPROTO_TCP; + } else if (iph->protocol == IPPROTO_UDP) { + l4hlen = UDP_HDR_SIZE; + csum_enable = 1; + } +out: + l3hlen = ip_hdrlen(skb) >> 2; + ethhdr = xgene_enet_hdr_len(skb->data); + hopinfo = SET_VAL(TCPHDR, l4hlen) | + SET_VAL(IPHDR, l3hlen) | + SET_VAL(ETHHDR, ethhdr) | + SET_VAL(EC, csum_enable) | + SET_VAL(IS, proto) | + SET_BIT(IC) | + SET_BIT(TYPE_ETH_WORK_MESSAGE); + + return hopinfo; +} + +static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, + struct sk_buff *skb) +{ + struct device *dev = ndev_to_dev(tx_ring->ndev); + struct xgene_enet_raw_desc *raw_desc; + dma_addr_t dma_addr; + u16 tail = tx_ring->tail; + u64 hopinfo; + + raw_desc = &tx_ring->raw_desc[tail]; + memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc)); + + dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + netdev_err(tx_ring->ndev, "DMA mapping error\n"); + return -EINVAL; + } + + /* Hardware expects descriptor in 
little endian format */ + raw_desc->m0 = cpu_to_le64(tail); + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | + SET_VAL(BUFDATALEN, skb->len) | + SET_BIT(COHERENT)); + hopinfo = xgene_enet_work_msg(skb); + raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | + hopinfo); + tx_ring->cp_ring->cp_skb[tail] = skb; + + return 0; +} + +static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; + struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; + u32 tx_level, cq_level; + + tx_level = xgene_enet_ring_len(tx_ring); + cq_level = xgene_enet_ring_len(cp_ring); + if (unlikely(tx_level > pdata->tx_qcnt_hi || + cq_level > pdata->cp_qcnt_hi)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + if (xgene_enet_setup_tx_desc(tx_ring, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + iowrite32(1, tx_ring->cmd); + skb_tx_timestamp(skb); + tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1); + + pdata->stats.tx_packets++; + pdata->stats.tx_bytes += skb->len; + + return NETDEV_TX_OK; +} + +static void xgene_enet_skip_csum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + + if (!ip_is_fragment(iph) || + (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, + struct xgene_enet_raw_desc *raw_desc) +{ + struct net_device *ndev; + struct xgene_enet_pdata *pdata; + struct device *dev; + struct xgene_enet_desc_ring *buf_pool; + u32 datalen, skb_index; + struct sk_buff *skb; + u8 status; + int ret = 0; + + ndev = rx_ring->ndev; + pdata = netdev_priv(ndev); + dev = ndev_to_dev(rx_ring->ndev); + buf_pool = rx_ring->buf_pool; + + dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), + XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE); + skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + skb = buf_pool->rx_skb[skb_index]; + + /* checking for error */ + status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); + if (unlikely(status > 2)) { + dev_kfree_skb_any(skb); + xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), + status); + pdata->stats.rx_dropped++; + ret = -EIO; + goto out; + } + + /* strip off CRC as HW isn't doing this */ + datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)); + datalen -= 4; + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, datalen); + + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, ndev); + if (likely((ndev->features & NETIF_F_IP_CSUM) && + skb->protocol == htons(ETH_P_IP))) { + xgene_enet_skip_csum(skb); + } + + pdata->stats.rx_packets++; + pdata->stats.rx_bytes += datalen; + napi_gro_receive(&rx_ring->napi, skb); +out: + if (--rx_ring->nbufpool == 0) { + ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL); + rx_ring->nbufpool = NUM_BUFPOOL; + } + + return ret; +} + +static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) +{ + return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? 
true : false; +} + +static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, + int budget) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + struct xgene_enet_raw_desc *raw_desc; + u16 head = ring->head; + u16 slots = ring->slots - 1; + int ret, count = 0; + + do { + raw_desc = &ring->raw_desc[head]; + if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) + break; + + if (is_rx_desc(raw_desc)) + ret = xgene_enet_rx_frame(ring, raw_desc); + else + ret = xgene_enet_tx_completion(ring, raw_desc); + xgene_enet_mark_desc_slot_empty(raw_desc); + + head = (head + 1) & slots; + count++; + + if (ret) + break; + } while (--budget); + + if (likely(count)) { + iowrite32(-count, ring->cmd); + ring->head = head; + + if (netif_queue_stopped(ring->ndev)) { + if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low) + netif_wake_queue(ring->ndev); + } + } + + return budget; +} + +static int xgene_enet_napi(struct napi_struct *napi, const int budget) +{ + struct xgene_enet_desc_ring *ring; + int processed; + + ring = container_of(napi, struct xgene_enet_desc_ring, napi); + processed = xgene_enet_process_ring(ring, budget); + + if (processed != budget) { + napi_complete(napi); + enable_irq(ring->irq); + } + + return processed; +} + +static void xgene_enet_timeout(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + + xgene_gmac_reset(pdata); +} + +static int xgene_enet_register_irq(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + int ret; + + ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq, + IRQF_SHARED, ndev->name, pdata->rx_ring); + if (ret) { + netdev_err(ndev, "rx%d interrupt request failed\n", + pdata->rx_ring->irq); + } + + return ret; +} + +static void xgene_enet_free_irq(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata; + struct device *dev; + + pdata = netdev_priv(ndev); + dev = ndev_to_dev(ndev); + devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); +} + +static int xgene_enet_open(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + int ret; + + xgene_gmac_tx_enable(pdata); + xgene_gmac_rx_enable(pdata); + + ret = xgene_enet_register_irq(ndev); + if (ret) + return ret; + napi_enable(&pdata->rx_ring->napi); + + if (pdata->phy_dev) + phy_start(pdata->phy_dev); + + netif_start_queue(ndev); + + return ret; +} + +static int xgene_enet_close(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + + netif_stop_queue(ndev); + + if (pdata->phy_dev) + phy_stop(pdata->phy_dev); + + napi_disable(&pdata->rx_ring->napi); + xgene_enet_free_irq(ndev); + xgene_enet_process_ring(pdata->rx_ring, -1); + + xgene_gmac_tx_disable(pdata); + xgene_gmac_rx_disable(pdata); + + return 0; +} + +static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata; + struct device *dev; + + pdata = netdev_priv(ring->ndev); + dev = ndev_to_dev(ring->ndev); + + xgene_enet_clear_ring(ring); + dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); +} + +static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_desc_ring *buf_pool; + + if (pdata->tx_ring) { + xgene_enet_delete_ring(pdata->tx_ring); + pdata->tx_ring = NULL; + } + + if (pdata->rx_ring) { + buf_pool = pdata->rx_ring->buf_pool; + xgene_enet_delete_bufpool(buf_pool); + xgene_enet_delete_ring(buf_pool); + xgene_enet_delete_ring(pdata->rx_ring); + pdata->rx_ring = 
NULL; + } +} + +static int xgene_enet_get_ring_size(struct device *dev, + enum xgene_enet_ring_cfgsize cfgsize) +{ + int size = -EINVAL; + + switch (cfgsize) { + case RING_CFGSIZE_512B: + size = 0x200; + break; + case RING_CFGSIZE_2KB: + size = 0x800; + break; + case RING_CFGSIZE_16KB: + size = 0x4000; + break; + case RING_CFGSIZE_64KB: + size = 0x10000; + break; + case RING_CFGSIZE_512KB: + size = 0x80000; + break; + default: + dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize); + break; + } + + return size; +} + +static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) +{ + struct device *dev; + + if (!ring) + return; + + dev = ndev_to_dev(ring->ndev); + + if (ring->desc_addr) { + xgene_enet_clear_ring(ring); + dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); + } + devm_kfree(dev, ring); +} + +static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) +{ + struct device *dev = &pdata->pdev->dev; + struct xgene_enet_desc_ring *ring; + + ring = pdata->tx_ring; + if (ring) { + if (ring->cp_ring && ring->cp_ring->cp_skb) + devm_kfree(dev, ring->cp_ring->cp_skb); + xgene_enet_free_desc_ring(ring); + } + + ring = pdata->rx_ring; + if (ring) { + if (ring->buf_pool) { + if (ring->buf_pool->rx_skb) + devm_kfree(dev, ring->buf_pool->rx_skb); + xgene_enet_free_desc_ring(ring->buf_pool); + } + xgene_enet_free_desc_ring(ring); + } +} + +static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( + struct net_device *ndev, u32 ring_num, + enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) +{ + struct xgene_enet_desc_ring *ring; + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + int size; + + size = xgene_enet_get_ring_size(dev, cfgsize); + if (size < 0) + return NULL; + + ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring), + GFP_KERNEL); + if (!ring) + return NULL; + + ring->ndev = ndev; + ring->num = ring_num; + ring->cfgsize = cfgsize; + ring->id = ring_id; + + ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma, + GFP_KERNEL); + if (!ring->desc_addr) { + devm_kfree(dev, ring); + return NULL; + } + ring->size = size; + + ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6); + ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR; + pdata->rm = RM3; + ring = xgene_enet_setup_ring(ring); + netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n", + ring->num, ring->size, ring->id, ring->slots); + + return ring; +} + +static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum) +{ + return (owner << 6) | (bufnum & GENMASK(5, 0)); +} + +static int xgene_enet_create_desc_rings(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; + struct xgene_enet_desc_ring *buf_pool = NULL; + u8 cpu_bufnum = 0, eth_bufnum = 0; + u8 bp_bufnum = 0x20; + u16 ring_id, ring_num = 0; + int ret; + + /* allocate rx descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); + rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, ring_id); + if (!rx_ring) { + ret = -ENOMEM; + goto err; + } + + /* allocate buffer pool for receiving packets */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++); + buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_2KB, ring_id); + if (!buf_pool) { + ret = -ENOMEM; + goto err; + } + + rx_ring->nbufpool = NUM_BUFPOOL; + rx_ring->buf_pool = buf_pool; + 
rx_ring->irq = pdata->rx_irq; + buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, + sizeof(struct sk_buff *), GFP_KERNEL); + if (!buf_pool->rx_skb) { + ret = -ENOMEM; + goto err; + } + + buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); + rx_ring->buf_pool = buf_pool; + pdata->rx_ring = rx_ring; + + /* allocate tx descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++); + tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, ring_id); + if (!tx_ring) { + ret = -ENOMEM; + goto err; + } + pdata->tx_ring = tx_ring; + + cp_ring = pdata->rx_ring; + cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, + sizeof(struct sk_buff *), GFP_KERNEL); + if (!cp_ring->cp_skb) { + ret = -ENOMEM; + goto err; + } + pdata->tx_ring->cp_ring = cp_ring; + pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + + pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; + pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; + pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; + + return 0; + +err: + xgene_enet_free_desc_rings(pdata); + return ret; +} + +static struct rtnl_link_stats64 *xgene_enet_get_stats64( + struct net_device *ndev, + struct rtnl_link_stats64 *storage) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct rtnl_link_stats64 *stats = &pdata->stats; + + stats->rx_errors += stats->rx_length_errors + + stats->rx_crc_errors + + stats->rx_frame_errors + + stats->rx_fifo_errors; + memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); + + return storage; +} + +static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + int ret; + + ret = eth_mac_addr(ndev, addr); + if (ret) + return ret; + xgene_gmac_set_mac_addr(pdata); + + return ret; +} + +static const struct net_device_ops xgene_ndev_ops = { + .ndo_open = xgene_enet_open, + .ndo_stop = xgene_enet_close, + .ndo_start_xmit = xgene_enet_start_xmit, + .ndo_tx_timeout = xgene_enet_timeout, + .ndo_get_stats64 = xgene_enet_get_stats64, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = xgene_enet_set_mac_address, +}; + +static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) +{ + struct platform_device *pdev; + struct net_device *ndev; + struct device *dev; + struct resource *res; + void __iomem *base_addr; + const char *mac; + int ret; + + pdev = pdata->pdev; + dev = &pdev->dev; + ndev = pdata->ndev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); + if (!res) { + dev_err(dev, "Resource enet_csr not defined\n"); + return -ENODEV; + } + pdata->base_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->base_addr)) { + dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); + return PTR_ERR(pdata->base_addr); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); + if (!res) { + dev_err(dev, "Resource ring_csr not defined\n"); + return -ENODEV; + } + pdata->ring_csr_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->ring_csr_addr)) { + dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); + return PTR_ERR(pdata->ring_csr_addr); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); + if (!res) { + dev_err(dev, "Resource ring_cmd not defined\n"); + return -ENODEV; + } + pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->ring_cmd_addr)) { + dev_err(dev, "Unable to retrieve ENET Ring command region\n"); + return PTR_ERR(pdata->ring_cmd_addr); + } + + ret = 
platform_get_irq(pdev, 0); + if (ret <= 0) { + dev_err(dev, "Unable to get ENET Rx IRQ\n"); + ret = ret ? : -ENXIO; + return ret; + } + pdata->rx_irq = ret; + + mac = of_get_mac_address(dev->of_node); + if (mac) + memcpy(ndev->dev_addr, mac, ndev->addr_len); + else + eth_hw_addr_random(ndev); + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + + pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (pdata->phy_mode < 0) { + dev_err(dev, "Incorrect phy-connection-type in DTS\n"); + return -EINVAL; + } + + pdata->clk = devm_clk_get(&pdev->dev, NULL); + ret = IS_ERR(pdata->clk); + if (IS_ERR(pdata->clk)) { + dev_err(&pdev->dev, "can't get clock\n"); + ret = PTR_ERR(pdata->clk); + return ret; + } + + base_addr = pdata->base_addr; + pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; + pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; + pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; + pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET; + pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET; + pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET; + pdata->rx_buff_cnt = NUM_PKT_BUF; + + return ret; +} + +static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct xgene_enet_desc_ring *buf_pool; + u16 dst_ring_num; + int ret; + + xgene_gmac_tx_disable(pdata); + xgene_gmac_rx_disable(pdata); + + ret = xgene_enet_create_desc_rings(ndev); + if (ret) { + netdev_err(ndev, "Error in ring configuration\n"); + return ret; + } + + /* setup buffer pool */ + buf_pool = pdata->rx_ring->buf_pool; + xgene_enet_init_bufpool(buf_pool); + ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); + if (ret) { + xgene_enet_delete_desc_rings(pdata); + return ret; + } + + dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); + xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id); + + return ret; +} + +static int xgene_enet_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + struct xgene_enet_pdata *pdata; + struct device *dev = &pdev->dev; + struct napi_struct *napi; + int ret; + + ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); + if (!ndev) + return -ENOMEM; + + pdata = netdev_priv(ndev); + + pdata->pdev = pdev; + pdata->ndev = ndev; + SET_NETDEV_DEV(ndev, dev); + platform_set_drvdata(pdev, pdata); + ndev->netdev_ops = &xgene_ndev_ops; + xgene_enet_set_ethtool_ops(ndev); + ndev->features |= NETIF_F_IP_CSUM | + NETIF_F_GSO | + NETIF_F_GRO; + + ret = xgene_enet_get_resources(pdata); + if (ret) + goto err; + + xgene_enet_reset(pdata); + xgene_gmac_init(pdata, SPEED_1000); + + ret = register_netdev(ndev); + if (ret) { + netdev_err(ndev, "Failed to register netdev\n"); + goto err; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) { + netdev_err(ndev, "No usable DMA configuration\n"); + goto err; + } + + ret = xgene_enet_init_hw(pdata); + if (ret) + goto err; + + napi = &pdata->rx_ring->napi; + netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); + ret = xgene_enet_mdio_config(pdata); + + return ret; +err: + free_netdev(ndev); + return ret; +} + +static int xgene_enet_remove(struct platform_device *pdev) +{ + struct xgene_enet_pdata *pdata; + struct net_device *ndev; + + pdata = platform_get_drvdata(pdev); + ndev = pdata->ndev; + + xgene_gmac_rx_disable(pdata); + xgene_gmac_tx_disable(pdata); + + netif_napi_del(&pdata->rx_ring->napi); + xgene_enet_mdio_remove(pdata); + xgene_enet_delete_desc_rings(pdata); + 
unregister_netdev(ndev); + xgene_gport_shutdown(pdata); + free_netdev(ndev); + + return 0; +} + +static struct of_device_id xgene_enet_match[] = { + {.compatible = "apm,xgene-enet",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, xgene_enet_match); + +static struct platform_driver xgene_enet_driver = { + .driver = { + .name = "xgene-enet", + .of_match_table = xgene_enet_match, + }, + .probe = xgene_enet_probe, + .remove = xgene_enet_remove, +}; + +module_platform_driver(xgene_enet_driver); + +MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver"); +MODULE_VERSION(XGENE_DRV_VERSION); +MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h new file mode 100644 index 000000000000..0815866986b0 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -0,0 +1,135 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian <isubramanian@apm.com> + * Ravi Patel <rapatel@apm.com> + * Keyur Chudgar <kchudgar@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __XGENE_ENET_MAIN_H__ +#define __XGENE_ENET_MAIN_H__ + +#include <linux/clk.h> +#include <linux/of_platform.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/module.h> +#include <net/ip.h> +#include <linux/prefetch.h> +#include <linux/if_vlan.h> +#include <linux/phy.h> +#include "xgene_enet_hw.h" + +#define XGENE_DRV_VERSION "v1.0" +#define XGENE_ENET_MAX_MTU 1536 +#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) +#define NUM_PKT_BUF 64 +#define NUM_BUFPOOL 32 + +/* software context of a descriptor ring */ +struct xgene_enet_desc_ring { + struct net_device *ndev; + u16 id; + u16 num; + u16 head; + u16 tail; + u16 slots; + u16 irq; + u32 size; + u32 state[NUM_RING_CONFIG]; + void __iomem *cmd_base; + void __iomem *cmd; + dma_addr_t dma; + u16 dst_ring_num; + u8 nbufpool; + struct sk_buff *(*rx_skb); + struct sk_buff *(*cp_skb); + enum xgene_enet_ring_cfgsize cfgsize; + struct xgene_enet_desc_ring *cp_ring; + struct xgene_enet_desc_ring *buf_pool; + struct napi_struct napi; + union { + void *desc_addr; + struct xgene_enet_raw_desc *raw_desc; + struct xgene_enet_raw_desc16 *raw_desc16; + }; +}; + +/* ethernet private data */ +struct xgene_enet_pdata { + struct net_device *ndev; + struct mii_bus *mdio_bus; + struct phy_device *phy_dev; + int phy_speed; + struct clk *clk; + struct platform_device *pdev; + struct xgene_enet_desc_ring *tx_ring; + struct xgene_enet_desc_ring *rx_ring; + char *dev_name; + u32 rx_buff_cnt; + u32 tx_qcnt_hi; + u32 cp_qcnt_hi; + u32 cp_qcnt_low; + u32 rx_irq; + void __iomem *eth_csr_addr; + void __iomem *eth_ring_if_addr; + void __iomem *eth_diag_csr_addr; + void __iomem *mcx_mac_addr; + void __iomem *mcx_stats_addr; + void __iomem *mcx_mac_csr_addr; + void __iomem *base_addr; + void __iomem *ring_csr_addr; + void __iomem *ring_cmd_addr; + u32 phy_addr; + int phy_mode; + u32 speed; + u16 rm; + struct rtnl_link_stats64 stats; +}; + +/* Set the specified value into a bit-field defined by its starting position + * and length within a single u64. + */ +static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val) +{ + return (val & ((1ULL << len) - 1)) << pos; +} + +#define SET_VAL(field, val) \ + xgene_enet_set_field_value(field ## _POS, field ## _LEN, val) + +#define SET_BIT(field) \ + xgene_enet_set_field_value(field ## _POS, 1, 1) + +/* Get the value from a bit-field defined by its starting position + * and length within the specified u64. + */ +static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) +{ + return (src >> pos) & ((1ULL << len) - 1); +} + +#define GET_VAL(field, src) \ + xgene_enet_get_field_value(field ## _POS, field ## _LEN, src) + +static inline struct device *ndev_to_dev(struct net_device *ndev) +{ + return ndev->dev.parent; +} + +void xgene_enet_set_ethtool_ops(struct net_device *netdev); + +#endif /* __XGENE_ENET_MAIN_H__ */ diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index fe5cfeace6e3..5919394d9f58 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -30,6 +30,17 @@ #define DRV_VERSION "1.0" /** + * arc_emac_tx_avail - Return the number of available slots in the tx ring. + * @priv: Pointer to ARC EMAC private data structure. + * + * returns: the number of slots available for transmission in tx the ring. 
+ */ +static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) +{ + return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM; +} + +/** * arc_emac_adjust_link - Adjust the PHY link duplex. * @ndev: Pointer to the net_device structure. * @@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev) txbd->info = 0; *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; - - if (netif_queue_stopped(ndev)) - netif_wake_queue(ndev); } + + /* Ensure that txbd_dirty is visible to tx() before checking + * for queue stopped. + */ + smp_mb(); + + if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv)) + netif_wake_queue(ndev); } /** @@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) work_done = arc_emac_rx(ndev, budget); if (work_done < budget) { napi_complete(napi); - arc_reg_or(priv, R_ENABLE, RXINT_MASK); + arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); } return work_done; @@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance) /* Reset all flags except "MDIO complete" */ arc_reg_set(priv, R_STATUS, status); - if (status & RXINT_MASK) { + if (status & (RXINT_MASK | TXINT_MASK)) { if (likely(napi_schedule_prep(&priv->napi))) { - arc_reg_clr(priv, R_ENABLE, RXINT_MASK); + arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); __napi_schedule(&priv->napi); } } @@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev) arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); /* Enable interrupts */ - arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); + arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); /* Set CONTROL */ arc_reg_set(priv, R_CTRL, @@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev) netif_stop_queue(ndev); /* Disable interrupts */ - arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK); + arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); /* Disable EMAC */ arc_reg_clr(priv, R_CTRL, EN_MASK); @@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) len = max_t(unsigned int, ETH_ZLEN, skb->len); - /* EMAC still holds this buffer in its possession. - * CPU must not modify this buffer descriptor - */ - if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) { + if (unlikely(!arc_emac_tx_avail(priv))) { netif_stop_queue(ndev); + netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; } @@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) /* Increment index to point to the next BD */ *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; - /* Get "info" of the next BD */ - info = &priv->txbd[*txbd_curr].info; + /* Ensure that tx_clean() sees the new txbd_curr before + * checking the queue status. This prevents an unneeded wake + * of the queue in tx_clean(). 
+ */ + smp_mb(); - /* Check if if Tx BD ring is full - next BD is still owned by EMAC */ - if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) + if (!arc_emac_tx_avail(priv)) { netif_stop_queue(ndev); + /* Refresh tx_dirty */ + smp_mb(); + if (arc_emac_tx_avail(priv)) + netif_start_queue(ndev); + } arc_reg_set(priv, R_STATUS, TXPL_MASK); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 49faa97a30c3..e398eda07298 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1527,7 +1527,7 @@ static const struct pci_error_handlers alx_err_handlers = { .resume = alx_pci_error_resume, }; -static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { +static const struct pci_device_id alx_pci_tbl[] = { { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index e11bf18fbbd1..72fb86b9aa24 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -34,7 +34,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION; * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = { +static const struct pci_device_id atl1c_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 4345332533ad..2326579f9454 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION; * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = { +static const struct pci_device_id atl1e_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, /* required last entry */ @@ -831,17 +831,14 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) /* real ring DMA buffer */ size = adapter->ring_size; - adapter->ring_vir_addr = pci_alloc_consistent(pdev, - adapter->ring_size, &adapter->ring_dma); - + adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size, + &adapter->ring_dma); if (adapter->ring_vir_addr == NULL) { netdev_err(adapter->netdev, "pci_alloc_consistent failed, size = D%d\n", size); return -ENOMEM; } - memset(adapter->ring_vir_addr, 0, adapter->ring_size); - rx_page_desc = rx_ring->rx_page_desc; /* Init TPD Ring */ diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 1546d550ac97..2c8f398aeda9 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -235,7 +235,7 @@ static void atl1_check_options(struct atl1_adapter *adapter) /* * atl1_pci_tbl - PCI Device ID Table */ -static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = { +static const struct pci_device_id atl1_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, /* required last entry */ {0,} diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c 
b/drivers/net/ethernet/atheros/atlx/atl2.c index c194bc687c30..84a09e8ddd9c 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -65,7 +65,7 @@ MODULE_VERSION(ATL2_DRV_VERSION); /* * atl2_pci_tbl - PCI Device ID Table */ -static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = { +static const struct pci_device_id atl2_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)}, /* required last entry */ {0,} diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 7dcfb19a31c8..d8d07a818b89 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -84,7 +84,7 @@ config BNX2 config CNIC tristate "QLogic CNIC support" - depends on PCI + depends on PCI && (IPV6 || IPV6=n) select BNX2 select UIO ---help--- diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index ca5a20a48b14..d588136b23b9 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -105,7 +105,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); #ifdef CONFIG_B44_PCI -static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = { +static const struct pci_device_id b44_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, @@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, hwstat->tx_underruns + hwstat->tx_excessive_cols + hwstat->tx_late_cols); - nstat->multicast = hwstat->tx_multicast_pkts; + nstat->multicast = hwstat->rx_multicast_pkts; nstat->collisions = hwstat->tx_total_cols; nstat->rx_length_errors = (hwstat->rx_oversize_pkts + diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 6f4e18644bd4..d9b9170ed2fc 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, while ((processed < to_process) && (processed < budget)) { cb = &priv->rx_cbs[priv->rx_read_ptr]; skb = cb->skb; + + processed++; + priv->rx_read_ptr++; + + if (priv->rx_read_ptr == priv->num_rx_bds) + priv->rx_read_ptr = 0; + + /* We do not have a backing SKB, so we do not a corresponding + * DMA mapping for this incoming packet since + * bcm_sysport_rx_refill always either has both skb and mapping + * or none. 
+ */ + if (unlikely(!skb)) { + netif_err(priv, rx_err, ndev, "out of memory!\n"); + ndev->stats.rx_dropped++; + ndev->stats.rx_errors++; + goto refill; + } + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), RX_BUF_LENGTH, DMA_FROM_DEVICE); @@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & DESC_STATUS_MASK; - processed++; - priv->rx_read_ptr++; - if (priv->rx_read_ptr == priv->num_rx_bds) - priv->rx_read_ptr = 0; - netif_dbg(priv, rx_status, ndev, "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", p_index, priv->rx_c_index, priv->rx_read_ptr, len, status); - if (unlikely(!skb)) { - netif_err(priv, rx_err, ndev, "out of memory!\n"); - ndev->stats.rx_dropped++; - ndev->stats.rx_errors++; - goto refill; - } - if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { netif_err(priv, rx_status, ndev, "fragmented packet!\n"); ndev->stats.rx_dropped++; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e64c963fe775..823d01c5684c 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -120,7 +120,7 @@ static struct { { "Broadcom NetXtreme II BCM5716 1000Base-SX" }, }; -static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = { +static const struct pci_device_id bnx2_pci_tbl[] = { { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, @@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) skb->protocol = eth_type_trans(skb, bp->dev); - if ((len > (bp->dev->mtu + ETH_HLEN)) && - (ntohs(skb->protocol) != 0x8100)) { + if (len > (bp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(0x8100) && + skb->protocol != htons(ETH_P_8021AD)) { dev_kfree_skb(skb); goto next_rx; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4e6c82e20224..4ccc806b1150 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -483,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, #ifdef BNX2X_STOP_ON_ERROR fp->tpa_queue_used |= (1 << queue); -#ifdef _ASM_GENERIC_INT_L64_H - DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", -#else DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", -#endif fp->tpa_queue_used); #endif } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5ba8af50c84f..c4daa068f1db 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -2233,7 +2233,12 @@ struct shmem2_region { u32 reserved3; /* Offset 0x14C */ u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ - #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) + #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 + #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 + #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 + #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 + #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000 + #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000 u32 reserved5[2]; u32 reserved6[PORT_MAX]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 53fb4fa61b40..549549eaf580 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -154,15 
+154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) #define SFP_EEPROM_CON_TYPE_ADDR 0x2 + #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 -#define SFP_EEPROM_COMP_CODE_ADDR 0x3 - #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) - #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) - #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) +#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3 + #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4) + #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5) + #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6) + +#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6 + #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0) + #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1) + #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2) + #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3) #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 @@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, reg_set[i].val); /* Start KR2 work-around timer which handles BCM8073 link-parner */ - vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; - bnx2x_update_link_attr(params, vars->link_attr_sync); + params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, params->link_attr_sync); } static void bnx2x_disable_kr2(struct link_params *params, @@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params, for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); - vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; - bnx2x_update_link_attr(params, vars->link_attr_sync); + params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, params->link_attr_sync); vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; } @@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params, ~FEATURE_CONFIG_PFC_ENABLED; if (SHMEM2_HAS(bp, link_attr_sync)) - vars->link_attr_sync = SHMEM2_RD(bp, + params->link_attr_sync = SHMEM2_RD(bp, link_attr_sync[params->port]); DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", @@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u32 sync_offset = 0, phy_idx, media_types; - u8 gport, val[2], check_limiting_mode = 0; + u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0; *edc_mode = EDC_MODE_LIMITING; phy->media_type = ETH_PHY_UNSPECIFIED; /* First check for copper cable */ if (bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0, - SFP_EEPROM_CON_TYPE_ADDR, - 2, + 0, + SFP_EEPROM_FC_TX_TECH_ADDR + 1, (u8 *)val) != 0) { DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); return -EINVAL; } - - switch (val[0]) { + params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK; + params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] << + LINK_SFP_EEPROM_COMP_CODE_SHIFT; + bnx2x_update_link_attr(params, params->link_attr_sync); + switch (val[SFP_EEPROM_CON_TYPE_ADDR]) { case SFP_EEPROM_CON_TYPE_VAL_COPPER: { u8 copper_module_type; @@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, /* Check if its active cable (includes SFP+ module) * of passive cable */ - if (bnx2x_read_sfp_module_eeprom(phy, - params, - I2C_DEV_ADDR_A0, - SFP_EEPROM_FC_TX_TECH_ADDR, - 1, - &copper_module_type) != 0) { - DP(NETIF_MSG_LINK, - "Failed to 
read copper-cable-type" - " from SFP+ EEPROM\n"); - return -EINVAL; - } + copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR]; if (copper_module_type & SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { @@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, } break; } + case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN: case SFP_EEPROM_CON_TYPE_VAL_LC: case SFP_EEPROM_CON_TYPE_VAL_RJ45: check_limiting_mode = 1; - if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | - SFP_EEPROM_COMP_CODE_LR_MASK | - SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { + if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & + (SFP_EEPROM_10G_COMP_CODE_SR_MASK | + SFP_EEPROM_10G_COMP_CODE_LR_MASK | + SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) { DP(NETIF_MSG_LINK, "1G SFP module detected\n"); - gport = params->port; phy->media_type = ETH_PHY_SFP_1G_FIBER; if (phy->req_line_speed != SPEED_1000) { + u8 gport = params->port; phy->req_line_speed = SPEED_1000; if (!CHIP_IS_E1x(bp)) { gport = BP_PATH(bp) + @@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n", gport); } + if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] & + SFP_EEPROM_1G_COMP_CODE_BASE_T) { + bnx2x_sfp_set_transmitter(params, phy, 0); + msleep(40); + bnx2x_sfp_set_transmitter(params, phy, 1); + } } else { int idx, cfg_idx = 0; DP(NETIF_MSG_LINK, "10G Optic module detected\n"); @@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, break; default: DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", - val[0]); + val[SFP_EEPROM_CON_TYPE_ADDR]); return -EINVAL; } sync_offset = params->shmem_base + @@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, sigdet = bnx2x_warpcore_get_sigdet(phy, params); if (!sigdet) { - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { bnx2x_kr2_recovery(params, vars, phy); DP(NETIF_MSG_LINK, "No sigdet\n"); } @@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, /* CL73 has not begun yet */ if (base_page == 0) { - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { bnx2x_kr2_recovery(params, vars, phy); DP(NETIF_MSG_LINK, "No BP\n"); } @@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, ((next_page & 0xe0) == 0x20)))); /* In case KR2 is already disabled, check if we need to re-enable it */ - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { if (!not_kr2_device) { DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 389f5f8cb0a3..d9cce4c3899b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -323,6 +323,9 @@ struct link_params { #define LINK_FLAGS_INT_DISABLED (1<<0) #define PHY_INITIALIZED (1<<1) u32 lfa_base; + + /* The same definitions as the shmem2 parameter */ + u32 link_attr_sync; }; /* Output parameters */ @@ -364,8 +367,6 @@ struct link_vars { u8 rx_tx_asic_rst; u8 turn_to_run_wc_rt; u16 rsrv2; - /* The same definitions as the shmem2 parameter */ - u32 link_attr_sync; }; /***********************************************************/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 3871ec49cc4d..d1c093dcb054 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -249,7 +249,7 @@ static struct { #define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF #endif -static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { +static const struct pci_device_id bnx2x_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, @@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp) bnx2x_release_phy_lock(bp); } +static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) +{ + REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); + + /* make sure this value is 0 */ + REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); + + REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val); +} + +static void bnx2x_set_endianity(struct bnx2x *bp) +{ +#ifdef __BIG_ENDIAN + bnx2x_config_endianity(bp, 1); +#else + bnx2x_config_endianity(bp, 0); +#endif +} + +static void bnx2x_reset_endianity(struct bnx2x *bp) +{ + bnx2x_config_endianity(bp, 0); +} + /** * bnx2x_init_hw_common - initialize the HW at the COMMON phase. * @@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); bnx2x_init_pxp(bp); - -#ifdef __BIG_ENDIAN - REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); - REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); - /* make sure this value is 0 */ - REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); - -/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ - REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); - REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); -#endif - + bnx2x_set_endianity(bp); bnx2x_ilt_init_page_size(bp, INITOP_SET); if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) @@ -10052,6 +10067,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, } #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) +#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \ + 0x1848 + ((f) << 4)) #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) @@ -10059,8 +10076,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) #define BCM_5710_UNDI_FW_MF_MINOR (0x08) #define BCM_5710_UNDI_FW_MF_VERS (0x05) -#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) -#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) { @@ -10079,72 +10094,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) return false; } -static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) -{ - u8 major, minor, version; - u32 fw; - - /* Must check that FW is loaded */ - if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & - MISC_REGISTERS_RESET_REG_1_RST_XSEM)) { - 
BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n"); - return false; - } - - /* Read Currently loaded FW version */ - fw = REG_RD(bp, XSEM_REG_PRAM); - major = fw & 0xff; - minor = (fw >> 0x8) & 0xff; - version = (fw >> 0x10) & 0xff; - BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n", - fw, major, minor, version); - - if (major > BCM_5710_UNDI_FW_MF_MAJOR) - return true; - - if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && - (minor > BCM_5710_UNDI_FW_MF_MINOR)) - return true; - - if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && - (minor == BCM_5710_UNDI_FW_MF_MINOR) && - (version >= BCM_5710_UNDI_FW_MF_VERS)) - return true; - - return false; -} - -static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp) -{ - int i; - - /* Due to legacy (FW) code, the first function on each engine has a - * different offset macro from the rest of the functions. - * Setting this for all 8 functions is harmless regardless of whether - * this is actually a multi-function device. - */ - for (i = 0; i < 2; i++) - REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); - - for (i = 2; i < 8; i++) - REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); - - BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); -} - -static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) +static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) { u16 rcq, bd; - u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); + u32 addr, tmp_reg; + + if (BP_FUNC(bp) < 2) + addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); + else + addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); + tmp_reg = REG_RD(bp, addr); rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); - REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); + REG_WR(bp, addr, tmp_reg); - BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", - port, bd, rcq); + BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", + BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); } static int bnx2x_prev_mcp_done(struct bnx2x *bp) @@ -10383,7 +10351,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { u32 timer_count = 1000; - bool need_write = true; /* Close the MAC Rx to prevent BRB from filling up */ bnx2x_prev_unload_close_mac(bp, &mac_vals); @@ -10420,20 +10387,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) else timer_count--; - /* New UNDI FW supports MF and contains better - * cleaning methods - might be redundant but harmless. - */ - if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { - if (need_write) { - bnx2x_prev_unload_undi_mf(bp); - need_write = false; - } - } else if (prev_undi) { - /* If UNDI resides in memory, - * manually increment it - */ - bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); - } + /* If UNDI resides in memory, manually increment it */ + if (prev_undi) + bnx2x_prev_unload_undi_inc(bp, 1); + udelay(10); } @@ -13227,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev, bnx2x_iov_remove_one(bp); /* Power on: we can't let PCI layer write to us while we are in D3 */ - if (IS_PF(bp)) + if (IS_PF(bp)) { bnx2x_set_power_state(bp, PCI_D0); + /* Set endianity registers to reset values in case next driver + * boots in different endianty environment. 
+ */ + bnx2x_reset_endianity(bp); + } + /* Disable MSI/MSI-X */ bnx2x_disable_msi(bp); diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 27861a6c7ca5..a6a9f284c8dd 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -31,7 +31,7 @@ #include <linux/if_vlan.h> #include <linux/prefetch.h> #include <linux/random.h> -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_VLAN_8021Q) #define BCM_VLAN 1 #endif #include <net/ip.h> @@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, struct dst_entry **dst) { -#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) +#if IS_ENABLED(CONFIG_IPV6) struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ce455aed5a2f..5cc9cae21ed5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -739,7 +739,6 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv, case GENET_POWER_PASSIVE: /* Power down LED */ - bcmgenet_mii_reset(priv->dev); if (priv->hw_params->flags & GENET_HAS_EXT) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= (EXT_PWR_DOWN_PHY | @@ -779,7 +778,9 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - bcmgenet_mii_reset(priv->dev); + + if (mode == GENET_POWER_PASSIVE) + bcmgenet_mii_reset(priv->dev); } /* ioctl handle special commands that are not present in ethtool. */ @@ -874,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, int last_tx_cn, last_c_index, num_tx_bds; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; + unsigned int bds_compl; unsigned int c_index; /* Compute how many buffers are transmitted since last xmit call */ @@ -898,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, /* Reclaim transmitted buffers */ while (last_tx_cn-- > 0) { tx_cb_ptr = ring->cbs + last_c_index; + bds_compl = 0; if (tx_cb_ptr->skb) { + bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, dma_unmap_addr(tx_cb_ptr, dma_addr), @@ -915,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); } dev->stats.tx_packets++; - ring->free_bds += 1; + ring->free_bds += bds_compl; last_c_index++; last_c_index &= (num_tx_bds - 1); @@ -1273,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, while ((rxpktprocessed < rxpkttoprocess) && (rxpktprocessed < budget)) { + cb = &priv->rx_cbs[priv->rx_read_ptr]; + skb = cb->skb; + + rxpktprocessed++; + + priv->rx_read_ptr++; + priv->rx_read_ptr &= (priv->num_rx_bds - 1); + + /* We do not have a backing SKB, so we do not have a + * corresponding DMA mapping for this incoming packet since + * bcmgenet_rx_refill always either has both skb and mapping or + * none. 
+ */ + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + dev->stats.rx_errors++; + goto refill; + } + /* Unmap the packet contents such that we can use the * RSV from the 64 bytes descriptor when enabled and save * a 32-bits register read */ - cb = &priv->rx_cbs[priv->rx_read_ptr]; - skb = cb->skb; dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), priv->rx_buf_len, DMA_FROM_DEVICE); @@ -1306,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, __func__, p_index, priv->rx_c_index, priv->rx_read_ptr, dma_length_status); - rxpktprocessed++; - - priv->rx_read_ptr++; - priv->rx_read_ptr &= (priv->num_rx_bds - 1); - - /* out of memory, just drop packets at the hardware level */ - if (unlikely(!skb)) { - dev->stats.rx_dropped++; - dev->stats.rx_errors++; - goto refill; - } - if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); @@ -1735,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev) bcmgenet_tdma_writel(priv, reg, DMA_CTRL); } +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + return ret; +} + static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { int i; /* disable DMA */ - bcmgenet_rdma_writel(priv, 0, DMA_CTRL); - bcmgenet_tdma_writel(priv, 0, DMA_CTRL); + bcmgenet_dma_teardown(priv); for (i = 0; i < priv->num_tx_bds; i++) { if (priv->tx_cbs[i].skb != NULL) { @@ -1961,7 +2020,8 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) { /* From WOL-enabled suspend, switch to regular clock */ - clk_disable_unprepare(priv->clk_wol); + if (priv->wolopts) + clk_disable_unprepare(priv->clk_wol); phy_init_hw(priv->phydev); /* Speed settings must be restored */ @@ -2099,57 +2159,6 @@ err_clk_disable: return ret; } -static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) -{ - int ret = 0; - int timeout = 0; - u32 reg; - - /* Disable TDMA to stop add more frames in TX DMA */ - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg &= ~DMA_EN; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); - - /* Check TDMA status register to confirm TDMA is disabled */ - while (timeout++ < DMA_TIMEOUT_VAL) { - reg = bcmgenet_tdma_readl(priv, DMA_STATUS); - if (reg & DMA_DISABLED) - break; - - udelay(1); - } - - if (timeout == DMA_TIMEOUT_VAL) { - netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); - ret = 
-ETIMEDOUT; - } - - /* Wait 10ms for packet drain in both tx and rx dma */ - usleep_range(10000, 20000); - - /* Disable RDMA */ - reg = bcmgenet_rdma_readl(priv, DMA_CTRL); - reg &= ~DMA_EN; - bcmgenet_rdma_writel(priv, reg, DMA_CTRL); - - timeout = 0; - /* Check RDMA status register to confirm RDMA is disabled */ - while (timeout++ < DMA_TIMEOUT_VAL) { - reg = bcmgenet_rdma_readl(priv, DMA_STATUS); - if (reg & DMA_DISABLED) - break; - - udelay(1); - } - - if (timeout == DMA_TIMEOUT_VAL) { - netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); - ret = -ETIMEDOUT; - } - - return ret; -} - static void bcmgenet_netif_stop(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -2164,6 +2173,10 @@ static void bcmgenet_netif_stop(struct net_device *dev) * disabled no new work will be scheduled. */ cancel_work_sync(&priv->bcmgenet_irq_work); + + priv->old_pause = -1; + priv->old_link = -1; + priv->old_duplex = -1; } static int bcmgenet_close(struct net_device *dev) @@ -2533,6 +2546,13 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->pdev = pdev; priv->version = (enum bcmgenet_version)of_id->data; + priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) + dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); + + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); + bcmgenet_set_hw_params(priv); /* Mii wait queue */ @@ -2541,17 +2561,10 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->rx_buf_len = RX_BUF_LENGTH; INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); - priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); - if (IS_ERR(priv->clk)) - dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); - priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); if (IS_ERR(priv->clk_wol)) dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); - if (!IS_ERR(priv->clk)) - clk_prepare_enable(priv->clk); - err = reset_umac(priv); if (err) goto err_clk_disable; @@ -2611,6 +2624,8 @@ static int bcmgenet_suspend(struct device *d) bcmgenet_netif_stop(dev); + phy_suspend(priv->phydev); + netif_device_detach(dev); /* Disable MAC receive */ @@ -2661,9 +2676,7 @@ static int bcmgenet_resume(struct device *d) if (ret) goto out_clk_disable; - if (priv->wolopts) - ret = bcmgenet_wol_resume(priv); - + ret = bcmgenet_wol_resume(priv); if (ret) goto out_clk_disable; @@ -2678,6 +2691,9 @@ static int bcmgenet_resume(struct device *d) bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); } + if (priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); @@ -2693,6 +2709,8 @@ static int bcmgenet_resume(struct device *d) netif_device_attach(dev); + phy_resume(priv->phydev); + bcmgenet_netif_start(dev); return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 18961613d385..c88f7ae99636 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -129,7 +129,10 @@ static void bcmgenet_mii_setup(struct net_device *dev) cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; } - if (status_changed) { + if (!status_changed) + return; + + if (phydev->link) { reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | CMD_HD_EN | @@ -137,8 +140,9 @@ static void bcmgenet_mii_setup(struct net_device *dev) reg |= cmd_bits; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - phy_print_status(phydev); } + + 
phy_print_status(phydev); } void bcmgenet_mii_reset(struct net_device *dev) @@ -303,12 +307,12 @@ static int bcmgenet_mii_probe(struct net_device *dev) /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. */ - if (of_phy_is_fixed_link(dn)) { + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { ret = of_phy_register_fixed_link(dn); if (ret) return ret; - priv->phy_dn = dn; + priv->phy_dn = of_node_get(dn); } phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0, @@ -444,6 +448,7 @@ int bcmgenet_mii_init(struct net_device *dev) return 0; out: + of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); out_free: kfree(priv->mii_bus->irq); @@ -455,6 +460,7 @@ void bcmgenet_mii_exit(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a3dd5dc64f4c..ba499489969a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -237,7 +237,7 @@ MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002 -static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { +static const struct pci_device_id tg3_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, @@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) skb->protocol = eth_type_trans(skb, tp->dev); if (len > (tp->dev->mtu + ETH_HLEN) && - skb->protocol != htons(ETH_P_8021Q)) { + skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) { dev_kfree_skb_any(skb); goto drop_it_no_recycle; } @@ -7914,8 +7915,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) entry = tnapi->tx_prod; base_flags = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) - base_flags |= TXD_FLAG_TCPUDP_CSUM; mss = skb_shinfo(skb)->gso_size; if (mss) { @@ -7929,6 +7928,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + /* HW/FW can not correctly segment packets that have been + * vlan encapsulated. + */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + return tg3_tso_bug(tp, tnapi, txq, skb); + if (!skb_is_gso_v6(skb)) { if (unlikely((ETH_HLEN + hdr_len) > 80) && tg3_flag(tp, TSO_BUG)) @@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) base_flags |= tsflags << 12; } } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* HW/FW can not correctly checksum packets that have been + * vlan encapsulated. + */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) { + if (skb_checksum_help(skb)) + goto drop; + } else { + base_flags |= TXD_FLAG_TCPUDP_CSUM; + } } if (tg3_flag(tp, USE_JUMBO_BDFLAG) && @@ -11617,6 +11634,12 @@ static int tg3_open(struct net_device *dev) struct tg3 *tp = netdev_priv(dev); int err; + if (tp->pcierr_recovery) { + netdev_err(dev, "Failed to open device. 
PCI error recovery " + "in progress\n"); + return -EAGAIN; + } + if (tp->fw_needed) { err = tg3_request_firmware(tp); if (tg3_asic_rev(tp) == ASIC_REV_57766) { @@ -11674,6 +11697,12 @@ static int tg3_close(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); + if (tp->pcierr_recovery) { + netdev_err(dev, "Failed to close device. PCI error recovery " + "in progress\n"); + return -EAGAIN; + } + tg3_ptp_fini(tp); tg3_stop(tp); @@ -14093,8 +14122,9 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, spin_lock_bh(&tp->lock); if (!tp->hw_stats) { + *stats = tp->net_stats_prev; spin_unlock_bh(&tp->lock); - return &tp->net_stats_prev; + return stats; } tg3_get_nstats(tp, stats); @@ -15926,7 +15956,7 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) return TG3_RX_RET_MAX_SIZE_5705; } -static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { +static const struct pci_device_id tg3_write_reorder_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, @@ -17185,7 +17215,7 @@ static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, #define TEST_BUFFER_SIZE 0x2000 -static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { +static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, { }, }; @@ -17560,6 +17590,7 @@ static int tg3_init_one(struct pci_dev *pdev, tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; tp->irq_sync = 1; + tp->pcierr_recovery = false; if (tg3_debug > 0) tp->msg_enable = tg3_debug; @@ -18070,6 +18101,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); + tp->pcierr_recovery = true; + /* We probably don't have netdev yet */ if (!netdev || !netif_running(netdev)) goto done; @@ -18194,6 +18227,7 @@ static void tg3_io_resume(struct pci_dev *pdev) tg3_phy_start(tp); done: + tp->pcierr_recovery = false; rtnl_unlock(); } diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 461accaf0aa4..31c9f8295953 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3407,6 +3407,7 @@ struct tg3 { struct device *hwmon_dev; bool link_up; + bool pcierr_recovery; }; /* Accessor macros for chip and asic attributes diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 556aab75f490..ffc92a41d75b 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) * For TSO, the TCP checksum field is seeded with pseudo-header sum * excluding the length field. */ - if (skb->protocol == htons(ETH_P_IP)) { + if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); /* Do we really need these? */ @@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, } if (skb->ip_summed == CHECKSUM_PARTIAL) { + __be16 net_proto = vlan_get_protocol(skb); u8 proto = 0; - if (skb->protocol == htons(ETH_P_IP)) + if (net_proto == htons(ETH_P_IP)) proto = ip_hdr(skb)->protocol; #ifdef NETIF_F_IPV6_CSUM - else if (skb->protocol == htons(ETH_P_IPV6)) { + else if (net_proto == htons(ETH_P_IPV6)) { /* nexthdr may not be TCP immediately. 
*/ proto = ipv6_hdr(skb)->nexthdr; } @@ -3836,7 +3837,7 @@ bnad_pci_remove(struct pci_dev *pdev) free_netdev(netdev); } -static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = { +static const struct pci_device_id bnad_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROCADE, PCI_DEVICE_ID_BROCADE_CT), diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ca5d7798b265..e1e02fba4fcc 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -30,7 +30,6 @@ #include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/pinctrl/consumer.h> #include "macb.h" @@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev) struct phy_device *phydev; u32 config; int err = -ENXIO; - struct pinctrl *pinctrl; const char *mac; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev) goto err_out; } - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) { - err = PTR_ERR(pinctrl); - if (err == -EPROBE_DEFER) - goto err_out; - - dev_warn(&pdev->dev, "No pinctrl provided\n"); - } - err = -ENOMEM; dev = alloc_etherdev(sizeof(*bp)); if (!dev) diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index 184a063bed5f..07d2201530d2 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig @@ -1,6 +1,7 @@ config NET_CALXEDA_XGMAC tristate "Calxeda 1G/10G XGMAC Ethernet driver" depends on HAS_IOMEM && HAS_DMA + depends on ARCH_HIGHBANK || COMPILE_TEST select CRC32 help This is the driver for the XGMAC Ethernet IP block found on Calxeda diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c index 816719314cc8..ea0f8741d7cf 100644 --- a/drivers/net/ethernet/chelsio/cxgb/subr.c +++ b/drivers/net/ethernet/chelsio/cxgb/subr.c @@ -522,7 +522,7 @@ static const struct board_info t1_board[] = { }; -DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = { +const struct pci_device_id t1_pci_tbl[] = { CH_DEVICE(8, 0, CH_BRD_T110_1CU), CH_DEVICE(8, 1, CH_BRD_T110_1CU), CH_DEVICE(7, 0, CH_BRD_N110_1F), diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 5d9cce053cc9..db76f7040455 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -85,7 +85,7 @@ enum { #define CH_DEVICE(devid, idx) \ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } -static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = { +static const struct pci_device_id cxgb3_pci_tbl[] = { CH_DEVICE(0x20, 0), /* PE9000 */ CH_DEVICE(0x21, 1), /* T302E */ CH_DEVICE(0x22, 2), /* T310E */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c9b922cc3e67..c067b7888ac4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -50,13 +50,13 @@ #include "cxgb4_uld.h" #define T4FW_VERSION_MAJOR 0x01 -#define T4FW_VERSION_MINOR 0x09 -#define T4FW_VERSION_MICRO 0x17 +#define T4FW_VERSION_MINOR 0x0B +#define T4FW_VERSION_MICRO 0x1B #define T4FW_VERSION_BUILD 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x09 -#define T5FW_VERSION_MICRO 0x17 +#define T5FW_VERSION_MINOR 0x0B +#define T5FW_VERSION_MICRO 0x1B #define T5FW_VERSION_BUILD 0x00 #define CH_WARN(adap, fmt, ...) 
dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) @@ -522,6 +522,9 @@ struct sge_txq { struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ struct sge_txq q; struct netdev_queue *txq; /* associated netdev TX queue */ +#ifdef CONFIG_CHELSIO_T4_DCB + u8 dcb_prio; /* DCB Priority bound to queue */ +#endif unsigned long tso; /* # of TSO requests */ unsigned long tx_cso; /* # of Tx checksum offloads */ unsigned long vlan_ins; /* # of Tx VLAN insertions */ @@ -649,6 +652,7 @@ struct adapter { struct tid_info tids; void **tid_release_head; spinlock_t tid_release_lock; + struct workqueue_struct *workq; struct work_struct tid_release_task; struct work_struct db_full_task; struct work_struct db_drop_task; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 0d3a9df5be36..8edf0f5bd679 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -20,6 +20,17 @@ #include "cxgb4.h" +/* DCBx version control + */ +char *dcb_ver_array[] = { + "Unknown", + "DCBx-CIN", + "DCBx-CEE 1.01", + "DCBx-IEEE", + "", "", "", + "Auto Negotiated" +}; + /* Initialize a port's Data Center Bridging state. Typically used after a * Link Down event. */ @@ -27,25 +38,45 @@ void cxgb4_dcb_state_init(struct net_device *dev) { struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; + int version_temp = dcb->dcb_version; memset(dcb, 0, sizeof(struct port_dcb_info)); dcb->state = CXGB4_DCB_STATE_START; + if (version_temp) + dcb->dcb_version = version_temp; + + netdev_dbg(dev, "%s: Initializing DCB state for port[%d]\n", + __func__, pi->port_id); +} + +void cxgb4_dcb_version_init(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + + /* Any writes here are only done on kernels that exlicitly need + * a specific version, say < 2.6.38 which only support CEE + */ + dcb->dcb_version = FW_PORT_DCB_VER_AUTO; } /* Finite State machine for Data Center Bridging. 
*/ void cxgb4_dcb_state_fsm(struct net_device *dev, - enum cxgb4_dcb_state_input input) + enum cxgb4_dcb_state_input transition_to) { struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; struct adapter *adap = pi->adapter; + enum cxgb4_dcb_state current_state = dcb->state; - switch (input) { - case CXGB4_DCB_INPUT_FW_DISABLED: { - /* Firmware tells us it's not doing DCB */ - switch (dcb->state) { - case CXGB4_DCB_STATE_START: { + netdev_dbg(dev, "%s: State change from %d to %d for %s\n", + __func__, dcb->state, transition_to, dev->name); + + switch (current_state) { + case CXGB4_DCB_STATE_START: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_DISABLED: { /* we're going to use Host DCB */ dcb->state = CXGB4_DCB_STATE_HOST; dcb->supported = CXGB4_DCBX_HOST_SUPPORT; @@ -53,48 +84,62 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, break; } - case CXGB4_DCB_STATE_HOST: { - /* we're alreaady in Host DCB mode */ + case CXGB4_DCB_INPUT_FW_ENABLED: { + /* we're going to use Firmware DCB */ + dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; + dcb->supported = CXGB4_DCBX_FW_SUPPORT; + break; + } + + case CXGB4_DCB_INPUT_FW_INCOMPLETE: { + /* expected transition */ + break; + } + + case CXGB4_DCB_INPUT_FW_ALLSYNCED: { + dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED; break; } default: - goto bad_state_transition; + goto bad_state_input; } break; } - case CXGB4_DCB_INPUT_FW_ENABLED: { - /* Firmware tells us that it is doing DCB */ - switch (dcb->state) { - case CXGB4_DCB_STATE_START: { - /* we're going to use Firmware DCB */ - dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; - dcb->supported = CXGB4_DCBX_FW_SUPPORT; + case CXGB4_DCB_STATE_FW_INCOMPLETE: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_ENABLED: { + /* we're alreaady in firmware DCB mode */ break; } - case CXGB4_DCB_STATE_FW_INCOMPLETE: - case CXGB4_DCB_STATE_FW_ALLSYNCED: { - /* we're alreaady in firmware DCB mode */ + case CXGB4_DCB_INPUT_FW_INCOMPLETE: { + /* we're already incomplete */ + break; + } + + case CXGB4_DCB_INPUT_FW_ALLSYNCED: { + dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED; + dcb->enabled = 1; + linkwatch_fire_event(dev); break; } default: - goto bad_state_transition; + goto bad_state_input; } break; } - case CXGB4_DCB_INPUT_FW_INCOMPLETE: { - /* Firmware tells us that its DCB state is incomplete */ - switch (dcb->state) { - case CXGB4_DCB_STATE_FW_INCOMPLETE: { - /* we're already incomplete */ + case CXGB4_DCB_STATE_FW_ALLSYNCED: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_ENABLED: { + /* we're alreaady in firmware DCB mode */ break; } - case CXGB4_DCB_STATE_FW_ALLSYNCED: { + case CXGB4_DCB_INPUT_FW_INCOMPLETE: { /* We were successfully running with firmware DCB but * now it's telling us that it's in an "incomplete * state. 
We need to reset back to a ground state @@ -107,46 +152,48 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, break; } - default: - goto bad_state_transition; - } - break; - } - - case CXGB4_DCB_INPUT_FW_ALLSYNCED: { - /* Firmware tells us that its DCB state is complete */ - switch (dcb->state) { - case CXGB4_DCB_STATE_FW_INCOMPLETE: { - dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED; + case CXGB4_DCB_INPUT_FW_ALLSYNCED: { + /* we're already all sync'ed + * this is only applicable for IEEE or + * when another VI already completed negotiaton + */ dcb->enabled = 1; linkwatch_fire_event(dev); break; } - case CXGB4_DCB_STATE_FW_ALLSYNCED: { - /* we're already all sync'ed */ + default: + goto bad_state_input; + } + break; + } + + case CXGB4_DCB_STATE_HOST: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_DISABLED: { + /* we're alreaady in Host DCB mode */ break; } default: - goto bad_state_transition; + goto bad_state_input; } break; } default: - goto bad_state_input; + goto bad_state_transition; } return; bad_state_input: dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: illegal input symbol %d\n", - input); + transition_to); return; bad_state_transition: dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: bad state transition, state = %d, input = %d\n", - dcb->state, input); + current_state, transition_to); } /* Handle a DCB/DCBX update message from the firmware. @@ -160,6 +207,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, struct port_info *pi = netdev_priv(dev); struct port_dcb_info *dcb = &pi->dcb; int dcb_type = pcmd->u.dcb.pgid.type; + int dcb_running_version; /* Handle Firmware DCB Control messages separately since they drive * our state machine. @@ -171,6 +219,25 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, ? CXGB4_DCB_STATE_FW_ALLSYNCED : CXGB4_DCB_STATE_FW_INCOMPLETE); + if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { + dcb_running_version = FW_PORT_CMD_DCB_VERSION_GET( + be16_to_cpu( + pcmd->u.dcb.control.dcb_version_to_app_state)); + if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 || + dcb_running_version == FW_PORT_DCB_VER_IEEE) { + dcb->dcb_version = dcb_running_version; + dev_warn(adap->pdev_dev, "Interface %s is running %s\n", + dev->name, + dcb_ver_array[dcb->dcb_version]); + } else { + dev_warn(adap->pdev_dev, + "Something screwed up, requested firmware for %s, but firmware returned %s instead\n", + dcb_ver_array[dcb->dcb_version], + dcb_ver_array[dcb_running_version]); + dcb->dcb_version = FW_PORT_DCB_VER_UNKNOWN; + } + } + cxgb4_dcb_state_fsm(dev, input); return; } @@ -199,7 +266,11 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, dcb->pg_num_tcs_supported = fwdcb->pgrate.num_tcs_supported; memcpy(dcb->pgrate, &fwdcb->pgrate.pgrate, sizeof(dcb->pgrate)); + memcpy(dcb->tsa, &fwdcb->pgrate.tsa, + sizeof(dcb->tsa)); dcb->msgs |= CXGB4_DCB_FW_PGRATE; + if (dcb->msgs & CXGB4_DCB_FW_PGID) + IEEE_FAUX_SYNC(dev, dcb); break; case FW_PORT_DCB_TYPE_PRIORATE: @@ -212,6 +283,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, dcb->pfcen = fwdcb->pfc.pfcen; dcb->pfc_num_tcs_supported = fwdcb->pfc.max_pfc_tcs; dcb->msgs |= CXGB4_DCB_FW_PFC; + IEEE_FAUX_SYNC(dev, dcb); break; case FW_PORT_DCB_TYPE_APP_ID: { @@ -220,13 +292,25 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, struct app_priority *ap = &dcb->app_priority[idx]; struct dcb_app app = { - .selector = fwap->sel_field, .protocol = be16_to_cpu(fwap->protocolid), - .priority = fwap->user_prio_map, }; int err; - err = dcb_setapp(dev, &app); + /* Convert from firmware format to relevant 
format + * when using app selector + */ + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.selector = (fwap->sel_field + 1); + app.priority = ffs(fwap->user_prio_map) - 1; + err = dcb_ieee_setapp(dev, &app); + IEEE_FAUX_SYNC(dev, dcb); + } else { + /* Default is CEE */ + app.selector = !!(fwap->sel_field); + app.priority = fwap->user_prio_map; + err = dcb_setapp(dev, &app); + } + if (err) dev_err(adap->pdev_dev, "Failed DCB Set Application Priority: sel=%d, prot=%d, prio=%d, err=%d\n", @@ -408,9 +492,10 @@ static void cxgb4_getpgbwgcfg(struct net_device *dev, int pgid, u8 *bw_per, if (err != FW_PORT_DCB_CFG_SUCCESS) { dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n", -err); - } else { - *bw_per = pcmd.u.dcb.pgrate.pgrate[pgid]; + return; } + + *bw_per = pcmd.u.dcb.pgrate.pgrate[pgid]; } static void cxgb4_getpgbwgcfg_tx(struct net_device *dev, int pgid, u8 *bw_per) @@ -637,7 +722,8 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, return err; } if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) - return pcmd.u.dcb.app_priority.user_prio_map; + if (pcmd.u.dcb.app_priority.sel_field == app_idtype) + return pcmd.u.dcb.app_priority.user_prio_map; /* exhausted app list */ if (!pcmd.u.dcb.app_priority.protocolid) @@ -657,8 +743,8 @@ static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id) /* Write a new Application User Priority Map for the specified Application ID */ -static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, - u8 app_prio) +static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, + u8 app_prio) { struct fw_port_cmd pcmd; struct port_info *pi = netdev2pinfo(dev); @@ -673,10 +759,6 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, if (!netif_carrier_ok(dev)) return -ENOLINK; - if (app_idtype != DCB_APP_IDTYPE_ETHTYPE && - app_idtype != DCB_APP_IDTYPE_PORTNUM) - return -EINVAL; - for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; @@ -725,6 +807,30 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, return 0; } +/* Priority for CEE inside dcb_app is bitmask, with 0 being an invalid value */ +static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, + u8 app_prio) +{ + int ret; + struct dcb_app app = { + .selector = app_idtype, + .protocol = app_id, + .priority = app_prio, + }; + + if (app_idtype != DCB_APP_IDTYPE_ETHTYPE && + app_idtype != DCB_APP_IDTYPE_PORTNUM) + return -EINVAL; + + /* Convert app_idtype to a format that firmware understands */ + ret = __cxgb4_setapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ? + app_idtype : 3, app_id, app_prio); + if (ret) + return ret; + + return dcb_setapp(dev, &app); +} + /* Return whether IEEE Data Center Bridging has been negotiated. */ static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) @@ -738,6 +844,7 @@ static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) /* Fill in the Application User Priority Map associated with the * specified Application. 
+ * Priority for IEEE dcb_app is an integer, with 0 being a valid value */ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app) { @@ -748,28 +855,39 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app) if (!(app->selector && app->protocol)) return -EINVAL; - prio = dcb_getapp(dev, app); - if (prio == 0) { - /* If app doesn't exist in dcb_app table, try firmware - * directly. - */ - prio = __cxgb4_getapp(dev, app->selector, app->protocol, 0); - } + /* Try querying firmware first, use firmware format */ + prio = __cxgb4_getapp(dev, app->selector - 1, app->protocol, 0); + + if (prio < 0) + prio = dcb_ieee_getapp_mask(dev, app); - app->priority = prio; + app->priority = ffs(prio) - 1; return 0; } -/* Write a new Application User Priority Map for the specified App id. */ +/* Write a new Application User Priority Map for the specified Application ID. + * Priority for IEEE dcb_app is an integer, with 0 being a valid value + */ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app) { + int ret; + if (!cxgb4_ieee_negotiation_complete(dev)) return -EINVAL; - if (!(app->selector && app->protocol && app->priority)) + if (!(app->selector && app->protocol)) + return -EINVAL; + + if (!(app->selector > IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->selector < IEEE_8021QAZ_APP_SEL_ANY)) return -EINVAL; - cxgb4_setapp(dev, app->selector, app->protocol, app->priority); - return dcb_setapp(dev, app); + /* change selector to a format that firmware understands */ + ret = __cxgb4_setapp(dev, app->selector - 1, app->protocol, + (1 << app->priority)); + if (ret) + return ret; + + return dcb_ieee_setapp(dev, app); } /* Return our DCBX parameters. @@ -794,8 +912,9 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request) != dcb_request) return 1; - /* Can't set DCBX capabilities if DCBX isn't enabled. */ - if (!pi->dcb.state) + /* Can't enable DCB if we haven't successfully negotiated it. + */ + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) return 1; /* There's currently no mechanism to allow for the firmware DCBX @@ -874,7 +993,8 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) table[i].selector = pcmd.u.dcb.app_priority.sel_field; table[i].protocol = be16_to_cpu(pcmd.u.dcb.app_priority.protocolid); - table[i].priority = pcmd.u.dcb.app_priority.user_prio_map; + table[i].priority = + ffs(pcmd.u.dcb.app_priority.user_prio_map) - 1; } return err; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index 1ec1d834e257..2a6aa88984f4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -63,6 +63,13 @@ #define INIT_PORT_DCB_WRITE_CMD(__pcmd, __port) \ INIT_PORT_DCB_CMD(__pcmd, __port, EXEC, FW_PORT_ACTION_L2_DCB_CFG) +#define IEEE_FAUX_SYNC(__dev, __dcb) \ + do { \ + if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \ + cxgb4_dcb_state_fsm((__dev), \ + CXGB4_DCB_STATE_FW_ALLSYNCED); \ + } while (0) + /* States we can be in for a port's Data Center Bridging. */ enum cxgb4_dcb_state { @@ -108,11 +115,13 @@ struct port_dcb_info { * Native Endian format). 
*/ u32 pgid; /* Priority Group[0..7] */ + u8 dcb_version; /* Running DCBx version */ u8 pfcen; /* Priority Flow Control[0..7] */ u8 pg_num_tcs_supported; /* max PG Traffic Classes */ u8 pfc_num_tcs_supported; /* max PFC Traffic Classes */ u8 pgrate[8]; /* Priority Group Rate[0..7] */ u8 priorate[8]; /* Priority Rate[0..7] */ + u8 tsa[8]; /* TSA Algorithm[0..7] */ struct app_priority { /* Application Information */ u8 user_prio_map; /* Priority Map bitfield */ u8 sel_field; /* Protocol ID interpretation */ @@ -121,6 +130,7 @@ struct port_dcb_info { }; void cxgb4_dcb_state_init(struct net_device *); +void cxgb4_dcb_version_init(struct net_device *); void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input); void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *); void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4247356c16ff..e5be511a3c38 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -212,7 +212,7 @@ struct filter_entry { #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } -static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { +static const struct pci_device_id cxgb4_pci_tbl[] = { CH_DEVICE(0xa000, 0), /* PE10K */ CH_DEVICE(0x4001, -1), CH_DEVICE(0x4002, -1), @@ -522,6 +522,8 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) dev_err(adap->pdev_dev, "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", enable ? "set" : "unset", pi->port_id, i, -err); + else + txq->dcb_prio = value; } } #endif /* CONFIG_CHELSIO_T4_DCB */ @@ -641,8 +643,6 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) return ret; } -static struct workqueue_struct *workq; - /** * link_start - enable a port * @dev: the port to enable @@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap); goto freeout; } - t4_write_reg(adap, MPS_TRC_RSS_CONTROL, + t4_write_reg(adap, is_t4(adap->params.chip) ? 
+ MPS_TRC_RSS_CONTROL : + MPS_T5_TRC_RSS_CONTROL, RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); return 0; @@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0xd004, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, - 0xf000, 0x11190, + 0xf000, 0x11110, + 0x11118, 0x11190, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x19124, @@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0xd004, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0x11088, - 0x1109c, 0x1117c, + 0x1109c, 0x11110, + 0x11118, 0x1117c, 0x11190, 0x11204, 0x19040, 0x1906c, 0x19078, 0x19080, @@ -3338,7 +3342,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, adap->tid_release_head = (void **)((uintptr_t)p | chan); if (!adap->tid_release_task_busy) { adap->tid_release_task_busy = true; - queue_work(workq, &adap->tid_release_task); + queue_work(adap->workq, &adap->tid_release_task); } spin_unlock_bh(&adap->tid_release_lock); } @@ -4138,7 +4142,7 @@ void t4_db_full(struct adapter *adap) notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); t4_set_reg_field(adap, SGE_INT_ENABLE3, DBFIFO_HP_INT | DBFIFO_LP_INT, 0); - queue_work(workq, &adap->db_full_task); + queue_work(adap->workq, &adap->db_full_task); } } @@ -4148,7 +4152,7 @@ void t4_db_dropped(struct adapter *adap) disable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); } - queue_work(workq, &adap->db_drop_task); + queue_work(adap->workq, &adap->db_drop_task); } static void uld_attach(struct adapter *adap, unsigned int uld) @@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(CQ_END); params[4] = FW_PARAM_PFVF(OCQ_START); params[5] = FW_PARAM_PFVF(OCQ_END); - ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, + val); if (ret < 0) goto bye; adap->vres.qp.start = val[0]; @@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap) params[0] = FW_PARAM_DEV(MAXORDIRD_QP); params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); - ret = t4_query_params(adap, 0, 0, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, + val); if (ret < 0) { adap->params.max_ordird_qp = 8; adap->params.max_ird_adapter = 32 * adap->tids.ntids; @@ -6472,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct port_info *pi; bool highdma = false; struct adapter *adapter = NULL; + void __iomem *regs; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -6488,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_release_regions; } + regs = pci_ioremap_bar(pdev, 0); + if (!regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_disable_device; + } + + /* We control everything through one PF */ + func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); + if (func != ent->driver_data) { + iounmap(regs); + pci_disable_device(pdev); + pci_save_state(pdev); /* to restore SR-IOV later */ + goto sriov; + } + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { highdma = true; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " "coherent allocations\n"); - goto out_disable_device; + goto out_unmap_bar0; } } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto out_disable_device; + goto 
out_unmap_bar0; } } @@ -6512,28 +6535,19 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { err = -ENOMEM; - goto out_disable_device; + goto out_unmap_bar0; } - /* PCI device has been enabled */ - adapter->flags |= DEV_ENABLED; - - adapter->regs = pci_ioremap_bar(pdev, 0); - if (!adapter->regs) { - dev_err(&pdev->dev, "cannot map device registers\n"); + adapter->workq = create_singlethread_workqueue("cxgb4"); + if (!adapter->workq) { err = -ENOMEM; goto out_free_adapter; } - /* We control everything through one PF */ - func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI)); - if ((pdev->device == 0xa000 && func != 0) || - func != ent->driver_data) { - pci_save_state(pdev); /* to restore SR-IOV later */ - err = 0; - goto out_unmap_bar0; - } + /* PCI device has been enabled */ + adapter->flags |= DEV_ENABLED; + adapter->regs = regs; adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; adapter->mbox = func; @@ -6550,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = t4_prep_adapter(adapter); if (err) - goto out_unmap_bar0; + goto out_free_adapter; + if (!is_t4(adapter->params.chip)) { s_qpp = QUEUESPERPAGEPF1 * adapter->fn; @@ -6567,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "Incorrect number of egress queues per page\n"); err = -EINVAL; - goto out_unmap_bar0; + goto out_free_adapter; } adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); if (!adapter->bar2) { dev_err(&pdev->dev, "cannot map device bar2 region\n"); err = -ENOMEM; - goto out_unmap_bar0; + goto out_free_adapter; } } @@ -6697,6 +6712,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_offload(adapter)) attach_ulds(adapter); +sriov: #ifdef CONFIG_PCI_IOV if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) if (pci_enable_sriov(pdev, num_vf[func]) == 0) @@ -6711,10 +6727,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) out_unmap_bar: if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); - out_unmap_bar0: - iounmap(adapter->regs); out_free_adapter: + if (adapter->workq) + destroy_workqueue(adapter->workq); + kfree(adapter); + out_unmap_bar0: + iounmap(regs); out_disable_device: pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@ -6735,6 +6754,11 @@ static void remove_one(struct pci_dev *pdev) if (adapter) { int i; + /* Tear down per-adapter Work Queue first since it can contain + * references to our adapter data structure. 
+ */ + destroy_workqueue(adapter->workq); + if (is_offload(adapter)) detach_ulds(adapter); @@ -6787,20 +6811,14 @@ static int __init cxgb4_init_module(void) { int ret; - workq = create_singlethread_workqueue("cxgb4"); - if (!workq) - return -ENOMEM; - /* Debugfs support is optional, just warn if this fails */ cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); if (!cxgb4_debugfs_root) pr_warn("could not create debugfs entry, continuing\n"); ret = pci_register_driver(&cxgb4_driver); - if (ret < 0) { + if (ret < 0) debugfs_remove(cxgb4_debugfs_root); - destroy_workqueue(workq); - } register_inet6addr_notifier(&cxgb4_inet6addr_notifier); @@ -6812,8 +6830,6 @@ static void __exit cxgb4_cleanup_module(void) unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); pci_unregister_driver(&cxgb4_driver); debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ - flush_workqueue(workq); - destroy_workqueue(workq); } module_init(cxgb4_init_module); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b0bba32d69d5..d22d728d4e5c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2303,7 +2303,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); - c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE | + FW_EQ_ETH_CMD_VIID(pi->viid)); c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | FW_EQ_ETH_CMD_FETCHRO(1) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a853133d8db8..41d04462b72e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) } /* + * t4_report_fw_error - report firmware error + * @adap: the adapter + * + * The adapter firmware can indicate error conditions to the host. + * If the firmware has indicated an error, print out the reason for + * the firmware error. + */ +static void t4_report_fw_error(struct adapter *adap) +{ + static const char *const reason[] = { + "Crash", /* PCIE_FW_EVAL_CRASH */ + "During Device Preparation", /* PCIE_FW_EVAL_PREP */ + "During Device Configuration", /* PCIE_FW_EVAL_CONF */ + "During Device Initialization", /* PCIE_FW_EVAL_INIT */ + "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ + "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ + "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ + "Reserved", /* reserved */ + }; + u32 pcie_fw; + + pcie_fw = t4_read_reg(adap, MA_PCIE_FW); + if (pcie_fw & FW_PCIE_FW_ERR) + dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", + reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); +} + +/* * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
*/ static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, @@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, dump_mbox(adap, mbox, data_reg); dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); + t4_report_fw_error(adap); return -ETIMEDOUT; } @@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 +#define CHELSIO_VPD_UNIQUE_ID 0x82 /** * t4_seeprom_wp - enable/disable EEPROM write protection @@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); if (ret < 0) goto out; - addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; + + /* The VPD shall have a unique identifier specified by the PCI SIG. + * For chelsio adapters, the identifier is 0x82. The first byte of a VPD + * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software + * is expected to automatically put this entry at the + * beginning of the VPD. + */ + addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); if (ret < 0) @@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); strim(p->sn); + i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->pn, vpd + pn, min(i, PN_LEN)); strim(p->pn); @@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter) int fat; - fat = t4_handle_intr_status(adapter, - PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, - sysbus_intr_info) + - t4_handle_intr_status(adapter, - PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, - pcie_port_intr_info) + - t4_handle_intr_status(adapter, PCIE_INT_CAUSE, - is_t4(adapter->params.chip) ? - pcie_intr_info : t5_pcie_intr_info); + if (is_t4(adapter->params.chip)) + fat = t4_handle_intr_status(adapter, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + t4_handle_intr_status(adapter, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, + pcie_intr_info); + else + fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE, + t5_pcie_intr_info); if (fat) t4_fatal_err(adapter); @@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter) int fat; + if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR) + t4_report_fw_error(adapter); + fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, cim_intr_info) + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, @@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap) { u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); - if (status & MEM_PERR_INT_CAUSE) + if (status & MEM_PERR_INT_CAUSE) { dev_alert(adap->pdev_dev, "MA parity error, parity status %#x\n", t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); + if (is_t5(adap->params.chip)) + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, + MA_PARITY_ERROR_STATUS2)); + } if (status & MEM_WRAP_INT_CAUSE) { v = t4_read_reg(adap, MA_INT_WRAP_STATUS); dev_alert(adap->pdev_dev, "MA address wrap-around error by " @@ -2733,12 +2783,16 @@ retry: /* * Issue the HELLO command to the firmware. If it's not successful * but indicates that we got a "busy" or "timeout" condition, retry - * the HELLO until we exhaust our retry limit. 
+ * the HELLO until we exhaust our retry limit. If we do exceed our + * retry limit, check to see if the firmware left us any error + * information and report that if so. */ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret < 0) { if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) goto retry; + if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) + t4_report_fw_error(adap); return ret; } @@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; + lc->supported = be16_to_cpu(p->u.info.pcap); t4_os_link_changed(adap, port, link_ok); } if (mod != pi->mod_type) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index e3146e83df20..39fb325474f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -511,6 +511,7 @@ #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) #define MA_PCIE_FW 0x30b8 #define MA_PARITY_ERROR_STATUS 0x77f4 +#define MA_PARITY_ERROR_STATUS2 0x7804 #define MA_EXT_MEMORY1_BAR 0x7808 #define EDC_0_BASE_ADDR 0x7900 @@ -959,6 +960,7 @@ #define TRCMULTIFILTER 0x00000001U #define MPS_TRC_RSS_CONTROL 0x9808 +#define MPS_T5_TRC_RSS_CONTROL 0xa00c #define RSSCONTROL_MASK 0x00ff0000U #define RSSCONTROL_SHIFT 16 #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index ff709e3b3e7e..3409756a85b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1227,6 +1227,7 @@ struct fw_eq_eth_cmd { #define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) #define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) +#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30) #define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) struct fw_eq_ctrl_cmd { @@ -1629,6 +1630,14 @@ enum fw_port_l2cfg_ctlbf { FW_PORT_L2_CTLBF_TXIPG = 0x20 }; +enum fw_port_dcb_versions { + FW_PORT_DCB_VER_UNKNOWN, + FW_PORT_DCB_VER_CEE1D0, + FW_PORT_DCB_VER_CEE1D01, + FW_PORT_DCB_VER_IEEE, + FW_PORT_DCB_VER_AUTO = 7 +}; + enum fw_port_dcb_cfg { FW_PORT_DCB_CFG_PG = 0x01, FW_PORT_DCB_CFG_PFC = 0x02, @@ -1709,6 +1718,7 @@ struct fw_port_cmd { __u8 r10_lo[5]; __u8 num_tcs_supported; __u8 pgrate[8]; + __u8 tsa[8]; } pgrate; struct fw_port_dcb_priorate { __u8 type; @@ -1735,7 +1745,7 @@ struct fw_port_cmd { struct fw_port_dcb_control { __u8 type; __u8 all_syncd_pkd; - __be16 pfc_state_to_app_state; + __be16 dcb_version_to_app_state; __be32 r11; __be64 r12; } control; @@ -1778,6 +1788,7 @@ struct fw_port_cmd { #define FW_PORT_CMD_DCBXDIS (1U << 7) #define FW_PORT_CMD_APPLY (1U << 7) #define FW_PORT_CMD_ALL_SYNCD (1U << 7) +#define FW_PORT_CMD_DCB_VERSION_GET(x) (((x) >> 8) & 0xf) #define FW_PORT_CMD_PPPEN(x) ((x) << 31) #define FW_PORT_CMD_TPSRC(x) ((x) << 28) @@ -2217,6 +2228,10 @@ struct fw_debug_cmd { #define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ FW_PCIE_FW_MASTER_MASK) +#define FW_PCIE_FW_EVAL_MASK 0x7 +#define FW_PCIE_FW_EVAL_SHIFT 24 +#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \ + FW_PCIE_FW_EVAL_MASK) struct fw_hdr { u8 ver; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index d8d28e82ade1..2102a4c91737 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2910,7 +2910,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) #define CH_DEVICE(devid, idx) \ { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } -static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = { +static const struct pci_device_id cxgb4vf_pci_tbl[] = { CH_DEVICE(0xb000, 0), /* PE10K FPGA */ CH_DEVICE(0x4800, 0), /* T440-dbg */ CH_DEVICE(0x4801, 0), /* T420-cr */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index bdfa80ca5e31..a5fb9493dee8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2250,7 +2250,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | FW_EQ_ETH_CMD_EQSTART | FW_LEN16(cmd)); - cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid)); + cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE | + FW_EQ_ETH_CMD_VIID(pi->viid)); cmd.fetchszm_to_iqid = cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9348febc0743..c8832bc1c5f7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -67,7 +67,7 @@ #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ /* Supported devices */ -static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = { +static const struct pci_device_id enic_id_table[] = { { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) }, { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 5abc496bcf29..37472ce4fac3 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -432,14 +432,12 @@ int vnic_dev_fw_info(struct vnic_dev *vdev, int err = 0; if (!vdev->fw_info) { - vdev->fw_info = pci_alloc_consistent(vdev->pdev, - sizeof(struct vnic_devcmd_fw_info), - &vdev->fw_info_pa); + vdev->fw_info = pci_zalloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + &vdev->fw_info_pa); if (!vdev->fw_info) return -ENOMEM; - memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info)); - a0 = vdev->fw_info_pa; a1 = sizeof(struct vnic_devcmd_fw_info); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 23084fb2090e..70089c29d307 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -93,7 +93,7 @@ enum dm9000_type { }; /* Structure/enum declaration ------------------------------- */ -typedef struct board_info { +struct board_info { void __iomem *io_addr; /* Register I/O base address */ void __iomem *io_data; /* Data I/O address */ @@ -141,7 +141,7 @@ typedef struct board_info { u32 wake_state; int ip_summed; -} board_info_t; +}; /* debug code */ @@ -151,7 +151,7 @@ typedef struct board_info { } \ } while (0) -static inline board_info_t *to_dm9000_board(struct net_device *dev) +static inline struct board_info *to_dm9000_board(struct net_device *dev) { return netdev_priv(dev); } @@ -162,7 +162,7 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev) * Read a byte from I/O port */ static u8 -ior(board_info_t *db, int reg) +ior(struct board_info *db, int reg) { writeb(reg, 
db->io_addr); return readb(db->io_data); @@ -173,14 +173,14 @@ ior(board_info_t *db, int reg) */ static void -iow(board_info_t *db, int reg, int value) +iow(struct board_info *db, int reg, int value) { writeb(reg, db->io_addr); writeb(value, db->io_data); } static void -dm9000_reset(board_info_t *db) +dm9000_reset(struct board_info *db) { dev_dbg(db->dev, "resetting device\n"); @@ -272,7 +272,7 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count) * Sleep, either by using msleep() or if we are suspending, then * use mdelay() to sleep. */ -static void dm9000_msleep(board_info_t *db, unsigned int ms) +static void dm9000_msleep(struct board_info *db, unsigned int ms) { if (db->in_suspend || db->in_timeout) mdelay(ms); @@ -284,7 +284,7 @@ static void dm9000_msleep(board_info_t *db, unsigned int ms) static int dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); unsigned long flags; unsigned int reg_save; int ret; @@ -330,7 +330,7 @@ static void dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); unsigned long flags; unsigned long reg_save; @@ -408,7 +408,7 @@ static void dm9000_set_io(struct board_info *db, int byte_width) } } -static void dm9000_schedule_poll(board_info_t *db) +static void dm9000_schedule_poll(struct board_info *db) { if (db->type == TYPE_DM9000E) schedule_delayed_work(&db->phy_poll, HZ * 2); @@ -416,7 +416,7 @@ static void dm9000_schedule_poll(board_info_t *db) static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); if (!netif_running(dev)) return -EINVAL; @@ -425,7 +425,7 @@ static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd) } static unsigned int -dm9000_read_locked(board_info_t *db, int reg) +dm9000_read_locked(struct board_info *db, int reg) { unsigned long flags; unsigned int ret; @@ -437,7 +437,7 @@ dm9000_read_locked(board_info_t *db, int reg) return ret; } -static int dm9000_wait_eeprom(board_info_t *db) +static int dm9000_wait_eeprom(struct board_info *db) { unsigned int status; int timeout = 8; /* wait max 8msec */ @@ -474,7 +474,7 @@ static int dm9000_wait_eeprom(board_info_t *db) * Read a word data from EEPROM */ static void -dm9000_read_eeprom(board_info_t *db, int offset, u8 *to) +dm9000_read_eeprom(struct board_info *db, int offset, u8 *to) { unsigned long flags; @@ -514,7 +514,7 @@ dm9000_read_eeprom(board_info_t *db, int offset, u8 *to) * Write a word data to SROM */ static void -dm9000_write_eeprom(board_info_t *db, int offset, u8 *data) +dm9000_write_eeprom(struct board_info *db, int offset, u8 *data) { unsigned long flags; @@ -546,7 +546,7 @@ dm9000_write_eeprom(board_info_t *db, int offset, u8 *data) static void dm9000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); strlcpy(info->driver, CARDNAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); @@ -556,21 +556,21 @@ static void dm9000_get_drvinfo(struct net_device *dev, static u32 dm9000_get_msglevel(struct net_device *dev) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); return dm->msg_enable; } static void dm9000_set_msglevel(struct net_device *dev, u32 
value) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); dm->msg_enable = value; } static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); mii_ethtool_gset(&dm->mii, cmd); return 0; @@ -578,21 +578,21 @@ static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); return mii_ethtool_sset(&dm->mii, cmd); } static int dm9000_nway_reset(struct net_device *dev) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); return mii_nway_restart(&dm->mii); } static int dm9000_set_features(struct net_device *dev, netdev_features_t features) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); netdev_features_t changed = dev->features ^ features; unsigned long flags; @@ -608,7 +608,7 @@ static int dm9000_set_features(struct net_device *dev, static u32 dm9000_get_link(struct net_device *dev) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); u32 ret; if (dm->flags & DM9000_PLATF_EXT_PHY) @@ -629,7 +629,7 @@ static int dm9000_get_eeprom_len(struct net_device *dev) static int dm9000_get_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); int offset = ee->offset; int len = ee->len; int i; @@ -653,7 +653,7 @@ static int dm9000_get_eeprom(struct net_device *dev, static int dm9000_set_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); int offset = ee->offset; int len = ee->len; int done; @@ -691,7 +691,7 @@ static int dm9000_set_eeprom(struct net_device *dev, static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); memset(w, 0, sizeof(struct ethtool_wolinfo)); @@ -702,7 +702,7 @@ static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); unsigned long flags; u32 opts = w->wolopts; u32 wcr = 0; @@ -752,7 +752,7 @@ static const struct ethtool_ops dm9000_ethtool_ops = { .set_eeprom = dm9000_set_eeprom, }; -static void dm9000_show_carrier(board_info_t *db, +static void dm9000_show_carrier(struct board_info *db, unsigned carrier, unsigned nsr) { int lpa; @@ -775,7 +775,7 @@ static void dm9000_poll_work(struct work_struct *w) { struct delayed_work *dw = to_delayed_work(w); - board_info_t *db = container_of(dw, board_info_t, phy_poll); + struct board_info *db = container_of(dw, struct board_info, phy_poll); struct net_device *ndev = db->ndev; if (db->flags & DM9000_PLATF_SIMPLE_PHY && @@ -843,7 +843,7 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type) static void dm9000_hash_table_unlocked(struct net_device *dev) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); struct netdev_hw_addr *ha; int i, oft; u32 hash_val; @@ -879,7 +879,7 @@ dm9000_hash_table_unlocked(struct net_device *dev) static 
void dm9000_hash_table(struct net_device *dev) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&db->lock, flags); @@ -888,13 +888,13 @@ dm9000_hash_table(struct net_device *dev) } static void -dm9000_mask_interrupts(board_info_t *db) +dm9000_mask_interrupts(struct board_info *db) { iow(db, DM9000_IMR, IMR_PAR); } static void -dm9000_unmask_interrupts(board_info_t *db) +dm9000_unmask_interrupts(struct board_info *db) { iow(db, DM9000_IMR, db->imr_all); } @@ -905,7 +905,7 @@ dm9000_unmask_interrupts(board_info_t *db) static void dm9000_init_dm9000(struct net_device *dev) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); unsigned int imr; unsigned int ncr; @@ -970,7 +970,7 @@ dm9000_init_dm9000(struct net_device *dev) /* Our watchdog timed out. Called by the networking layer */ static void dm9000_timeout(struct net_device *dev) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); u8 reg_save; unsigned long flags; @@ -996,7 +996,7 @@ static void dm9000_send_packet(struct net_device *dev, int ip_summed, u16 pkt_len) { - board_info_t *dm = to_dm9000_board(dev); + struct board_info *dm = to_dm9000_board(dev); /* The DM9000 is not smart enough to leave fragmented packets alone. */ if (dm->ip_summed != ip_summed) { @@ -1023,7 +1023,7 @@ static int dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); dm9000_dbg(db, 3, "%s:\n", __func__); @@ -1062,7 +1062,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) * receive the packet to upper layer, free the transmitted packet */ -static void dm9000_tx_done(struct net_device *dev, board_info_t *db) +static void dm9000_tx_done(struct net_device *dev, struct board_info *db) { int tx_status = ior(db, DM9000_NSR); /* Got TX status */ @@ -1094,7 +1094,7 @@ struct dm9000_rxhdr { static void dm9000_rx(struct net_device *dev) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); struct dm9000_rxhdr rxhdr; struct sk_buff *skb; u8 rxbyte, *rdptr; @@ -1196,7 +1196,7 @@ dm9000_rx(struct net_device *dev) static irqreturn_t dm9000_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); int int_status; unsigned long flags; u8 reg_save; @@ -1246,7 +1246,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id) static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); unsigned long flags; unsigned nsr, wcr; @@ -1296,7 +1296,7 @@ static void dm9000_poll_controller(struct net_device *dev) static int dm9000_open(struct net_device *dev) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK; if (netif_msg_ifup(db)) @@ -1342,7 +1342,7 @@ dm9000_open(struct net_device *dev) static void dm9000_shutdown(struct net_device *dev) { - board_info_t *db = netdev_priv(dev); + struct board_info *db = netdev_priv(dev); /* RESET device */ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ @@ -1358,7 +1358,7 @@ dm9000_shutdown(struct net_device *dev) static int dm9000_stop(struct net_device *ndev) { - board_info_t *db = netdev_priv(ndev); + struct board_info *db = 
netdev_priv(ndev); if (netif_msg_ifdown(db)) dev_dbg(db->dev, "shutting down %s\n", ndev->name); @@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) const void *mac_addr; if (!IS_ENABLED(CONFIG_OF) || !np) - return NULL; + return ERR_PTR(-ENXIO); pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) @@ -1681,7 +1681,7 @@ dm9000_drv_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - board_info_t *db; + struct board_info *db; if (ndev) { db = netdev_priv(ndev); @@ -1704,7 +1704,7 @@ dm9000_drv_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - board_info_t *db = netdev_priv(ndev); + struct board_info *db = netdev_priv(ndev); if (ndev) { if (netif_running(ndev)) { diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 38148b0e3a95..a02ecc4f9002 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -340,7 +340,7 @@ static void de21041_media_timer (unsigned long data); static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); -static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = { +static const struct pci_device_id de_pci_tbl[] = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 7091fa6ed096..cf8b6ff21613 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev) pci_disable_device (pdev); } -static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = { +static const struct pci_device_id de4x5_pci_tbl[] = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 53f0c618045c..322213d901d5 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -2096,7 +2096,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) -static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = { +static const struct pci_device_id dmfe_pci_tbl[] = { { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID }, { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID }, { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID }, diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 861660841ce2..3b42556f7f8d 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -207,7 +207,7 @@ struct tulip_chip_table tulip_tbl[] = { }; -static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = { +static const struct pci_device_id tulip_pci_tbl[] = { { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 }, { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 }, { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 }, @@ -1294,7 +1294,7 @@ static const struct net_device_ops tulip_netdev_ops = { #endif }; -DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = { +const struct pci_device_id early_486_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) }, { 
PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) }, { }, diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 80afec335a11..4061f9b22812 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -1768,7 +1768,7 @@ static u16 phy_read_1bit(struct uli526x_board_info *db) } -static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = { +static const struct pci_device_id uli526x_pci_tbl[] = { { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID }, { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID }, { 0, } diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 62fe512bb216..6aa887e0e1cb 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -219,7 +219,7 @@ enum chip_capability_flags { CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8, }; -static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = { +static const struct pci_device_id w840_pci_tbl[] = { { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index 6204cdfe43a6..0e721cedfa67 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -137,7 +137,7 @@ static int link_status(struct xircom_private *card); -static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = { +static const struct pci_device_id xircom_pci_table[] = { { PCI_VDEVICE(XIRCOM, 0x0003), }, {0,}, }; diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 7d07a0f5320d..23c07b007069 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -408,7 +408,7 @@ struct netdev_private { driver_data Data private to the driver. */ -static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = { +static const struct pci_device_id rio_pci_tbl[] = { {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, { } diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 433c1e185442..a28a2e583f0f 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -199,7 +199,7 @@ IVc. 
Errata #define USE_IO_OPS 1 #endif -static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = { +static const struct pci_device_id sundance_pci_tbl[] = { { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 811f1351db7a..43e08d0bc3d3 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -897,5 +897,6 @@ void be_roce_dev_remove(struct be_adapter *); */ void be_roce_dev_open(struct be_adapter *); void be_roce_dev_close(struct be_adapter *); +void be_roce_dev_shutdown(struct be_adapter *); #endif /* BE_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index db4ff14ff18f..93ff8ef39352 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -39,7 +39,7 @@ static ushort rx_frag_size = 2048; module_param(rx_frag_size, ushort, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); -static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { +static const struct pci_device_id be_dev_ids[] = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, @@ -5014,6 +5014,7 @@ static void be_shutdown(struct pci_dev *pdev) if (!adapter) return; + be_roce_dev_shutdown(adapter); cancel_delayed_work_sync(&adapter->work); cancel_delayed_work_sync(&adapter->func_recovery_work); diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 5bf16603a3e9..ef4672dc7357 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -120,7 +120,8 @@ static void _be_roce_dev_open(struct be_adapter *adapter) { if (ocrdma_drv && adapter->ocrdma_dev && ocrdma_drv->state_change_handler) - ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0); + ocrdma_drv->state_change_handler(adapter->ocrdma_dev, + BE_DEV_UP); } void be_roce_dev_open(struct be_adapter *adapter) @@ -136,7 +137,8 @@ static void _be_roce_dev_close(struct be_adapter *adapter) { if (ocrdma_drv && adapter->ocrdma_dev && ocrdma_drv->state_change_handler) - ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1); + ocrdma_drv->state_change_handler(adapter->ocrdma_dev, + BE_DEV_DOWN); } void be_roce_dev_close(struct be_adapter *adapter) @@ -148,6 +150,18 @@ void be_roce_dev_close(struct be_adapter *adapter) } } +void be_roce_dev_shutdown(struct be_adapter *adapter) +{ + if (be_roce_supported(adapter)) { + mutex_lock(&be_adapter_list_lock); + if (ocrdma_drv && adapter->ocrdma_dev && + ocrdma_drv->state_change_handler) + ocrdma_drv->state_change_handler(adapter->ocrdma_dev, + BE_DEV_SHUTDOWN); + mutex_unlock(&be_adapter_list_lock); + } +} + int be_roce_register_driver(struct ocrdma_driver *drv) { struct be_adapter *dev; diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index a3d9e96c18eb..e6f7eb1a7d87 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -62,7 +62,8 @@ struct ocrdma_driver { enum { BE_DEV_UP = 0, - BE_DEV_DOWN = 1 + BE_DEV_DOWN = 1, + BE_DEV_SHUTDOWN = 2 }; /* APIs for RoCE driver to register callback handlers, diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 4b22a9579f85..b1b9ebafb354 100644 
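
/*
 * Illustrative sketch (not part of the patch): the benet hunks above add a
 * BE_DEV_SHUTDOWN event and deliver it to the registered RoCE driver through
 * the existing state_change_handler callback while holding the adapter-list
 * mutex.  The fragment below strips that notification pattern to its core;
 * the my_* names and the single-listener registry are illustrative
 * assumptions, not benet symbols.
 */
#include <linux/mutex.h>

enum my_dev_event { MY_DEV_UP, MY_DEV_DOWN, MY_DEV_SHUTDOWN };

struct my_listener {
	void (*state_change)(void *ctx, enum my_dev_event event);
	void *ctx;
};

static DEFINE_MUTEX(my_listener_lock);
static struct my_listener *my_listener;	/* set when the consumer registers */

static void my_notify(enum my_dev_event event)
{
	mutex_lock(&my_listener_lock);
	if (my_listener && my_listener->state_change)
		my_listener->state_change(my_listener->ctx, event);
	mutex_unlock(&my_listener_lock);
}
/* A PCI .shutdown hook would call my_notify(MY_DEV_SHUTDOWN) before quiescing. */
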
--- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1936,7 +1936,7 @@ static int netdev_close(struct net_device *dev) return 0; } -static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = { +static const struct pci_device_id fealnx_pci_tbl[] = { {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index bd53caf1c1eb..ee41d98b44b6 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -275,6 +275,9 @@ struct fec_enet_private { struct clk *clk_enet_out; struct clk *clk_ptp; + bool ptp_clk_on; + struct mutex ptp_clk_mutex; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff *tx_skbuff[TX_RING_SIZE]; @@ -310,6 +313,7 @@ struct fec_enet_private { int mii_timeout; uint phy_speed; phy_interface_t phy_interface; + struct device_node *phy_node; int link; int full_duplex; int speed; @@ -334,7 +338,7 @@ struct fec_enet_private { u32 cycle_speed; int hwts_rx_en; int hwts_tx_en; - struct timer_list time_keep; + struct delayed_work time_keep; struct regulator *reg_phy; }; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 66fe1f672499..89355a719625 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -52,6 +52,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> +#include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/regulator/consumer.h> #include <linux/if_vlan.h> @@ -1610,17 +1611,27 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) goto failed_clk_enet_out; } if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); ret = clk_prepare_enable(fep->clk_ptp); - if (ret) + if (ret) { + mutex_unlock(&fep->ptp_clk_mutex); goto failed_clk_ptp; + } else { + fep->ptp_clk_on = true; + } + mutex_unlock(&fep->ptp_clk_mutex); } } else { clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); - if (fep->clk_ptp) + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); + fep->ptp_clk_on = false; + mutex_unlock(&fep->ptp_clk_mutex); + } } return 0; @@ -1648,29 +1659,37 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->phy_dev = NULL; - /* check for attached phy */ - for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { - if ((fep->mii_bus->phy_mask & (1 << phy_id))) - continue; - if (fep->mii_bus->phy_map[phy_id] == NULL) - continue; - if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) - continue; - if (dev_id--) - continue; - strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); - break; - } + if (fep->phy_node) { + phy_dev = of_phy_connect(ndev, fep->phy_node, + &fec_enet_adjust_link, 0, + fep->phy_interface); + } else { + /* check for attached phy */ + for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { + if ((fep->mii_bus->phy_mask & (1 << phy_id))) + continue; + if (fep->mii_bus->phy_map[phy_id] == NULL) + continue; + if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) + continue; + if (dev_id--) + continue; + strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + break; + } + + if (phy_id >= PHY_MAX_ADDR) { + netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); + 
strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + phy_id = 0; + } - if (phy_id >= PHY_MAX_ADDR) { - netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); - phy_id = 0; + snprintf(phy_name, sizeof(phy_name), + PHY_ID_FMT, mdio_bus_id, phy_id); + phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, + fep->phy_interface); } - snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id); - phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, - fep->phy_interface); if (IS_ERR(phy_dev)) { netdev_err(ndev, "could not attach to PHY\n"); return PTR_ERR(phy_dev); @@ -1707,6 +1726,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + struct device_node *node; int err = -ENXIO, i; /* @@ -1774,7 +1794,15 @@ static int fec_enet_mii_init(struct platform_device *pdev) for (i = 0; i < PHY_MAX_ADDR; i++) fep->mii_bus->irq[i] = PHY_POLL; - if (mdiobus_register(fep->mii_bus)) + node = of_get_child_by_name(pdev->dev.of_node, "mdio"); + if (node) { + err = of_mdiobus_register(fep->mii_bus, node); + of_node_put(node); + } else { + err = mdiobus_register(fep->mii_bus); + } + + if (err) goto err_out_free_mdio_irq; mii_cnt++; @@ -2527,6 +2555,7 @@ fec_probe(struct platform_device *pdev) struct resource *r; const struct of_device_id *of_id; static int dev_id; + struct device_node *np = pdev->dev.of_node, *phy_node; of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) @@ -2566,6 +2595,18 @@ fec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(np)) { + ret = of_phy_register_fixed_link(np); + if (ret < 0) { + dev_err(&pdev->dev, + "broken fixed-link specification\n"); + goto failed_phy; + } + phy_node = of_node_get(np); + } + fep->phy_node = phy_node; + ret = of_get_phy_mode(pdev->dev.of_node); if (ret < 0) { pdata = dev_get_platdata(&pdev->dev); @@ -2594,6 +2635,8 @@ fec_probe(struct platform_device *pdev) if (IS_ERR(fep->clk_enet_out)) fep->clk_enet_out = NULL; + fep->ptp_clk_on = false; + mutex_init(&fep->ptp_clk_mutex); fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); fep->bufdesc_ex = pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; @@ -2670,6 +2713,8 @@ failed_init: failed_regulator: fec_enet_clk_enable(ndev, false); failed_clk: +failed_phy: + of_node_put(phy_node); failed_ioremap: free_netdev(ndev); @@ -2682,15 +2727,16 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + cancel_delayed_work_sync(&fep->time_keep); cancel_work_sync(&fep->tx_timeout_work); unregister_netdev(ndev); fec_enet_mii_remove(fep); - del_timer_sync(&fep->time_keep); if (fep->reg_phy) regulator_disable(fep->reg_phy); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); fec_enet_clk_enable(ndev, false); + of_node_put(fep->phy_node); free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 9947765e90c5..ff55fbb20a75 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -1015,8 +1015,7 @@ mpc52xx_fec_remove(struct platform_device *op) unregister_netdev(ndev); - if (priv->phy_node) - of_node_put(priv->phy_node); + of_node_put(priv->phy_node); 
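
/*
 * Illustrative sketch (not part of the patch): the fec/mpc52xx hunks around
 * here rely on two properties of the OF refcounting helpers -- of_node_put()
 * is a no-op for NULL (so the "if (node)" guard can go), and a node released
 * in remove() must have been acquired with of_parse_phandle() or
 * of_node_get() in probe(), including the fixed-link case where the MAC node
 * doubles as the PHY node.  struct my_priv and the my_*_phy() helpers below
 * are illustrative names only.
 */
#include <linux/of.h>
#include <linux/of_mdio.h>

struct my_priv {
	struct device_node *phy_node;
};

static void my_probe_phy(struct my_priv *priv, struct device_node *np)
{
	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);	/* takes a ref */
	if (!priv->phy_node && of_phy_is_fixed_link(np))
		priv->phy_node = of_node_get(np);	/* balance the put in remove */
}

static void my_remove_phy(struct my_priv *priv)
{
	of_node_put(priv->phy_node);	/* NULL-safe, no need to test first */
	priv->phy_node = NULL;
}
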
priv->phy_node = NULL; irq_dispose_mapping(ndev->irq); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 82386b29914a..cca3617a2321 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -245,12 +245,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, u64 ns; unsigned long flags; + mutex_lock(&fep->ptp_clk_mutex); + /* Check the ptp clock */ + if (!fep->ptp_clk_on) { + mutex_unlock(&fep->ptp_clk_mutex); + return -EINVAL; + } + ns = ts->tv_sec * 1000000000ULL; ns += ts->tv_nsec; spin_lock_irqsave(&fep->tmreg_lock, flags); timecounter_init(&fep->tc, &fep->cc, ns); spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); return 0; } @@ -338,17 +346,22 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr) * fec_time_keep - call timecounter_read every second to avoid timer overrun * because ENET just support 32bit counter, will timeout in 4s */ -static void fec_time_keep(unsigned long _data) +static void fec_time_keep(struct work_struct *work) { - struct fec_enet_private *fep = (struct fec_enet_private *)_data; + struct delayed_work *dwork = to_delayed_work(work); + struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); u64 ns; unsigned long flags; - spin_lock_irqsave(&fep->tmreg_lock, flags); - ns = timecounter_read(&fep->tc); - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); + if (fep->ptp_clk_on) { + spin_lock_irqsave(&fep->tmreg_lock, flags); + ns = timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + } + mutex_unlock(&fep->ptp_clk_mutex); - mod_timer(&fep->time_keep, jiffies + HZ); + schedule_delayed_work(&fep->time_keep, HZ); } /** @@ -386,15 +399,13 @@ void fec_ptp_init(struct platform_device *pdev) fec_ptp_start_cyclecounter(ndev); - init_timer(&fep->time_keep); - fep->time_keep.data = (unsigned long)fep; - fep->time_keep.function = fec_time_keep; - fep->time_keep.expires = jiffies + HZ; - add_timer(&fep->time_keep); + INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); if (IS_ERR(fep->ptp_clock)) { fep->ptp_clock = NULL; pr_err("ptp_clock_register failed\n"); } + + schedule_delayed_work(&fep->time_keep, HZ); } diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index cfaf17b70f3f..748fd24d3d9e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1033,7 +1033,7 @@ static int fs_enet_probe(struct platform_device *ofdev) /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. 
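
/*
 * Illustrative sketch (not part of the patch): fec_ptp above converts a
 * self-rearming timer_list into a delayed_work item so the periodic
 * timecounter refresh may sleep while taking ptp_clk_mutex.  The fragment
 * below shows the shape of that conversion; struct my_priv and the my_*
 * functions are illustrative names, not driver symbols.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

struct my_priv {
	struct delayed_work time_keep;
	struct mutex clk_mutex;
	bool clk_on;
};

static void my_time_keep(struct work_struct *work)
{
	struct my_priv *priv = container_of(to_delayed_work(work),
					    struct my_priv, time_keep);

	mutex_lock(&priv->clk_mutex);	/* sleeping is allowed in a work item */
	if (priv->clk_on) {
		/* safe to touch the clock hardware; the driver reads its
		 * timecounter here under a separate spinlock */
	}
	mutex_unlock(&priv->clk_mutex);

	schedule_delayed_work(&priv->time_keep, HZ);	/* re-arm in one second */
}

static void my_start(struct my_priv *priv)
{
	mutex_init(&priv->clk_mutex);
	INIT_DELAYED_WORK(&priv->time_keep, my_time_keep);
	schedule_delayed_work(&priv->time_keep, HZ);
}
/* The matching teardown is cancel_delayed_work_sync(&priv->time_keep). */
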
*/ - fpi->phy_node = ofdev->dev.of_node; + fpi->phy_node = of_node_get(ofdev->dev.of_node); } if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index fc5413488496..1eedfba2ad3c 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -41,7 +41,6 @@ #ifdef CONFIG_8xx #include <asm/8xx_immap.h> #include <asm/pgtable.h> -#include <asm/mpc8xx.h> #include <asm/cpm1.h> #endif diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index b4bf02f57d43..90b3b19b7cd3 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -40,7 +40,6 @@ #ifdef CONFIG_8xx #include <asm/8xx_immap.h> #include <asm/pgtable.h> -#include <asm/mpc8xx.h> #include <asm/cpm1.h> #endif diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a6cf40e62f3a..fb29d049f4e1 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -892,12 +892,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. */ - if (of_phy_is_fixed_link(np)) { + if (!priv->phy_node && of_phy_is_fixed_link(np)) { err = of_phy_register_fixed_link(np); if (err) goto err_grp_init; - priv->phy_node = np; + priv->phy_node = of_node_get(np); } /* Find the TBI PHY. If it's not there, we don't support SGMII */ @@ -1435,10 +1435,8 @@ register_fail: unmap_group_regs(priv); gfar_free_rx_queues(priv); gfar_free_tx_queues(priv); - if (priv->phy_node) - of_node_put(priv->phy_node); - if (priv->tbi_node) - of_node_put(priv->tbi_node); + of_node_put(priv->phy_node); + of_node_put(priv->tbi_node); free_gfar_dev(priv); return err; } @@ -1447,10 +1445,8 @@ static int gfar_remove(struct platform_device *ofdev) { struct gfar_private *priv = platform_get_drvdata(ofdev); - if (priv->phy_node) - of_node_put(priv->phy_node); - if (priv->tbi_node) - of_node_put(priv->tbi_node); + of_node_put(priv->phy_node); + of_node_put(priv->tbi_node); unregister_netdev(priv->ndev); unmap_group_regs(priv); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 8ceaf7a2660c..3cf0478b3728 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3785,16 +3785,15 @@ static int ucc_geth_probe(struct platform_device* ofdev) ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); - if (!ug_info->phy_node) { - /* In the case of a fixed PHY, the DT node associated + if (!ug_info->phy_node && of_phy_is_fixed_link(np)) { + /* + * In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. */ - if (of_phy_is_fixed_link(np)) { - err = of_phy_register_fixed_link(np); - if (err) - return err; - } - ug_info->phy_node = np; + err = of_phy_register_fixed_link(np); + if (err) + return err; + ug_info->phy_node = of_node_get(np); } /* Find the TBI PHY node. 
If it's not there, we don't support SGMII */ @@ -3862,8 +3861,11 @@ static int ucc_geth_probe(struct platform_device* ofdev) /* Create an ethernet device instance */ dev = alloc_etherdev(sizeof(*ugeth)); - if (dev == NULL) + if (dev == NULL) { + of_node_put(ug_info->tbi_node); + of_node_put(ug_info->phy_node); return -ENOMEM; + } ugeth = netdev_priv(dev); spin_lock_init(&ugeth->lock); @@ -3897,6 +3899,8 @@ static int ucc_geth_probe(struct platform_device* ofdev) pr_err("%s: Cannot register net device, aborting\n", dev->name); free_netdev(dev); + of_node_put(ug_info->tbi_node); + of_node_put(ug_info->phy_node); return err; } @@ -3920,6 +3924,8 @@ static int ucc_geth_remove(struct platform_device* ofdev) unregister_netdev(dev); free_netdev(dev); ucc_geth_memclean(ugeth); + of_node_put(ugeth->ug_info->tbi_node); + of_node_put(ugeth->ug_info->phy_node); return 0; } diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index cfe7a7431730..a7139f588ad2 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -99,23 +99,23 @@ static const struct ethtool_ops netdev_ethtool_ops; /* card type */ -typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, +enum cardtype { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, XXX10304, NEC, KME -} cardtype_t; +}; /* driver specific data structure */ -typedef struct local_info_t { +struct local_info { struct pcmcia_device *p_dev; long open_time; uint tx_started:1; uint tx_queue; u_short tx_queue_len; - cardtype_t cardtype; + enum cardtype cardtype; u_short sent; u_char __iomem *base; -} local_info_t; +}; #define MC_FILTERBREAK 64 @@ -232,13 +232,13 @@ static const struct net_device_ops fjn_netdev_ops = { static int fmvj18x_probe(struct pcmcia_device *link) { - local_info_t *lp; + struct local_info *lp; struct net_device *dev; dev_dbg(&link->dev, "fmvj18x_attach()\n"); /* Make up a FMVJ18x specific data structure */ - dev = alloc_etherdev(sizeof(local_info_t)); + dev = alloc_etherdev(sizeof(struct local_info)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); @@ -327,10 +327,10 @@ static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, void *priv_data) static int fmvj18x_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); int i, ret; unsigned int ioaddr; - cardtype_t cardtype; + enum cardtype cardtype; char *card_name = "unknown"; u8 *buf; size_t len; @@ -584,7 +584,7 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link) int i; struct net_device *dev = link->priv; unsigned int ioaddr; - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); /* Allocate a small memory window */ link->resource[3]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; @@ -626,7 +626,7 @@ static void fmvj18x_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); u_char __iomem *tmp; dev_dbg(&link->dev, "fmvj18x_release\n"); @@ -711,7 +711,7 @@ module_pcmcia_driver(fmvj18x_cs_driver); static irqreturn_t fjn_interrupt(int dummy, void *dev_id) { struct net_device *dev = dev_id; - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); unsigned int ioaddr; unsigned short tx_stat, rx_stat; @@ -772,7 +772,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) static void fjn_tx_timeout(struct net_device *dev) { - 
struct local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; netdev_notice(dev, "transmit timed out with status %04x, %s?\n", @@ -802,7 +802,7 @@ static void fjn_tx_timeout(struct net_device *dev) static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; short length = skb->len; @@ -874,7 +874,7 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, static void fjn_reset(struct net_device *dev) { - struct local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; int i; @@ -1058,7 +1058,7 @@ static int fjn_config(struct net_device *dev, struct ifmap *map){ static int fjn_open(struct net_device *dev) { - struct local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; pr_debug("fjn_open('%s').\n", dev->name); @@ -1083,7 +1083,7 @@ static int fjn_open(struct net_device *dev) static int fjn_close(struct net_device *dev) { - struct local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; unsigned int ioaddr = dev->base_addr; diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 37860096f744..ed7916f6fbcf 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -208,7 +208,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); #endif #ifdef CONFIG_PCI -static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = { +static const struct pci_device_id hp100_pci_tbl[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,}, diff --git a/drivers/net/ethernet/ibm/ehea/Makefile b/drivers/net/ethernet/ibm/ehea/Makefile index 775d9969b5c2..cd473e295242 100644 --- a/drivers/net/ethernet/ibm/ehea/Makefile +++ b/drivers/net/ethernet/ibm/ehea/Makefile @@ -1,6 +1,6 @@ # # Makefile for the eHEA ethernet device driver for IBM eServer System p # -ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o ehea_phyp.o +ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o obj-$(CONFIG_EHEA) += ehea.o diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index a0b418e007a0..566b17db135a 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe) { swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; - if (skb->protocol != htons(ETH_P_IP)) + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) return; if (skb->ip_summed == CHECKSUM_PARTIAL) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c9127562bd22..21978cc019e7 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -292,6 +292,18 @@ failure: atomic_add(buffers_added, &(pool->available)); } +/* + * The final 8 bytes of the buffer list is a counter of frames dropped + * because there was not a buffer in the buffer list capable of holding + * the frame. 
+ */ +static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter) +{ + __be64 *p = adapter->buffer_list_addr + 4096 - 8; + + adapter->rx_no_buffer = be64_to_cpup(p); +} + /* replenish routine */ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) { @@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) ibmveth_replenish_buffer_pool(adapter, pool); } - adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) + - 4096 - 8); + ibmveth_update_rx_no_buffer(adapter); } /* empty and free ana buffer pool - also used to do cleanup in error paths */ @@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev) free_irq(netdev->irq, netdev); - adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + - 4096 - 8); + ibmveth_update_rx_no_buffer(adapter); ibmveth_cleanup(adapter); diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index 5727779a7df2..ff2903652f4b 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -95,7 +95,7 @@ static const char * const ipg_brand_name[] = { "D-Link NIC IP1000A" }; -static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { +static const struct pci_device_id ipg_pci_tbl[] = { { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, { PCI_VDEVICE(DLINK, 0x9021), 2 }, diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 9d979d7debef..781065eb5431 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } -static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = { +static const struct pci_device_id e100_id_table[] = { INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 660971f304b2..ad3d5d12173f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -46,7 +46,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation * Macro expands to... 
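
/*
 * Illustrative sketch (not part of the patch): ibmveth_update_rx_no_buffer()
 * above replaces an open-coded cast with a typed big-endian read of the drop
 * counter kept in the last 8 bytes of the 4096-byte buffer list.  The
 * standalone program below performs the same "fixed offset, fixed
 * endianness" read; the buffer contents and the helper name are fabricated
 * for the example.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t example_be64_to_cpu(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];	/* big-endian: most significant byte first */
	return v;
}

int main(void)
{
	uint8_t buffer_list[4096] = { 0 };
	const uint8_t sample[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x2c };	/* 300 dropped frames */

	memcpy(buffer_list + sizeof(buffer_list) - 8, sample, 8);	/* the device writes here */
	printf("rx_no_buffer = %llu\n", (unsigned long long)
	       example_be64_to_cpu(buffer_list + sizeof(buffer_list) - 8));
	return 0;
}
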
* {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} */ -static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { +static const struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x1000), INTEL_E1000_ETHERNET_DEVICE(0x1001), INTEL_E1000_ETHERNET_DEVICE(0x1004), @@ -2674,7 +2674,8 @@ set_itr_now: #define E1000_TX_FLAGS_VLAN_SHIFT 16 static int e1000_tso(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring, struct sk_buff *skb) + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; @@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter, hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; - if (skb->protocol == htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter, 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, @@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter, } static bool e1000_tx_csum(struct e1000_adapter *adapter, - struct e1000_tx_ring *tx_ring, struct sk_buff *skb) + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; @@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, if (skb->ip_summed != CHECKSUM_PARTIAL) return false; - switch (skb->protocol) { + switch (protocol) { case cpu_to_be16(ETH_P_IP): if (ip_hdr(skb)->protocol == IPPROTO_TCP) cmd_len |= E1000_TXD_CMD_TCP; @@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, int count = 0; int tso; unsigned int f; + __be16 protocol = vlan_get_protocol(skb); /* This goes back to the question of how to logically map a Tx queue * to a flow. 
Right now, performance is impacted slightly negatively @@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, first = tx_ring->next_to_use; - tso = e1000_tso(adapter, tx_ring, skb); + tso = e1000_tso(adapter, tx_ring, skb, protocol); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (likely(hw->mac_type != e1000_82544)) tx_ring->last_tx_tso = true; tx_flags |= E1000_TX_FLAGS_TSO; - } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) tx_flags |= E1000_TX_FLAGS_CSUM; - if (likely(skb->protocol == htons(ETH_P_IP))) + if (protocol == htons(ETH_P_IP)) tx_flags |= E1000_TX_FLAGS_IPV4; if (unlikely(skb->no_fcs)) diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index 58856032298d..06edfca1a35e 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -47,7 +47,7 @@ static u8 e1000_calculate_checksum(u8 *buffer, u32 length) * e1000_mng_enable_host_if - Checks host interface is enabled * @hw: pointer to the HW structure * - * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND * * This function checks whether the HOST IF is enabled for command operation * and also checks whether the previous command is completed. It busy waits @@ -78,7 +78,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) } if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { - e_dbg("Previous command timeout failed .\n"); + e_dbg("Previous command timeout failed.\n"); return -E1000_ERR_HOST_INTERFACE_COMMAND; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 65c3aef2bd36..247335d2c7ec 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5164,7 +5164,8 @@ link_up: #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 -static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) +static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; @@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; - if (skb->protocol == htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) return 1; } -static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) +static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) { struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_context_desc *context_desc; @@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) unsigned int i; u8 css; u32 cmd_len = E1000_TXD_CMD_DEXT; - __be16 protocol; if (skb->ip_summed != CHECKSUM_PARTIAL) return false; - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) - protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; - else - protocol = skb->protocol; - switch (protocol) { case cpu_to_be16(ETH_P_IP): if (ip_hdr(skb)->protocol == IPPROTO_TCP) @@ -5546,6 +5542,7 @@ static 
netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, int count = 0; int tso; unsigned int f; + __be16 protocol = vlan_get_protocol(skb); if (test_bit(__E1000_DOWN, &adapter->state)) { dev_kfree_skb_any(skb); @@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, first = tx_ring->next_to_use; - tso = e1000_tso(tx_ring, skb); + tso = e1000_tso(tx_ring, skb, protocol); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (tso) tx_flags |= E1000_TX_FLAGS_TSO; - else if (e1000_tx_csum(tx_ring, skb)) + else if (e1000_tx_csum(tx_ring, skb, protocol)) tx_flags |= E1000_TX_FLAGS_CSUM; /* Old method was to assume IPv4 packet by default if TSO was enabled. * 82571 hardware supports TSO capabilities for IPv6 as well... * no longer assume, we must. */ - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tx_flags |= E1000_TX_FLAGS_IPV4; if (unlikely(skb->no_fcs)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 681a9e81ff51..e8ba7470700a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1948,7 +1948,7 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, /* add filter to the list */ if (parent) - hlist_add_after(&parent->fdir_node, &input->fdir_node); + hlist_add_behind(&input->fdir_node, &parent->fdir_node); else hlist_add_head(&input->fdir_node, &pf->fdir_filter_list); diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 6938fc1ad877..5d01db1d789b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -33,6 +33,7 @@ #include <scsi/fc/fc_fcoe.h> #include <scsi/libfc.h> #include <scsi/libfcoe.h> +#include <uapi/linux/dcbnl.h> #include "i40e.h" #include "i40e_fcoe.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 51bc03072ed3..eddec6ba095b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -65,7 +65,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb); * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = { +static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, @@ -4415,13 +4415,13 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) switch (vsi->back->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: - strncpy(speed, "40 Gbps", SPEED_SIZE); + strlcpy(speed, "40 Gbps", SPEED_SIZE); break; case I40E_LINK_SPEED_10GB: - strncpy(speed, "10 Gbps", SPEED_SIZE); + strlcpy(speed, "10 Gbps", SPEED_SIZE); break; case I40E_LINK_SPEED_1GB: - strncpy(speed, "1000 Mbps", SPEED_SIZE); + strlcpy(speed, "1000 Mbps", SPEED_SIZE); break; default: break; @@ -4429,16 +4429,16 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) switch (vsi->back->hw.fc.current_mode) { case I40E_FC_FULL: - strncpy(fc, "RX/TX", FC_SIZE); + strlcpy(fc, "RX/TX", FC_SIZE); break; case I40E_FC_TX_PAUSE: - strncpy(fc, "TX", FC_SIZE); + strlcpy(fc, "TX", FC_SIZE); break; case I40E_FC_RX_PAUSE: - strncpy(fc, "RX", FC_SIZE); + strlcpy(fc, "RX", FC_SIZE); break; default: - 
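/*
 * Editor's sketch (not part of the patch): the e1000/e1000e/i40e hunks
 * above resolve the L3 protocol once with vlan_get_protocol() and pass
 * it into the TSO/checksum helpers instead of re-reading skb->protocol.
 * For an 802.1Q-tagged frame skb->protocol is ETH_P_8021Q, so comparing
 * it against ETH_P_IP silently disables the IPv4 offload flags.  The
 * flag values below are illustrative stand-ins for the driver defines.
 */
#include <linux/if_vlan.h>

#define EXAMPLE_TX_FLAGS_IPV4	0x1	/* stands in for E1000_TX_FLAGS_IPV4 */
#define EXAMPLE_TX_FLAGS_IPV6	0x2	/* illustrative only */

static u32 example_l3_tx_flags(struct sk_buff *skb)
{
	__be16 protocol = vlan_get_protocol(skb);	/* inner proto if VLAN tagged */

	if (protocol == htons(ETH_P_IP))
		return EXAMPLE_TX_FLAGS_IPV4;
	if (protocol == htons(ETH_P_IPV6))
		return EXAMPLE_TX_FLAGS_IPV6;
	return 0;
}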
strncpy(fc, "None", FC_SIZE); + strlcpy(fc, "None", FC_SIZE); break; } @@ -5839,7 +5839,7 @@ static void i40e_send_version(struct i40e_pf *pf) dv.minor_version = DRV_VERSION_MINOR; dv.build_version = DRV_VERSION_BUILD; dv.subbuild_version = 0; - strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); + strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); } @@ -6293,7 +6293,7 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) if (alloc_qvectors) { /* allocate memory for q_vector pointers */ - size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors; + size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; vsi->q_vectors = kzalloc(size, GFP_KERNEL); if (!vsi->q_vectors) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 97bda3dffd49..25c4f9a3011f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -251,9 +251,9 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. **/ -i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 words, void *data, - bool last_command) +static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) { i40e_status ret_code = I40E_ERR_NVM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index bb7fe98b3a6c..537b6216971d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -247,7 +247,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) u32 prttsyn_stat; int n; - if (pf->flags & I40E_FLAG_PTP) + if (!(pf->flags & I40E_FLAG_PTP)) return; prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a51aa37b7b5a..369848e107f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, goto out_drop; /* obtain protocol of skb */ - protocol = skb->protocol; + protocol = vlan_get_protocol(skb); /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_bi[tx_ring->next_to_use]; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 89672551dce9..3ac6a0d2f143 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1003,11 +1003,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { - struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + struct i40e_pf *pf; + struct i40e_hw *hw; + int abs_vf_id; i40e_status aq_ret; + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return -EINVAL; + + pf = vf->pf; + hw = &pf->hw; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_invalid_msgs++; @@ -1928,17 +1936,20 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, { struct i40e_hw *hw = 
&pf->hw; struct i40e_vf *vf = pf->vf; - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; int i; - for (i = 0; i < pf->num_alloc_vfs; i++) { + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* Not all vfs are enabled so skip the ones that are not */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + continue; + /* Ignore return value on purpose - a given VF may fail, but * we need to keep going and send to all of them */ i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); - vf++; - abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; } } @@ -1954,12 +1965,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf = pf->vf; struct i40e_link_status *ls = &pf->hw.phy.link_info; - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; int i; pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; - for (i = 0; i < pf->num_alloc_vfs; i++) { + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = @@ -1972,8 +1983,6 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); - vf++; - abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; } } @@ -2002,7 +2011,18 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) void i40e_vc_notify_vf_reset(struct i40e_vf *vf) { struct i40e_virtchnl_pf_event pfe; - int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; + int abs_vf_id; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return; + + /* verify if the VF is in either init or active before proceeding */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + return; + + abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 79bf96ca6489..95a3ec236b49 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, goto out_drop; /* obtain protocol of skb */ - protocol = skb->protocol; + protocol = vlan_get_protocol(skb); /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_bi[tx_ring->next_to_use]; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index ab15f4d07e41..38429fae4fcf 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -49,7 +49,7 @@ static const char i40evf_copyright[] = * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = { +static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, /* required last entry */ {0, } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index d608599e123a..63c807c9b21c 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ 
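/*
 * Editor's sketch (assumes the i40e driver's internal types; field
 * names as in the hunks above): the virtchnl changes add two defensive
 * patterns - validate the VF pointer and index before dereferencing,
 * and, when walking pf->vf[], compute the absolute VF id from the
 * element of the current iteration while skipping VFs that are neither
 * initialized nor active.  The old loop advanced the pointer after use
 * and recomputed the id from the next element, reading past the array
 * on the final pass.
 */
static void example_vf_broadcast(struct i40e_pf *pf)
{
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;

		if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
			continue;	/* VF not brought up yet */

		/* ... i40e_aq_send_msg_to_vf(&pf->hw, abs_vf_id, ...) ... */
	}
}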
b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2853,7 +2853,7 @@ static const struct pci_error_handlers igbvf_err_handler = { .resume = igbvf_io_resume, }; -static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = { +static const struct pci_device_id igbvf_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf }, { } /* terminate list */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 60801273915c..055961b0f24b 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -53,7 +53,7 @@ MODULE_PARM_DESC(copybreak, * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = { +static const struct pci_device_id ixgb_pci_tbl[] = { {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 94a1c07efeb0..e4100b5737b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2517,7 +2517,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* add filter to the list */ if (parent) - hlist_add_after(&parent->fdir_node, &input->fdir_node); + hlist_add_behind(&input->fdir_node, &parent->fdir_node); else hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5384ed30298a..87bd53fdd209 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -84,7 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { +static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 75467f83772c..c22a00c3621a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -76,7 +76,7 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { +static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, /* required last entry */ diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b78378cea5e3..4a1be34d7214 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3334,7 +3334,7 @@ static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); #define JME_PM_OPS NULL #endif -static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { +static const struct pci_device_id jme_pci_tbl[] = { { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, { 
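/*
 * Editor's note: the i40e and ixgbe ethtool hunks above replace
 * hlist_add_after() with hlist_add_behind().  Besides the rename, the
 * argument order is swapped: the node being inserted now comes first
 * and the existing node it goes behind comes second, matching
 * hlist_add_before() and list_add().  Minimal sketch with illustrative
 * types:
 */
#include <linux/list.h>

struct example_fdir_filter {
	struct hlist_node fdir_node;
	u16 sw_idx;
};

static void example_fdir_insert(struct hlist_head *list,
				struct example_fdir_filter *input,
				struct example_fdir_filter *parent)
{
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node, list);
}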
} diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index dadd9a5f6323..ade067de1689 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -20,6 +20,7 @@ #include <linux/mbus.h> #include <linux/module.h> #include <linux/interrupt.h> +#include <linux/if_vlan.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/io.h> @@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_PARTIAL) { int ip_hdr_len = 0; + __be16 l3_proto = vlan_get_protocol(skb); u8 l4_proto; - if (skb->protocol == htons(ETH_P_IP)) { + if (l3_proto == htons(ETH_P_IP)) { struct iphdr *ip4h = ip_hdr(skb); /* Calculate IPv4 checksum and L4 checksum */ ip_hdr_len = ip4h->ihl; l4_proto = ip4h->protocol; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (l3_proto == htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = ipv6_hdr(skb); /* Read l4_protocol from one of IPv6 extra headers */ @@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) return MVNETA_TX_L4_CSUM_NOT; return mvneta_txq_desc_csum(skb_network_offset(skb), - skb->protocol, ip_hdr_len, l4_proto); + l3_proto, ip_hdr_len, l4_proto); } return MVNETA_TX_L4_CSUM_NOT; @@ -2969,14 +2971,14 @@ static int mvneta_probe(struct platform_device *pdev) /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. */ - phy_node = dn; + phy_node = of_node_get(dn); } phy_mode = of_get_phy_mode(dn); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy-mode\n"); err = -EINVAL; - goto err_free_irq; + goto err_put_phy_node; } dev->tx_queue_len = MVNETA_MAX_TXD; @@ -2992,7 +2994,7 @@ static int mvneta_probe(struct platform_device *pdev) pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); - goto err_free_irq; + goto err_put_phy_node; } clk_prepare_enable(pp->clk); @@ -3071,6 +3073,8 @@ err_free_stats: free_percpu(pp->stats); err_clk: clk_disable_unprepare(pp->clk); +err_put_phy_node: + of_node_put(phy_node); err_free_irq: irq_dispose_mapping(dev->irq); err_free_netdev: @@ -3088,6 +3092,7 @@ static int mvneta_remove(struct platform_device *pdev) clk_disable_unprepare(pp->clk); free_percpu(pp->stats); irq_dispose_mapping(dev->irq); + of_node_put(pp->phy_node); free_netdev(dev); return 0; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index e912b6887d40..24b242277ea1 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -82,7 +82,7 @@ static int debug = -1; /* defaults above */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = { +static const struct pci_device_id skge_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */ #ifdef CONFIG_SKGE_GENESIS diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 69693384b58c..dba48a5ce7ab 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -101,7 +101,7 @@ static int legacy_pme = 0; module_param(legacy_pme, int, 0); MODULE_PARM_DESC(legacy_pme, "Legacy power management"); -static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = { +static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ { 
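/*
 * Editor's sketch (property name and shape are illustrative): the
 * mvneta hunks above take an explicit reference on the device-tree node
 * reused for a fixed PHY (of_node_get()) and drop it on every probe
 * error path and in remove (of_node_put()), keeping the node refcount
 * balanced.  General pattern:
 */
#include <linux/of.h>

static struct device_node *example_get_phy_node(struct device_node *dn)
{
	struct device_node *phy_node;

	phy_node = of_parse_phandle(dn, "phy", 0);	/* takes a reference */
	if (!phy_node)
		phy_node = of_node_get(dn);	/* fixed PHY: reuse MAC node, add a ref */

	/* every failure path after this point must call of_node_put(phy_node) */
	return phy_node;
}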
PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */ @@ -1622,11 +1622,10 @@ static int sky2_alloc_buffers(struct sky2_port *sky2) if (!sky2->tx_ring) goto nomem; - sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, - &sky2->rx_le_map); + sky2->rx_le = pci_zalloc_consistent(hw->pdev, RX_LE_BYTES, + &sky2->rx_le_map); if (!sky2->rx_le) goto nomem; - memset(sky2->rx_le, 0, RX_LE_BYTES); sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index fb0bf867f69f..923c4878461e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1311,6 +1311,15 @@ static struct mlx4_cmd_info cmd_info[] = { .wrapper = mlx4_MAD_IFC_wrapper }, { + .opcode = MLX4_CMD_MAD_DEMUX, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, + { .opcode = MLX4_CMD_QUERY_IF_STAT, .has_inbox = false, .has_outbox = true, @@ -2380,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( } EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); +static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + 1; + int max_port = min_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port < min_port) + port = min_port; + else if (port >= max_port) + port = max_port - 1; + + return port; +} + int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2393,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->mac = mac; mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", @@ -2419,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; if ((0 == vlan) && (0 == qos)) @@ -2446,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, struct mlx4_priv *priv; priv = mlx4_priv(dev); + port = mlx4_slaves_closest_port(dev, slave, port); vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (MLX4_VGT != vp_oper->state.default_vlan) { @@ -2473,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->spoofchk = setting; @@ -2526,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); switch (link_state) { case IFLA_VF_LINK_STATE_AUTO: /* get current link state */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e22f24f784fc..35ff2925110a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ 
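/*
 * Editor's note: the sky2 hunk above (and the ksz884x and netxen hunks
 * further down) replaces pci_alloc_consistent() + memset() with
 * pci_zalloc_consistent(), which hands back zeroed DMA-coherent memory
 * in one call.  Sketch:
 */
#include <linux/pci.h>

static void *example_alloc_ring(struct pci_dev *pdev, size_t bytes,
				dma_addr_t *dma)
{
	/* old form:
	 *	p = pci_alloc_consistent(pdev, bytes, dma);
	 *	if (p)
	 *		memset(p, 0, bytes);
	 */
	return pci_zalloc_consistent(pdev, bytes, dma);
}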
b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, struct mlx4_en_dev *mdev = priv->mdev; int err; + if (pause->autoneg) + return -EINVAL; + priv->prof->tx_pause = pause->tx_pause != 0; priv->prof->rx_pause = pause->rx_pause != 0; err = mlx4_SET_PORT_general(mdev->dev, priv->port, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bb536aa613f4..abddcf8c40aa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad int qpn, u64 *reg_id) { int err; - struct mlx4_spec_list spec_eth_outer = { {NULL} }; - struct mlx4_spec_list spec_vxlan = { {NULL} }; - struct mlx4_spec_list spec_eth_inner = { {NULL} }; - - struct mlx4_net_trans_rule rule = { - .queue_mode = MLX4_NET_TRANS_Q_FIFO, - .exclusive = 0, - .allow_loopback = 1, - .promisc_mode = MLX4_FS_REGULAR, - .priority = MLX4_DOMAIN_NIC, - }; - - __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return 0; /* do nothing */ - rule.port = priv->port; - rule.qpn = qpn; - INIT_LIST_HEAD(&rule.list); - - spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; - memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); - memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); - - spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ - spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ - - list_add_tail(&spec_eth_outer.list, &rule.list); - list_add_tail(&spec_vxlan.list, &rule.list); - list_add_tail(&spec_eth_inner.list, &rule.list); - - err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id); + err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, + MLX4_DOMAIN_NIC, reg_id); if (err) { en_err(priv, "failed to add vxlan steering rule, err %d\n", err); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 688e1eabab29..494753e44ae3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -136,7 +136,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [7] = "FSM (MAC anti-spoofing) support", [8] = "Dynamic QP updates support", [9] = "Device managed flow steering IPoIB support", - [10] = "TCP/IP offloads/flow-steering for VXLAN support" + [10] = "TCP/IP offloads/flow-steering for VXLAN support", + [11] = "MAD DEMUX (Secure-Host) support" }; int i; @@ -571,6 +572,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d #define QUERY_DEV_CAP_VXLAN 0x9e +#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -748,6 +750,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(dev_cap->max_counters, outbox, QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); + MLX4_GET(field32, outbox, + QUERY_DEV_CAP_MAD_DEMUX_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX; + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; @@ -2016,3 +2023,85 @@ void mlx4_opreq_action(struct work_struct *work) out: mlx4_free_cmd_mailbox(dev, mailbox); } + 
+static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox) +{ +#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 +#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 +#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 +#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 + + u32 set_attr_mask, getresp_attr_mask; + u32 trap_attr_mask, traprepress_attr_mask; + + MLX4_GET(set_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", + set_attr_mask); + + MLX4_GET(getresp_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", + getresp_attr_mask); + + MLX4_GET(trap_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", + trap_attr_mask); + + MLX4_GET(traprepress_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", + traprepress_attr_mask); + + if (set_attr_mask && getresp_attr_mask && trap_attr_mask && + traprepress_attr_mask) + return 1; + + return 0; +} + +int mlx4_config_mad_demux(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + int secure_host_active; + int err; + + /* Check if mad_demux is supported */ + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); + return -ENOMEM; + } + + /* Query mad_demux to find out which MADs are handled by internal sma */ + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, + MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) { + mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", + err); + goto out; + } + + secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox); + + /* Config mad_demux to handle all MADs returned by the query above */ + err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, + MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) { + mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); + goto out; + } + + if (secure_host_active) + mlx4_warn(dev, "HCA operating in secure-host mode. 
SMP firewall activated.\n"); +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 80b8c5f30e4e..871e3a5bda38 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); #endif /* CONFIG_PCI_MSI */ static uint8_t num_vfs[3] = {0, 0, 0}; -static int num_vfs_argc = 3; +static int num_vfs_argc; module_param_array(num_vfs, byte , &num_vfs_argc, 0444); MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" "num_vfs=port1,port2,port1+2"); static uint8_t probe_vf[3] = {0, 0, 0}; -static int probe_vfs_argc = 3; +static int probe_vfs_argc; module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" "probe_vf=port1,port2,port1+2"); @@ -1853,6 +1853,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); goto err_mr_table_free; } + err = mlx4_config_mad_demux(dev); + if (err) { + mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); + goto err_mcg_table_free; + } } err = mlx4_init_eq_table(dev); @@ -2727,7 +2732,7 @@ int mlx4_restart_one(struct pci_dev *pdev) return __mlx4_init_one(pdev, pci_dev_data); } -static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { +static const struct pci_device_id mlx4_pci_table[] = { /* MT25408 "Hermon" SDR */ { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, /* MT25408 "Hermon" DDR */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index d80e7a6fac74..ca0f98c95105 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) } EXPORT_SYMBOL_GPL(mlx4_flow_detach); +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, + int port, int qpn, u16 prio, u64 *reg_id) +{ + int err; + struct mlx4_spec_list spec_eth_outer = { {NULL} }; + struct mlx4_spec_list spec_vxlan = { {NULL} }; + struct mlx4_spec_list spec_eth_inner = { {NULL} }; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + }; + + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + rule.port = port; + rule.qpn = qpn; + rule.priority = prio; + INIT_LIST_HEAD(&rule.list); + + spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); + memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + + spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ + spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ + + list_add_tail(&spec_eth_outer.list, &rule.list); + list_add_tail(&spec_vxlan.list, &rule.list); + list_add_tail(&spec_eth_inner.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + return err; +} +EXPORT_SYMBOL(mlx4_tunnel_steer_add); + int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, u32 max_range_qpn) { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 13fbcd03c3e4..b508c7887ef8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -274,6 +274,8 @@ struct mlx4_icm_table { #define 
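/*
 * Editor's note (example names are illustrative): the mlx4 main.c hunk
 * above initializes num_vfs_argc and probe_vfs_argc to 0 instead of 3.
 * The third argument of module_param_array() is updated by the module
 * loader with the number of values the user actually passed; seeding it
 * with 3 made the driver behave as if three per-port values were always
 * supplied even when the parameter was absent.
 */
#include <linux/module.h>

static unsigned char example_vals[3];
static int example_argc;	/* stays 0 unless the user passes values */
module_param_array(example_vals, byte, &example_argc, 0444);
MODULE_PARM_DESC(example_vals, "up to three per-port values");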
MLX4_MPT_FLAG_PHYSICAL (1 << 9) #define MLX4_MPT_FLAG_REGION (1 << 8) +#define MLX4_MPT_PD_MASK (0x1FFFFUL) +#define MLX4_MPT_PD_VF_MASK (0xFE0000UL) #define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) #define MLX4_MPT_PD_FLAG_RAE (1 << 28) #define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) @@ -1306,5 +1308,6 @@ void mlx4_init_quotas(struct mlx4_dev *dev); int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); /* Returns the VF index of slave */ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); +int mlx4_config_mad_demux(struct mlx4_dev *dev); #endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 2839abb878a6..193a6adb5d04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -298,6 +298,130 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } +/* Must protect against concurrent access */ +int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry ***mpt_entry) +{ + int err; + int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); + struct mlx4_cmd_mailbox *mailbox = NULL; + + if (mmr->enabled != MLX4_MPT_EN_HW) + return -EINVAL; + + err = mlx4_HW2SW_MPT(dev, NULL, key); + if (err) { + mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); + mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); + return err; + } + + mmr->enabled = MLX4_MPT_EN_SW; + + if (!mlx4_is_mfunc(dev)) { + **mpt_entry = mlx4_table_find( + &mlx4_priv(dev)->mr_table.dmpt_table, + key, NULL); + } else { + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR_OR_NULL(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, key, + 0, MLX4_CMD_QUERY_MPT, + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto free_mailbox; + + *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf; + } + + if (!(*mpt_entry) || !(**mpt_entry)) { + err = -ENOMEM; + goto free_mailbox; + } + + return 0; + +free_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt); + +int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry **mpt_entry) +{ + int err; + + if (!mlx4_is_mfunc(dev)) { + /* Make sure any changes to this entry are flushed */ + wmb(); + + *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW; + + /* Make sure the new status is written */ + wmb(); + + err = mlx4_SYNC_TPT(dev); + } else { + int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); + + struct mlx4_cmd_mailbox *mailbox = + container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, + buf); + + err = mlx4_SW2HW_MPT(dev, mailbox, key); + } + + if (!err) { + mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; + mmr->enabled = MLX4_MPT_EN_HW; + } + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); + +void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev, + struct mlx4_mpt_entry **mpt_entry) +{ + if (mlx4_is_mfunc(dev)) { + struct mlx4_cmd_mailbox *mailbox = + container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, + buf); + mlx4_free_cmd_mailbox(dev, mailbox); + } +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt); + +int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, + u32 pdn) +{ + u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK; + /* The wrapper function will put the slave's id here */ + if (mlx4_is_mfunc(dev)) + pd_flags &= ~MLX4_MPT_PD_VF_MASK; + + mpt_entry->pd_flags = 
cpu_to_be32(pd_flags | + (pdn & MLX4_MPT_PD_MASK) + | MLX4_MPT_PD_FLAG_EN_INV); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd); + +int mlx4_mr_hw_change_access(struct mlx4_dev *dev, + struct mlx4_mpt_entry *mpt_entry, + u32 access) +{ + u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) | + (access & MLX4_PERM_MASK); + + mpt_entry->flags = cpu_to_be32(flags); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access); + static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) @@ -463,6 +587,53 @@ int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) } EXPORT_SYMBOL_GPL(mlx4_mr_free); +void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + mlx4_mtt_cleanup(dev, &mr->mtt); +} +EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); + +int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, + u64 iova, u64 size, int npages, + int page_shift, struct mlx4_mpt_entry *mpt_entry) +{ + int err; + + mpt_entry->start = cpu_to_be64(iova); + mpt_entry->length = cpu_to_be64(size); + mpt_entry->entity_size = cpu_to_be32(page_shift); + + err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); + if (err) + return err; + + mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | + MLX4_MPT_PD_FLAG_EN_INV); + mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | + MLX4_MPT_FLAG_SW_OWNS); + if (mr->mtt.order < 0) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); + mpt_entry->mtt_addr = 0; + } else { + mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, + &mr->mtt)); + if (mr->mtt.page_shift == 0) + mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); + } + if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { + /* fast register MR in free state */ + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | + MLX4_MPT_PD_FLAG_RAE); + } else { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); + } + mr->enabled = MLX4_MPT_EN_SW; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write); + int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) { struct mlx4_cmd_mailbox *mailbox; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 9ba0c1ca10d5..94eeb2c7d7e4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev, int i; for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { - if ((mac & MLX4_MAC_MASK) == + if (table->refs[i] && + (MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) return i; } @@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) mutex_lock(&table->mutex); for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { - if (free < 0 && !table->entries[i]) { - free = i; + if (!table->refs[i]) { + if (free < 0) + free = i; continue; } - if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + if ((MLX4_MAC_MASK & mac) == + (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { /* MAC already registered, increment ref count */ err = i; ++table->refs[i]; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 0dc31d85fc3b..2301365c79c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -390,13 +390,14 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); #define MLX4_UPDATE_QP_SUPPORTED_ATTRS 
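/*
 * Editor's sketch (assumed calling order, error unwinding trimmed; the
 * prototypes are the ones added by this series): the new mlx4_mr_hw_*
 * helpers above form a get/modify/put sequence for re-registering a
 * memory region - move the MPT to SW ownership and obtain a pointer to
 * the entry, edit it (PD, access flags, translation), write it back to
 * HW ownership, then release the mailbox if one was allocated.
 */
#include <linux/mlx4/device.h>

static int example_mr_change_pd(struct mlx4_dev *dev, struct mlx4_mr *mr,
				u32 new_pdn)
{
	struct mlx4_mpt_entry *mpt;
	struct mlx4_mpt_entry **pmpt = &mpt;
	int err;

	err = mlx4_mr_hw_get_mpt(dev, mr, &pmpt);	/* HW -> SW ownership */
	if (err)
		return err;

	mlx4_mr_hw_change_pd(dev, *pmpt, new_pdn);	/* edit the SW copy */

	err = mlx4_mr_hw_write_mpt(dev, mr, pmpt);	/* SW -> HW ownership */
	mlx4_mr_hw_put_mpt(dev, pmpt);			/* frees the mailbox on mfunc */
	return err;
}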
MLX4_UPDATE_QP_SMAC -int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, +int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_update_qp_context *cmd; u64 pri_addr_path_mask = 0; + u64 qp_mask = 0; int err = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, cmd->qp_context.pri_path.grh_mylmc = params->smac_index; } + if (attr & MLX4_UPDATE_QP_VSD) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD; + if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE) + cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); + } + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + cmd->qp_mask = cpu_to_be64(qp_mask); - err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, + err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 0efc1368e5a8..5d2498dcf536 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; + u32 qp_type; int port; port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; priv = mlx4_priv(dev); vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; if (MLX4_VGT != vp_oper->state.default_vlan) { /* the reserved QPs (special, proxy, tunnel) @@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (mlx4_is_qp_reserved(dev, qpn)) return 0; - /* force strip vlan by clear vsd */ - qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */ + if (qp_type == MLX4_QP_ST_UD || + (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { + if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { + *(__be32 *)inbox->buf = + cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | + MLX4_QP_OPTPAR_VLAN_STRIPPING); + qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + } else { + struct mlx4_update_qp_params params = {.flags = 0}; + + mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); + } + } if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { @@ -2613,12 +2627,34 @@ int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, if (err) return err; - if (mpt->com.from_state != RES_MPT_HW) { + if (mpt->com.from_state == RES_MPT_MAPPED) { + /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do + * that, the VF must read the MPT. But since the MPT entry memory is not + * in the VF's virtual memory space, it must use QUERY_MPT to obtain the + * entry contents. To guarantee that the MPT cannot be changed, the driver + * must perform HW2SW_MPT before this query and return the MPT entry to HW + * ownership fofollowing the change. The change here allows the VF to + * perform QUERY_MPT also when the entry is in SW ownership. 
+ */ + struct mlx4_mpt_entry *mpt_entry = mlx4_table_find( + &mlx4_priv(dev)->mr_table.dmpt_table, + mpt->key, NULL); + + if (NULL == mpt_entry || NULL == outbox->buf) { + err = -EINVAL; + goto out; + } + + memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); + + err = 0; + } else if (mpt->com.from_state == RES_MPT_HW) { + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + } else { err = -EBUSY; goto out; } - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); out: put_res(dev, slave, id, RES_MPT); @@ -3976,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, } port = (rqp->sched_queue >> 6 & 1) + 1; - smac_index = cmd->qp_context.pri_path.grh_mylmc; - err = mac_find_smac_ix_in_slave(dev, slave, port, - smac_index, &mac); - if (err) { - mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", - qpn, smac_index); - goto err_mac; + + if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } } err = mlx4_cmd(dev, inbox->dma, @@ -4796,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; upd_context = mailbox->buf; - upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD); + upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 064a48d0c368..f1ebed6c63b1 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4409,14 +4409,13 @@ static int ksz_alloc_desc(struct dev_info *adapter) DESC_ALIGNMENT; adapter->desc_pool.alloc_virt = - pci_alloc_consistent( - adapter->pdev, adapter->desc_pool.alloc_size, - &adapter->desc_pool.dma_addr); + pci_zalloc_consistent(adapter->pdev, + adapter->desc_pool.alloc_size, + &adapter->desc_pool.dma_addr); if (adapter->desc_pool.alloc_virt == NULL) { adapter->desc_pool.alloc_size = 0; return 1; } - memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size); /* Align to the next cache line boundary. */ offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ? 
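/*
 * Editor's note (the enum value below is illustrative): the
 * resource_tracker hunk above changes cpu_to_be64(MLX4_UPD_QP_MASK_VSD)
 * to cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD), and the qp.c hunk
 * builds qp_mask the same way.  The constant is a bit index, not a
 * mask, so the old code stored the index value in the field instead of
 * setting the corresponding bit.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

enum { EXAMPLE_UPD_QP_MASK_VSD = 2 };	/* a bit index */

static __be64 example_build_qp_mask(void)
{
	u64 qp_mask = 0;

	qp_mask |= 1ULL << EXAMPLE_UPD_QP_MASK_VSD;	/* sets bit 2 (0x4) */
	return cpu_to_be64(qp_mask);			/* not cpu_to_be64(2) */
}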
@@ -7222,7 +7221,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state) static char pcidev_name[] = "ksz884xp"; -static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = { +static const struct pci_device_id pcidev_table[] = { { PCI_VENDOR_ID_MICREL_KS, 0x8841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_MICREL_KS, 0x8842, diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 5020fd47825d..2f12c88c66ab 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) int rx_head = priv->rx_head; int rx = 0; - while (1) { + while (rx < budget) { desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); desc0 = readl(desc + RX_REG_OFFSET_DESC0); @@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) net_dbg_ratelimited("packet error\n"); priv->stats.rx_dropped++; priv->stats.rx_errors++; - continue; + goto rx_next; } len = desc0 & RX_DESC0_FRAME_LEN_MASK; @@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) if (len > RX_BUF_SIZE) len = RX_BUF_SIZE; - skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size); + dma_sync_single_for_cpu(&ndev->dev, + priv->rx_mapping[rx_head], + priv->rx_buf_size, DMA_FROM_DEVICE); + skb = netdev_alloc_skb_ip_align(ndev, len); + if (unlikely(!skb)) { - net_dbg_ratelimited("build_skb failed\n"); + net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n"); priv->stats.rx_dropped++; priv->stats.rx_errors++; + goto rx_next; } + memcpy(skb->data, priv->rx_buf[rx_head], len); skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(&priv->napi, skb); @@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) if (desc0 & RX_DESC0_MULTICAST) priv->stats.multicast++; +rx_next: writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); rx_head = RX_NEXT(rx_head); priv->rx_head = rx_head; - - if (rx >= budget) - break; } if (rx < budget) { - napi_gro_flush(napi, false); - __napi_complete(napi); + napi_complete(napi); } priv->reg_imr |= RPKT_FINISH_M; @@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) len = ETH_ZLEN; } - txdes1 = readl(desc + TX_REG_OFFSET_DESC1); - txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS; - txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE); - txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK); + dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head], + priv->tx_buf_size, DMA_TO_DEVICE); + + txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); + if (tx_head == TX_DESC_NUM_MASK) + txdes1 |= TX_DESC1_END; writel(txdes1, desc + TX_REG_OFFSET_DESC1); writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); @@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev) spin_lock_init(&priv->txlock); priv->tx_buf_size = TX_BUF_SIZE; - priv->rx_buf_size = RX_BUF_SIZE + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + priv->rx_buf_size = RX_BUF_SIZE; priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index f3d5d79f1cd1..9e7e3f1dce3e 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -574,6 +574,7 @@ myri10ge_validate_firmware(struct myri10ge_priv *mgp, /* save firmware 
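/*
 * Editor's sketch (device specifics elided): the moxart_rx_poll hunks
 * above rework the poll loop into the usual NAPI shape - process at
 * most `budget` packets, funnel error frames through a common rx_next
 * path so their descriptor is still returned to the hardware, and call
 * napi_complete() plus re-enable the RX interrupt only when the budget
 * was not exhausted.
 */
#include <linux/netdevice.h>

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	int rx = 0;

	while (rx < budget) {
		/* stop early if the next descriptor is still owned by the DMA */
		/* on error: bump rx_errors and fall through to the hand-back */
		/* on success: build the skb, napi_gro_receive(), count it */
		rx++;
		/* hand the descriptor back to the hardware */
	}

	if (rx < budget) {
		napi_complete(napi);
		/* unmask the RX interrupt here */
	}
	return rx;
}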
version for ethtool */ strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version)); + mgp->fw_version[sizeof(mgp->fw_version) - 1] = '\0'; sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major, &mgp->fw_ver_minor, &mgp->fw_ver_tiny); @@ -872,6 +873,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) return -ENOMEM; dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { + __free_page(dmatest_page); + return -ENOMEM; + } /* Run a small DMA test. * The magic multipliers to the length tell the firmware @@ -1293,6 +1298,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, int bytes, int watchdog) { struct page *page; + dma_addr_t bus; int idx; #if MYRI10GE_ALLOC_SIZE > 4096 int end_offset; @@ -1317,11 +1323,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, rx->watchdog_needed = 1; return; } + + bus = pci_map_page(mgp->pdev, page, 0, + MYRI10GE_ALLOC_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + __free_pages(page, MYRI10GE_ALLOC_ORDER); + if (rx->fill_cnt - rx->cnt < 16) + rx->watchdog_needed = 1; + return; + } + rx->page = page; rx->page_offset = 0; - rx->bus = pci_map_page(mgp->pdev, page, 0, - MYRI10GE_ALLOC_SIZE, - PCI_DMA_FROMDEVICE); + rx->bus = bus; + } rx->info[idx].page = rx->page; rx->info[idx].page_offset = rx->page_offset; @@ -2763,6 +2779,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, mb(); } +static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, + struct myri10ge_tx_buf *tx, int idx) +{ + unsigned int len; + int last_idx; + + /* Free any DMA resources we've alloced and clear out the skb slot */ + last_idx = (idx + 1) & tx->mask; + idx = tx->req & tx->mask; + do { + len = dma_unmap_len(&tx->info[idx], len); + if (len) { + if (tx->info[idx].skb != NULL) + pci_unmap_single(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + dma_unmap_len_set(&tx->info[idx], len, 0); + tx->info[idx].skb = NULL; + } + idx = (idx + 1) & tx->mask; + } while (idx != last_idx); +} + /* * Transmit a packet. 
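/*
 * Editor's note: the myri10ge hunks above check every pci_map_page()/
 * pci_map_single() result with pci_dma_mapping_error() before handing
 * the address to the NIC, and factor the unwind loop into
 * myri10ge_unmap_tx_dma().  Mapping can fail (for instance behind an
 * IOMMU), so an unchecked dma_addr_t may be garbage.  Minimal pattern:
 */
#include <linux/pci.h>
#include <linux/skbuff.h>

static int example_map_head(struct pci_dev *pdev, struct sk_buff *skb,
			    dma_addr_t *bus)
{
	*bus = pci_map_single(pdev, skb->data, skb_headlen(skb),
			      PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, *bus)))
		return -ENOMEM;	/* caller drops the skb or unwinds earlier maps */
	return 0;
}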
We need to split the packet so that a single * segment does not cross myri10ge->tx_boundary, so this makes segment @@ -2786,7 +2831,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, u32 low; __be32 high_swapped; unsigned int len; - int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; + int idx, avail, frag_cnt, frag_idx, count, mss, max_segments; u16 pseudo_hdr_offset, cksum_offset, queue; int cum_len, seglen, boundary, rdma_count; u8 flags, odd_flag; @@ -2883,9 +2928,12 @@ again: /* map the skb for DMA */ len = skb_headlen(skb); + bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) + goto drop; + idx = tx->req & tx->mask; tx->info[idx].skb = skb; - bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); dma_unmap_addr_set(&tx->info[idx], bus, bus); dma_unmap_len_set(&tx->info[idx], len, len); @@ -2984,12 +3032,16 @@ again: break; /* map next fragment for DMA */ - idx = (count + tx->req) & tx->mask; frag = &skb_shinfo(skb)->frags[frag_idx]; frag_idx++; len = skb_frag_size(frag); bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + myri10ge_unmap_tx_dma(mgp, tx, idx); + goto drop; + } + idx = (count + tx->req) & tx->mask; dma_unmap_addr_set(&tx->info[idx], bus, bus); dma_unmap_len_set(&tx->info[idx], len, len); } @@ -3020,31 +3072,8 @@ again: return NETDEV_TX_OK; abort_linearize: - /* Free any DMA resources we've alloced and clear out the skb - * slot so as to not trip up assertions, and to avoid a - * double-free if linearizing fails */ + myri10ge_unmap_tx_dma(mgp, tx, idx); - last_idx = (idx + 1) & tx->mask; - idx = tx->req & tx->mask; - tx->info[idx].skb = NULL; - do { - len = dma_unmap_len(&tx->info[idx], len); - if (len) { - if (tx->info[idx].skb != NULL) - pci_unmap_single(mgp->pdev, - dma_unmap_addr(&tx->info[idx], - bus), len, - PCI_DMA_TODEVICE); - else - pci_unmap_page(mgp->pdev, - dma_unmap_addr(&tx->info[idx], - bus), len, - PCI_DMA_TODEVICE); - dma_unmap_len_set(&tx->info[idx], len, 0); - tx->info[idx].skb = NULL; - } - idx = (idx + 1) & tx->mask; - } while (idx != last_idx); if (skb_is_gso(skb)) { netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); goto drop; @@ -4213,7 +4242,7 @@ static void myri10ge_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 -static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = { +static const struct pci_device_id myri10ge_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, {PCI_DEVICE (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 291fba8b9f07..b83f7c0fcf99 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -247,7 +247,7 @@ static struct { { "NatSemi DP8381[56]", 0, 24 }, }; -static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = { +static const struct pci_device_id natsemi_pci_tbl[] = { { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { } /* terminate list */ diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 19bb8244b9e3..2552e550a78c 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -2260,7 +2260,7 @@ static void 
ns83820_remove_one(struct pci_dev *pci_dev) free_netdev(ndev); } -static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = { +static const struct pci_device_id ns83820_pci_tbl[] = { { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, }, { 0, }, }; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index be587647c706..f5e4b820128b 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -471,7 +471,7 @@ module_param_array(rts_frm_len, uint, NULL, 0); * S2IO device table. * This table lists all the devices that this driver supports. */ -static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = { +static const struct pci_device_id s2io_tbl[] = { {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, PCI_ANY_ID, PCI_ANY_ID}, {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 2eda153cb1e0..4f40d7b8629e 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -63,7 +63,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O" "Virtualized Server Adapter"); -static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = { +static const struct pci_device_id vxge_id_table[] = { {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID, PCI_ANY_ID}, {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID, diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 9afc536c5734..925b296d8ab8 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6185,7 +6185,7 @@ static void nv_shutdown(struct pci_dev *pdev) #define nv_shutdown NULL #endif /* CONFIG_PM */ -static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { +static const struct pci_device_id pci_tbl[] = { { /* nForce Ethernet Controller */ PCI_DEVICE(0x10DE, 0x01C3), .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8706c0dbd0c3..a44a03c45014 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev) __lpc_eth_clock_enable(pldat, true); + /* Suspended PHY makes LPC ethernet core block, so resume now */ + phy_resume(pldat->phy_dev); + /* Reset and initialize */ __lpc_eth_reset(pldat); __lpc_eth_init(pldat); diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 979c6980639f..a42293092ea4 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) /* Read the hardware TX timestamp if one was recorded */ if (unlikely(re.s.tstamp)) { struct skb_shared_hwtstamps ts; + u64 ns; + memset(&ts, 0, sizeof(ts)); /* Read the timestamp */ - u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); + ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); /* Remove the timestamp from the FIFO */ cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); /* Tell the kernel about the timestamp */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 44c8be1c6805..5f7a35212796 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -7,6 +7,7 @@ config PCH_GBE depends on PCI && (X86_32 || 
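/*
 * Editor's note: the many DEFINE_PCI_DEVICE_TABLE() conversions in this
 * series spell out the declaration the deprecated macro used to wrap,
 * `static const struct pci_device_id name[]`.  Typical shape (the
 * device id below is illustrative):
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x10d3) },	/* vendor/device pair, subids wildcarded */
	{ }				/* required terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);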
COMPILE_TEST) select MII select PTP_1588_CLOCK_PCH + select NET_PTP_CLASSIFY ---help--- This is a gigabit ethernet driver for EG20T PCH. EG20T PCH is the platform controller hub that is used in Intel's diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 73e66838cfef..3b98b263bad0 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2743,7 +2743,7 @@ static struct pch_gbe_privdata pch_gbe_minnow_privdata = { .platform_init = pch_gbe_minnow_platform_init, }; -static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { +static const struct pci_device_id pch_gbe_pcidev_id[] = { {.vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, .subvendor = PCI_VENDOR_ID_CIRCUITCO, diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 9a997e4c3e08..319d9d40f922 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1911,7 +1911,7 @@ static void hamachi_remove_one(struct pci_dev *pdev) } } -static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = { +static const struct pci_device_id hamachi_pci_tbl[] = { { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 69a8dc095072..2d6b148528dd 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -236,7 +236,7 @@ static const struct pci_id_info pci_id_tbl[] = { { } }; -static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = { +static const struct pci_device_id yellowfin_pci_tbl[] = { { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 9abf70d74b31..30d934d66356 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1871,7 +1871,7 @@ static void pasemi_mac_remove(struct pci_dev *pdev) free_netdev(netdev); } -static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = { +static const struct pci_device_id pasemi_mac_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, { }, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 6f6be57f4690..b8d5270359cd 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -129,14 +129,12 @@ netxen_get_minidump_template(struct netxen_adapter *adapter) return NX_RCODE_INVALID_ARGS; } - addr = pci_alloc_consistent(adapter->pdev, size, &md_template_addr); - + addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr); if (!addr) { dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n"); return -ENOMEM; } - memset(addr, 0, size); memset(&cmd, 0, sizeof(cmd)); memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd)); cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 32058614151a..5c4068353f66 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct 
netxen_adapter *adapter) int i, j; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + spin_lock(&adapter->tx_clean_lock); cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; @@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) } cmd_buf++; } + spin_unlock(&adapter->tx_clean_lock); } void netxen_free_sw_resources(struct netxen_adapter *adapter) @@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) break; } - if (count && netif_running(netdev)) { - tx_ring->sw_consumer = sw_consumer; + tx_ring->sw_consumer = sw_consumer; + if (count && netif_running(netdev)) { smp_mb(); if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 5bf05818a12c..5ec5a2b0e989 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -99,7 +99,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p); {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} -static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = { +static const struct pci_device_id netxen_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), ENTRY(PCI_DEVICE_ID_NX2031_4GCU), @@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) return; smp_mb(); - spin_lock(&adapter->tx_clean_lock); netif_carrier_off(netdev); netif_tx_disable(netdev); @@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) netxen_napi_disable(adapter); netxen_release_tx_buffers(adapter); - spin_unlock(&adapter->tx_clean_lock); } /* Usage: During suspend and firmware recovery module */ diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b5d6bc1a8b00..c2f09af5c25b 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -65,7 +65,7 @@ static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); -static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = { +static const struct pci_device_id ql3xxx_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, /* required last entry */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile index a848d2979722..3c2c2c7c1559 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/Makefile +++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile @@ -8,7 +8,7 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \ qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \ qlcnic_83xx_init.o qlcnic_83xx_vnic.o \ - qlcnic_minidump.o qlcnic_sriov_common.o + qlcnic_sriov_common.o qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 16039d1497b8..b84f5ea3d659 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -268,7 +268,7 @@ struct qlcnic_fdt { u16 cksum; u16 unused; u8 model[16]; - u16 mfg_id; + u8 mfg_id; u16 id; u8 flag; u8 erase_cmd; @@ -2362,6 +2362,19 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter) return 
QLC_DEFAULT_VNIC_COUNT; } +static inline void qlcnic_swap32_buffer(u32 *buffer, int count) +{ +#if defined(__BIG_ENDIAN) + u32 *tmp = buffer; + int i; + + for (i = 0; i < count; i++) { + *tmp = swab32(*tmp); + tmp++; + } +#endif +} + #ifdef CONFIG_QLCNIC_HWMON void qlcnic_register_hwmon_dev(struct qlcnic_adapter *); void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index a4a4ec0b68f8..476e4998ef99 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2603,7 +2603,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, } qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, - (addr)); + (addr & 0xFFFF0000)); range = flash_offset + (count * sizeof(u32)); /* Check if data is spread across multiple sectors */ @@ -2753,7 +2753,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter) ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, (u8 *)&adapter->ahw->fdt, count); - + qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count); qlcnic_83xx_unlock_flash(adapter); return ret; } @@ -2788,7 +2788,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, addr1 = (sector_start_addr & 0xFF) << 16; addr2 = (sector_start_addr & 0xFF0000) >> 16; - reversed_addr = addr1 | addr2; + reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00); qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, reversed_addr); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index f33559b72528..3172cdf591fe 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) { u32 idc_params, val; - if (qlcnic_83xx_lockless_flash_read32(adapter, - QLC_83XX_IDC_FLASH_PARAM_ADDR, - (u8 *)&idc_params, 1)) { + if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR, + (u8 *)&idc_params, 1)) { dev_info(&adapter->pdev->dev, "%s:failed to get IDC params from flash\n", __func__); adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; @@ -1378,31 +1377,45 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) { struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; const struct firmware *fw = fw_info->fw; - u32 dest, *p_cache; + u32 dest, *p_cache, *temp; int i, ret = -EIO; + __le32 *temp_le; u8 data[16]; size_t size; u64 addr; + temp = kzalloc(fw->size, GFP_KERNEL); + if (!temp) { + release_firmware(fw); + fw_info->fw = NULL; + return -ENOMEM; + } + + temp_le = (__le32 *)fw->data; + + /* FW image in file is in little endian, swap the data to nullify + * the effect of writel() operation on big endian platform. 
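The qlcnic flash and firmware-load fixes in these hunks all come down to converting little-endian 32-bit words to host byte order before they are consumed, or pushed through writel(), on big-endian machines. A condensed sketch of that conversion step, with the qlcnic bookkeeping omitted:

	__le32 *src = (__le32 *)fw->data;
	size_t i, words = fw->size / sizeof(u32);
	u32 *tmp = kcalloc(words, sizeof(u32), GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;
	for (i = 0; i < words; i++)
		tmp[i] = le32_to_cpu(src[i]);	/* no-op on LE hosts, swab32() on BE */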
+ */ + for (i = 0; i < fw->size / sizeof(u32); i++) + temp[i] = __le32_to_cpu(temp_le[i]); + dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); size = (fw->size & ~0xF); - p_cache = (u32 *)fw->data; + p_cache = temp; addr = (u64)dest; ret = qlcnic_ms_mem_write128(adapter, addr, p_cache, size / 16); if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); - release_firmware(fw); - fw_info->fw = NULL; - return -EIO; + goto exit; } /* alignment check */ if (fw->size & 0xF) { addr = dest + size; for (i = 0; i < (fw->size & 0xF); i++) - data[i] = fw->data[size + i]; + data[i] = temp[size + i]; for (; i < 16; i++) data[i] = 0; ret = qlcnic_ms_mem_write128(adapter, addr, @@ -1410,15 +1423,16 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) if (ret) { dev_err(&adapter->pdev->dev, "MS memory write failed\n"); - release_firmware(fw); - fw_info->fw = NULL; - return -EIO; + goto exit; } } + +exit: release_firmware(fw); fw_info->fw = NULL; + kfree(temp); - return 0; + return ret; } static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 141f116eb868..494e8105adee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_esw_statistics port_stats; struct qlcnic_mac_statistics mac_stats; - int index, ret, length, size, tx_size, ring; + int index, ret, length, size, ring; char *p; - tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN; + memset(data, 0, stats->n_stats * sizeof(u64)); - memset(data, 0, tx_size * sizeof(u64)); for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { - if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { tx_ring = &adapter->tx_ring[ring]; data = qlcnic_fill_tx_queue_stats(data, tx_ring); qlcnic_update_stats(adapter); + } else { + data += QLCNIC_TX_STATS_LEN; } } - memset(data, 0, stats->n_stats * sizeof(u64)); length = QLCNIC_STATS_LEN; for (index = 0; index < length; index++) { p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 59846daf1379..cf08b2de071e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -108,7 +108,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} -static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { +static const struct pci_device_id qlcnic_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index e46fc39d425d..c9f57fb84b9e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -47,15 +47,26 @@ struct qlcnic_common_entry_hdr { u32 type; u32 offset; u32 cap_size; +#if defined(__LITTLE_ENDIAN) u8 mask; u8 rsvd[2]; u8 flags; +#else + u8 flags; + u8 rsvd[2]; + u8 mask; +#endif } __packed; struct __crb { u32 addr; +#if defined(__LITTLE_ENDIAN) 
u8 stride; u8 rsvd1[3]; +#else + u8 rsvd1[3]; + u8 stride; +#endif u32 data_size; u32 no_ops; u32 rsvd2[4]; @@ -63,15 +74,28 @@ struct __crb { struct __ctrl { u32 addr; +#if defined(__LITTLE_ENDIAN) u8 stride; u8 index_a; u16 timeout; +#else + u16 timeout; + u8 index_a; + u8 stride; +#endif u32 data_size; u32 no_ops; +#if defined(__LITTLE_ENDIAN) u8 opcode; u8 index_v; u8 shl_val; u8 shr_val; +#else + u8 shr_val; + u8 shl_val; + u8 index_v; + u8 opcode; +#endif u32 val1; u32 val2; u32 val3; @@ -79,16 +103,27 @@ struct __ctrl { struct __cache { u32 addr; +#if defined(__LITTLE_ENDIAN) u16 stride; u16 init_tag_val; +#else + u16 init_tag_val; + u16 stride; +#endif u32 size; u32 no_ops; u32 ctrl_addr; u32 ctrl_val; u32 read_addr; +#if defined(__LITTLE_ENDIAN) u8 read_addr_stride; u8 read_addr_num; u8 rsvd1[2]; +#else + u8 rsvd1[2]; + u8 read_addr_num; + u8 read_addr_stride; +#endif } __packed; struct __ocm { @@ -122,23 +157,39 @@ struct __mux { struct __queue { u32 sel_addr; +#if defined(__LITTLE_ENDIAN) u16 stride; u8 rsvd[2]; +#else + u8 rsvd[2]; + u16 stride; +#endif u32 size; u32 no_ops; u8 rsvd2[8]; u32 read_addr; +#if defined(__LITTLE_ENDIAN) u8 read_addr_stride; u8 read_addr_cnt; u8 rsvd3[2]; +#else + u8 rsvd3[2]; + u8 read_addr_cnt; + u8 read_addr_stride; +#endif } __packed; struct __pollrd { u32 sel_addr; u32 read_addr; u32 sel_val; +#if defined(__LITTLE_ENDIAN) u16 sel_val_stride; u16 no_ops; +#else + u16 no_ops; + u16 sel_val_stride; +#endif u32 poll_wait; u32 poll_mask; u32 data_size; @@ -153,9 +204,15 @@ struct __mux2 { u32 no_ops; u32 sel_val_mask; u32 read_addr; +#if defined(__LITTLE_ENDIAN) u8 sel_val_stride; u8 data_size; u8 rsvd[2]; +#else + u8 rsvd[2]; + u8 data_size; + u8 sel_val_stride; +#endif } __packed; struct __pollrdmwr { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index f5786d5792df..59a721fba018 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -280,6 +280,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, if (ret != 0) return ret; qlcnic_read_crb(adapter, buf, offset, size); + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } @@ -296,6 +297,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, if (ret != 0) return ret; + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); qlcnic_write_crb(adapter, buf, offset, size); return size; } @@ -329,6 +331,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, return -EIO; memcpy(buf, &data, size); + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } @@ -346,6 +349,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, if (ret != 0) return ret; + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); memcpy(&data, buf, size); if (qlcnic_pci_mem_write_2M(adapter, offset, data)) @@ -412,6 +416,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, if (rem) return QL_STATUS_INVALID_PARAM; + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); pm_cfg = (struct qlcnic_pm_func_cfg *)buf; ret = validate_pm_config(adapter, pm_cfg, count); @@ -474,6 +479,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, pm_cfg[pci_func].dest_npar = 0; pm_cfg[pci_func].pci_func = i; } + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } @@ -555,6 +561,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct 
file *file, if (rem) return QL_STATUS_INVALID_PARAM; + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); esw_cfg = (struct qlcnic_esw_func_cfg *)buf; ret = validate_esw_config(adapter, esw_cfg, count); if (ret) @@ -649,6 +656,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) return QL_STATUS_INVALID_PARAM; } + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } @@ -688,6 +696,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, if (rem) return QL_STATUS_INVALID_PARAM; + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); np_cfg = (struct qlcnic_npar_func_cfg *)buf; ret = validate_npar_config(adapter, np_cfg, count); if (ret) @@ -759,6 +768,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques; np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques; } + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; } @@ -916,6 +926,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, pci_cfg = (struct qlcnic_pci_func_cfg *)buf; count = size / sizeof(struct qlcnic_pci_func_cfg); + qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32)); for (i = 0; i < count; i++) { pci_cfg[i].pci_func = pci_info[i].id; pci_cfg[i].func_type = pci_info[i].type; @@ -969,6 +980,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, } qlcnic_83xx_unlock_flash(adapter); + qlcnic_swap32_buffer((u32 *)p_read_buf, count); memcpy(buf, p_read_buf, size); kfree(p_read_buf); @@ -986,9 +998,10 @@ static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter, if (!p_cache) return -ENOMEM; + count = size / sizeof(u32); + qlcnic_swap32_buffer((u32 *)buf, count); memcpy(p_cache, buf, size); p_src = p_cache; - count = size / sizeof(u32); if (qlcnic_83xx_lock_flash(adapter) != 0) { kfree(p_cache); @@ -1053,6 +1066,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, if (!p_cache) return -ENOMEM; + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); memcpy(p_cache, buf, size); p_src = p_cache; count = size / sizeof(u32); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b40050e03a56..3e96f269150d 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -86,7 +86,7 @@ MODULE_PARM_DESC(qlge_force_coredump, "Option to allow force of firmware core dump. 
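The qlge ql_tso() hunk just below switches from skb->protocol to vlan_get_protocol(), which looks past an 802.1Q tag to the encapsulated protocol, so TSO checksum setup is not skipped for non-accelerated VLAN frames. A tiny helper showing the same test; the function name is made up:

#include <linux/if_vlan.h>

static bool example_l3_is_ipv4(const struct sk_buff *skb)
{
	/* skb->protocol would report ETH_P_8021Q for a tagged frame */
	return vlan_get_protocol(skb) == htons(ETH_P_IP);
}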
" "Default is OFF - Do not allow."); -static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = { +static const struct pci_device_id qlge_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, /* required last entry */ @@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) if (skb_is_gso(skb)) { int err; + __be16 l3_proto = vlan_get_protocol(skb); err = skb_cow_head(skb, 0); if (err < 0) @@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) << OB_MAC_TRANSPORT_HDR_SHIFT); mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; - if (likely(skb->protocol == htons(ETH_P_IP))) { + if (likely(l3_proto == htons(ETH_P_IP))) { struct iphdr *iph = ip_hdr(skb); iph->check = 0; mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; @@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) iph->daddr, 0, IPPROTO_TCP, 0); - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (l3_proto == htons(ETH_P_IPV6)) { mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, @@ -2727,23 +2728,22 @@ static void ql_free_shadow_space(struct ql_adapter *qdev) static int ql_alloc_shadow_space(struct ql_adapter *qdev) { qdev->rx_ring_shadow_reg_area = - pci_alloc_consistent(qdev->pdev, - PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); + pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, + &qdev->rx_ring_shadow_reg_dma); if (qdev->rx_ring_shadow_reg_area == NULL) { netif_err(qdev, ifup, qdev->ndev, "Allocation of RX shadow space failed.\n"); return -ENOMEM; } - memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); + qdev->tx_ring_shadow_reg_area = - pci_alloc_consistent(qdev->pdev, PAGE_SIZE, - &qdev->tx_ring_shadow_reg_dma); + pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, + &qdev->tx_ring_shadow_reg_dma); if (qdev->tx_ring_shadow_reg_area == NULL) { netif_err(qdev, ifup, qdev->ndev, "Allocation of TX shadow space failed.\n"); goto err_wqp_sh_area; } - memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); return 0; err_wqp_sh_area: diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index cd045ecb9816..9a37247cf4b8 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1254,7 +1254,7 @@ static void r6040_remove_one(struct pci_dev *pdev) } -static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = { +static const struct pci_device_id r6040_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) }, { 0 } }; diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 2e5df148af4c..007b38cce69a 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -234,7 +234,7 @@ static const struct { }; -static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = { +static const struct pci_device_id rtl8139_pci_tbl[] = { {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 9887bcb45b84..0921302553c6 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -291,7 +291,7 @@ enum cfg_version { RTL_CFG_2 }; -static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { +static const struct 
pci_device_id rtl8169_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, @@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev, netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); - netdev_features_t changed = features ^ dev->features; void __iomem *ioaddr = tp->mmio_addr; + u32 rx_config; - if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_RX))) - return; + rx_config = RTL_R32(RxConfig); + if (features & NETIF_F_RXALL) + rx_config |= (AcceptErr | AcceptRunt); + else + rx_config &= ~(AcceptErr | AcceptRunt); - if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { - if (features & NETIF_F_RXCSUM) - tp->cp_cmd |= RxChkSum; - else - tp->cp_cmd &= ~RxChkSum; + RTL_W32(RxConfig, rx_config); - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) - tp->cp_cmd |= RxVlan; - else - tp->cp_cmd &= ~RxVlan; + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; - RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); - } - if (changed & NETIF_F_RXALL) { - int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); - if (features & NETIF_F_RXALL) - tmp |= (AcceptErr | AcceptRunt); - RTL_W32(RxConfig, tmp); - } + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); } static int rtl8169_set_features(struct net_device *dev, @@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev, { struct rtl8169_private *tp = netdev_priv(dev); + features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; + rtl_lock_work(tp); - __rtl8169_set_features(dev, features); + if (features ^ dev->features) + __rtl8169_set_features(dev, features); rtl_unlock_work(tp); return 0; @@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) } } -static int -rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; const unsigned int region = cfg->region; @@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_mwi_2; } - tp->cp_cmd = RxChkSum; + tp->cp_cmd = 0; if ((sizeof(dma_addr_t) > 4) && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { @@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - /* - * Pretend we are using VLANs; This bypasses a nasty bug where - * Interrupts stop flowing on high load on 8110SCd controllers. - */ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - tp->cp_cmd |= RxVlan; - rtl_init_mdio_ops(tp); rtl_init_pll_power_ops(tp); rtl_init_jumbo_ops(tp); @@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; + tp->cp_cmd |= RxChkSum | RxVlan; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
+ */ if (tp->mac_version == RTL_GIGA_MAC_VER_05) - /* 8110SCd requires hardware Rx VLAN - disallow toggling */ + /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; if (tp->txd_version == RTL_TD_0) diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 9e757c792d84..196e98a2d93b 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -5,6 +5,7 @@ config SH_ETH tristate "Renesas SuperH Ethernet support" depends on HAS_DMA + depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST select CRC32 select MII select MDIO_BITBANG diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4cebe9d37816..b2cc590dd1dd 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2642,7 +2642,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) **************************************************************************/ /* PCI device ID table */ -static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { +static const struct pci_device_id efx_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), .driver_data = (unsigned long) &falcon_a1_nic_type}, diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 0537381cd2f6..6859437b59fb 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) u32 crc; int bit; + if (!efx_dev_registered(efx)) + return; + netif_addr_lock_bh(net_dev); efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 7984ad05357d..7a254da85dd7 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1384,7 +1384,7 @@ static void ioc3_remove_one(struct pci_dev *pdev) */ } -static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = { +static const struct pci_device_id ioc3_pci_tbl[] = { { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID }, { 0 } }; diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index 7daa7d433099..7426f8b21252 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -1561,7 +1561,7 @@ out: return 0; } -static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = { +static const struct pci_device_id sc92031_pci_device_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, { PCI_DEVICE(0x1088, 0x2031) }, diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index a86339903b9b..27be6c869315 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -330,7 +330,7 @@ static const struct { { "SiS 191 PCI Gigabit Ethernet adapter" }, }; -static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = { +static const struct pci_device_id sis190_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, { 0, }, diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 7bea17c41dc9..fd812d2e5e1c 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -106,7 +106,8 @@ static const char * card_names[] = { "SiS 900 PCI Fast Ethernet", "SiS 7016 PCI Fast Ethernet" }; -static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = { + +static const struct 
pci_device_id sis900_pci_tbl[] = { {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900}, {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016, diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 8ae1f8a7bf38..443f1da9fc9e 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -173,7 +173,7 @@ static const struct epic_chip_info pci_id_tbl[] = { }; -static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = { +static const struct pci_device_id epic_pci_tbl[] = { { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 }, { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 }, { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID, diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h index 23953957fed8..54d648920a1b 100644 --- a/drivers/net/ethernet/smsc/smsc911x.h +++ b/drivers/net/ethernet/smsc/smsc911x.h @@ -51,7 +51,7 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define SMSC_ASSERT_MAC_LOCK(pdata) \ - WARN_ON(!spin_is_locked(&pdata->mac_lock)) + WARN_ON_SMP(!spin_is_locked(&pdata->mac_lock)) #else #define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0) #endif /* CONFIG_DEBUG_SPINLOCK */ diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index d3b967aff9e0..4a90cdae5444 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -83,7 +83,7 @@ struct smsc9420_pdata { int last_carrier; }; -static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = { +static const struct pci_device_id smsc9420_id_table[] = { { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index c553f6b5a913..cf28daba4346 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -28,7 +28,7 @@ #include "stmmac.h" -static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; @@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); while (len != 0) { @@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), bmax, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); @@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); diff --git 
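The stmmac chain_mode.c hunks above add the same guard after every dma_map_single() so a failed mapping is reported instead of being handed to the DMA engine. The core idiom, with the surrounding descriptor setup trimmed:

	desc->des2 = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, desc->des2))
		return -1;	/* caller drops the skb and counts it as tx_dropped */
	priv->tx_skbuff_dma[entry].buf = desc->des2;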
a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index de507c32036c..593e6c4144a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -220,10 +220,10 @@ enum dma_irq_status { handle_tx = 0x8, }; -#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1) -#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2) -#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3) -#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4) +#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) +#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) +#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) +#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) #define CORE_PCS_ANE_COMPLETE (1 << 5) #define CORE_PCS_LINK_STATUS (1 << 6) @@ -287,7 +287,7 @@ struct dma_features { /* Default LPI timers */ #define STMMAC_DEFAULT_LIT_LS 0x3E8 -#define STMMAC_DEFAULT_TWT_LS 0x0 +#define STMMAC_DEFAULT_TWT_LS 0x1E #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 @@ -425,7 +425,7 @@ struct stmmac_mode_ops { void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc); unsigned int (*is_jumbo_frm) (int len, int ehn_desc); - unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); int (*set_16kib_bfsize)(int mtu); void (*init_desc3)(struct dma_desc *p); void (*refill_desc3) (void *priv, struct dma_desc *p); @@ -445,6 +445,7 @@ struct mac_device_info { int multicast_filter_bins; int unicast_filter_entries; int mcast_bits_log2; + unsigned int rx_csum; }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 71b5419256c1..64d8f56a9c17 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -153,7 +153,7 @@ enum inter_frame_gap { #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ - GMAC_CONTROL_BE) + GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) /* GMAC Frame Filter defines */ #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index d8ef18786a1c..5efe60ea6526 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONTROL); - value |= GMAC_CONTROL_IPC; + if (hw->rx_csum) + value |= GMAC_CONTROL_IPC; + else + value &= ~GMAC_CONTROL_IPC; + writel(value, ioaddr + GMAC_CONTROL); value = readl(ioaddr + GMAC_CONTROL); diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 8607488cbcfc..192c2491330b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -68,7 +68,7 @@ struct stmmac_counters { unsigned int mmc_rx_octetcount_g; unsigned int mmc_rx_broadcastframe_g; unsigned int mmc_rx_multicastframe_g; - unsigned int mmc_rx_crc_errror; + unsigned int mmc_rx_crc_error; unsigned int mmc_rx_align_error; unsigned int mmc_rx_run_error; unsigned int mmc_rx_jabber_error; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c 
index 50617c5a0bdb..08c483bd2ec7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); - mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR); + mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR); mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 650a4be6bce5..5dd50c6cda5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -28,7 +28,7 @@ #include "stmmac.h" -static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; @@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + + priv->tx_skbuff_dma[entry].buf = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_RING_MODE); @@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data + bmax, len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_RING_MODE); @@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) } else { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, STMMAC_RING_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ca01035634a7..58097c0e2ad5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -34,6 +34,11 @@ #include <linux/ptp_clock_kernel.h> #include <linux/reset.h> +struct stmmac_tx_info { + dma_addr_t buf; + bool map_as_page; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; @@ -45,7 +50,7 @@ struct stmmac_priv { u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; - dma_addr_t *tx_skbuff_dma; + struct stmmac_tx_info *tx_skbuff_dma; dma_addr_t dma_tx_phy; int tx_coalesce; int hwts_tx_en; @@ -105,6 +110,8 @@ struct stmmac_priv { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; + struct 
clk *clk_ptp_ref; + unsigned int clk_ptp_rate; u32 adv_ts; int use_riwt; int irq_wake; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9af50bae4dde..cf4f38db1c0a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = { STMMAC_MMC_STAT(mmc_rx_octetcount_g), STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), STMMAC_MMC_STAT(mmc_rx_multicastframe_g), - STMMAC_MMC_STAT(mmc_rx_crc_errror), + STMMAC_MMC_STAT(mmc_rx_crc_error), STMMAC_MMC_STAT(mmc_rx_align_error), STMMAC_MMC_STAT(mmc_rx_run_error), STMMAC_MMC_STAT(mmc_rx_jabber_error), diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 08addd653728..b0c1521e08a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) */ bool stmmac_eee_init(struct stmmac_priv *priv) { + char *phy_bus_name = priv->plat->phy_bus_name; bool ret = false; /* Using PCS we cannot dial with the phy registers at this stage @@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv) (priv->pcs == STMMAC_PCS_RTBI)) goto out; + /* Never init EEE in case of a switch is attached */ + if (phy_bus_name && (!strcmp(phy_bus_name, "fixed"))) + goto out; + /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { int tx_lpi_timer = priv->tx_lpi_timer; @@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv) priv->hw->mac->set_eee_timer(priv->hw, STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); - } else - /* Set HW EEE according to the speed */ - priv->hw->mac->set_eee_pls(priv->hw, - priv->phydev->link); + } + /* Set HW EEE according to the speed */ + priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); @@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; - * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz - * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK; - * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to + * where, freq_div_ratio = clk_ptp_ref_i/50MHz + * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; + * NOTE: clk_ptp_ref_i should be >= 50MHz to * achive 20ns accuracy. 
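The comment above spells out the addend formula, which is now fed by the rate of the real PTP reference clock (queried with clk_get_rate()) instead of the removed STMMAC_SYSCLOCK constant. Numerically, assuming the old 62.5 MHz default rate:

	u64 temp = 50000000ULL << 32;	/* 2^32 * 50 MHz */

	priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
	/* clk_ptp_rate = 62500000  =>  addend = 2^32 * 50 / 62.5 ~= 0xCCCCCCCC */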
* * 2^x * y == (y << x), hence * 2^32 * 50000000 ==> (50000000 << 32) */ temp = (u64) (50000000ULL << 32); - priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK); + priv->default_addend = div_u64(temp, priv->clk_ptp_rate); priv->hw->ptp->config_addend(priv->ioaddr, priv->default_addend); @@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP; + /* Fall-back to main clock in case of no PTP ref is passed */ + priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); + if (IS_ERR(priv->clk_ptp_ref)) { + priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); + priv->clk_ptp_ref = NULL; + } else { + clk_prepare_enable(priv->clk_ptp_ref); + priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); + } + priv->adv_ts = 0; if (priv->dma_cap.atime_stamp && priv->extend_desc) priv->adv_ts = 1; @@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) static void stmmac_release_ptp(struct stmmac_priv *priv) { + if (priv->clk_ptp_ref) + clk_disable_unprepare(priv->clk_ptp_ref); stmmac_ptp_unregister(priv); } @@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev) else p = priv->dma_tx + i; p->des2 = 0; - priv->tx_skbuff_dma[i] = 0; + priv->tx_skbuff_dma[i].buf = 0; + priv->tx_skbuff_dma[i].map_as_page = false; priv->tx_skbuff[i] = NULL; } @@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) else p = priv->dma_tx + i; - if (priv->tx_skbuff_dma[i]) { - dma_unmap_single(priv->device, - priv->tx_skbuff_dma[i], - priv->hw->desc->get_tx_len(p), - DMA_TO_DEVICE); - priv->tx_skbuff_dma[i] = 0; + if (priv->tx_skbuff_dma[i].buf) { + if (priv->tx_skbuff_dma[i].map_as_page) + dma_unmap_page(priv->device, + priv->tx_skbuff_dma[i].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[i].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); } if (priv->tx_skbuff[i] != NULL) { dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; + priv->tx_skbuff_dma[i].buf = 0; + priv->tx_skbuff_dma[i].map_as_page = false; } } } @@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) if (!priv->rx_skbuff) goto err_rx_skbuff; - priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), + priv->tx_skbuff_dma = kmalloc_array(txsize, + sizeof(*priv->tx_skbuff_dma), GFP_KERNEL); if (!priv->tx_skbuff_dma) goto err_tx_skbuff_dma; @@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) pr_debug("%s: curr %d, dirty %d\n", __func__, priv->cur_tx, priv->dirty_tx); - if (likely(priv->tx_skbuff_dma[entry])) { - dma_unmap_single(priv->device, - priv->tx_skbuff_dma[entry], - priv->hw->desc->get_tx_len(p), - DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = 0; + if (likely(priv->tx_skbuff_dma[entry].buf)) { + if (priv->tx_skbuff_dma[entry].map_as_page) + dma_unmap_page(priv->device, + priv->tx_skbuff_dma[entry].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[entry].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry].buf = 0; + priv->tx_skbuff_dma[entry].map_as_page = false; } priv->hw->mode->clean_desc3(priv, p); @@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); + ret = priv->hw->mac->rx_ipc(priv->hw); + if (!ret) { + pr_warn(" RX IPC Checksum Offload 
disabled\n"); + priv->plat->rx_coe = STMMAC_RX_COE_NONE; + priv->hw->rx_csum = 0; + } + /* Enable the MAC Rx/Tx */ stmmac_set_mac(priv->ioaddr, true); @@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!is_jumbo)) { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + goto dma_map_err; + priv->tx_skbuff_dma[entry].buf = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum_insertion, priv->mode); } else { desc = first; entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); + if (unlikely(entry < 0)) + goto dma_map_err; } for (i = 0; i < nfrags; i++) { @@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); - priv->tx_skbuff_dma[entry] = desc->des2; + if (dma_mapping_error(priv->device, desc->des2)) + goto dma_map_err; /* should reuse desc w/o issues */ + + priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].map_as_page = true; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, priv->mode); wmb(); @@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->hw->dma->enable_dma_transmission(priv->ioaddr); spin_unlock(&priv->tx_lock); + return NETDEV_TX_OK; +dma_map_err: + dev_err(priv->device, "Tx dma map failed\n"); + dev_kfree_skb(skb); + priv->dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) priv->rx_skbuff_dma[entry] = dma_map_single(priv->device, skb->data, bfsize, DMA_FROM_DEVICE); - + if (dma_mapping_error(priv->device, + priv->rx_skbuff_dma[entry])) { + dev_err(priv->device, "Rx dma map failed\n"); + dev_kfree_skb(skb); + break; + } p->des2 = priv->rx_skbuff_dma[entry]; priv->hw->mode->refill_desc3(priv, p); @@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) unsigned int entry = priv->cur_rx % rxsize; unsigned int next_entry; unsigned int count = 0; - int coe = priv->plat->rx_coe; + int coe = priv->hw->rx_csum; if (netif_msg_rx_status(priv)) { pr_debug("%s: descriptor ring:\n", __func__); @@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) features &= ~NETIF_F_RXCSUM; - else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) - features &= ~NETIF_F_IPV6_CSUM; + if (!priv->plat->tx_coe) features &= ~NETIF_F_ALL_CSUM; @@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, return features; } +static int stmmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + /* Keep the COE Type in case of csum is supporting */ + if (features & NETIF_F_RXCSUM) + priv->hw->rx_csum = priv->plat->rx_coe; + else + priv->hw->rx_csum = 0; + /* No check needed because rx_coe has been set before and it will be + * fixed in case of issue. + */ + priv->hw->mac->rx_ipc(priv->hw); + + return 0; +} + /** * stmmac_interrupt - main ISR * @irq: interrupt number. 
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_stop = stmmac_release, .ndo_change_mtu = stmmac_change_mtu, .ndo_fix_features = stmmac_fix_features, + .ndo_set_features = stmmac_set_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, .ndo_do_ioctl = stmmac_ioctl, @@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = { */ static int stmmac_hw_init(struct stmmac_priv *priv) { - int ret; struct mac_device_info *mac; /* Identify the MAC HW device */ @@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) /* To use alternate (extended) or normal descriptor structures */ stmmac_selec_desc_mode(priv); - ret = priv->hw->mac->rx_ipc(priv->hw); - if (!ret) { - pr_warn(" RX IPC Checksum Offload not configured.\n"); - priv->plat->rx_coe = STMMAC_RX_COE_NONE; - } - - if (priv->plat->rx_coe) + if (priv->plat->rx_coe) { + priv->hw->rx_csum = priv->plat->rx_coe; pr_info(" RX Checksum Offload Engine supported (type %d)\n", priv->plat->rx_coe); + } if (priv->plat->tx_coe) pr_info(" TX Checksum insertion supported\n"); @@ -2716,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, if (IS_ERR(priv->stmmac_clk)) { dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", __func__); - ret = PTR_ERR(priv->stmmac_clk); - goto error_clk_get; + /* If failed to obtain stmmac_clk and specific clk_csr value + * is NOT passed from the platform, probe fail. + */ + if (!priv->plat->clk_csr) { + ret = PTR_ERR(priv->stmmac_clk); + goto error_clk_get; + } else { + priv->stmmac_clk = NULL; + } } clk_prepare_enable(priv->stmmac_clk); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 291608924849..655a23bbc451 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -170,7 +170,7 @@ static int stmmac_pci_resume(struct pci_dev *pdev) #define STMMAC_VENDOR_ID 0x700 #define STMMAC_DEVICE_ID 0x1108 -static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { +static const struct pci_device_id stmmac_id_table[] = { {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, {} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b7ad3565566c..c5ee79d8a8c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) { if (priv->ptp_clock) { ptp_clock_unregister(priv->ptp_clock); + priv->ptp_clock = NULL; pr_debug("Removed PTP HW clock successfully on %s\n", priv->dev->name); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 3dbc047622fa..4535df37c227 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -25,8 +25,6 @@ #ifndef __STMMAC_PTP_H__ #define __STMMAC_PTP_H__ -#define STMMAC_SYSCLOCK 62500000 - /* IEEE 1588 PTP register offsets */ #define PTP_TCR 0x0700 /* Timestamp Control Reg */ #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index b9ac20f42651..37f87ff28f03 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -229,7 +229,7 @@ static u16 link_modes[] = { 
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ }; -static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = { +static const struct pci_device_id cas_pci_tbl[] = { { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index db8ffde491b5..8216be46540f 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -59,7 +59,7 @@ static void writeq(u64 val, void __iomem *reg) } #endif -static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = { +static const struct pci_device_id niu_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, {} }; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 102a66fc54a2..f7415b6bf141 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -85,7 +85,7 @@ MODULE_LICENSE("GPL"); #define GEM_MODULE_NAME "gem" -static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = { +static const struct pci_device_id gem_pci_tbl[] = { { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 0dbf46f08ed5..72c8525d5457 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3172,7 +3172,7 @@ static void happy_meal_pci_remove(struct pci_dev *pdev) free_netdev(net_dev); } -static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = { +static const struct pci_device_id happymeal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, { } /* Terminating entry */ }; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index d813bfb1a847..f67539650c38 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -32,6 +32,11 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); +/* Heuristic for the number of times to exponentially backoff and + * retry sending an LDC trigger when EAGAIN is encountered + */ +#define VNET_MAX_RETRIES 10 + /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { { .major = 1, .minor = 0 }, @@ -260,6 +265,7 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, .state = vio_dring_state, }; int err, delay; + int retries = 0; hdr.seq = dr->snd_nxt; delay = 1; @@ -272,6 +278,13 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, udelay(delay); if ((delay <<= 1) > 128) delay = 128; + if (retries++ > VNET_MAX_RETRIES) { + pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", + port->raddr[0], port->raddr[1], + port->raddr[2], port->raddr[3], + port->raddr[4], port->raddr[5]); + err = -ECONNRESET; + } } while (err == -EAGAIN); return err; @@ -337,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port, if (IS_ERR(desc)) return PTR_ERR(desc); + if (desc->hdr.state != VIO_DESC_READY) + return 1; + + rmb(); + viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", desc->hdr.state, desc->hdr.ack, desc->size, desc->ncookies, desc->cookies[0].cookie_addr, desc->cookies[0].cookie_size); - if (desc->hdr.state != VIO_DESC_READY) - return 1; err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); if (err == -ECONNRESET) return err; @@ -475,8 +491,9 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf) return 0; } -static void maybe_tx_wakeup(struct 
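The sunvnet hunks above put a cap on the previously unbounded -EAGAIN retry loops. A generic sketch of the bounded exponential backoff they introduce; send_trigger() is a placeholder for the driver's LDC send helper, not a real function:

	int err, delay = 1, retries = 0;

	do {
		err = send_trigger(port);	/* placeholder */
		if (err != -EAGAIN)
			break;
		udelay(delay);
		if ((delay <<= 1) > 128)	/* back off 1, 2, 4, ... 128 us */
			delay = 128;
	} while (retries++ < VNET_MAX_RETRIES);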
vnet *vp) +static void maybe_tx_wakeup(unsigned long param) { + struct vnet *vp = (struct vnet *)param; struct net_device *dev = vp->dev; netif_tx_lock(dev); @@ -573,8 +590,13 @@ static void vnet_event(void *arg, int event) break; } spin_unlock(&vio->lock); + /* Kick off a tasklet to wake the queue. We cannot call + * maybe_tx_wakeup directly here because we could deadlock on + * netif_tx_lock() with dev_watchdog() + */ if (unlikely(tx_wakeup && err != -ECONNRESET)) - maybe_tx_wakeup(port->vp); + tasklet_schedule(&port->vp->vnet_tx_wakeup); + local_irq_restore(flags); } @@ -593,6 +615,7 @@ static int __vnet_tx_trigger(struct vnet_port *port) .end_idx = (u32) -1, }; int err, delay; + int retries = 0; hdr.seq = dr->snd_nxt; delay = 1; @@ -605,6 +628,8 @@ static int __vnet_tx_trigger(struct vnet_port *port) udelay(delay); if ((delay <<= 1) > 128) delay = 128; + if (retries++ > VNET_MAX_RETRIES) + break; } while (err == -EAGAIN); return err; @@ -691,7 +716,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len); } - d->hdr.ack = VIO_ACK_ENABLE; + /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), + * thus it is safe to not set VIO_ACK_ENABLE for each transmission: + * the protocol itself does not require it as long as the peer + * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. + * + * An ACK for every packet in the ring is expensive as the + * sending of LDC messages is slow and affects performance. + */ + d->hdr.ack = VIO_ACK_DISABLE; d->size = len; d->ncookies = port->tx_bufs[dr->prod].ncookies; for (i = 0; i < d->ncookies; i++) @@ -1046,6 +1079,7 @@ static struct vnet *vnet_new(const u64 *local_mac) vp = netdev_priv(dev); spin_lock_init(&vp->lock); + tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp); vp->dev = dev; INIT_LIST_HEAD(&vp->port_list); @@ -1105,6 +1139,7 @@ static void vnet_cleanup(void) vp = list_first_entry(&vnet_list, struct vnet, list); list_del(&vp->list); dev = vp->dev; + tasklet_kill(&vp->vnet_tx_wakeup); /* vio_unregister_driver() should have cleaned up port_list */ BUG_ON(!list_empty(&vp->port_list)); unregister_netdev(dev); diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h index d347a5bf24b0..de5c2c64996f 100644 --- a/drivers/net/ethernet/sun/sunvnet.h +++ b/drivers/net/ethernet/sun/sunvnet.h @@ -1,6 +1,8 @@ #ifndef _SUNVNET_H #define _SUNVNET_H +#include <linux/interrupt.h> + #define DESC_NCOOKIES(entry_size) \ ((entry_size) - sizeof(struct vio_net_desc)) @@ -78,6 +80,8 @@ struct vnet { struct list_head list; u64 local_mac; + + struct tasklet_struct vnet_tx_wakeup; }; #endif /* _SUNVNET_H */ diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 38da73a2a886..6ab36d9ff2ab 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -66,7 +66,7 @@ #include "tehuti.h" -static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = { +static const struct pci_device_id bdx_pci_tbl[] = { { PCI_VDEVICE(TEHUTI, 0x3009), }, { PCI_VDEVICE(TEHUTI, 0x3010), }, { PCI_VDEVICE(TEHUTI, 0x3014), }, diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 3809f4ec2820..f9bcf7aa88ca 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1130,6 +1130,7 @@ static int cpmac_probe(struct platform_device *pdev) strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ phy_id = pdev->id; } + 
mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0'; dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); if (!dev) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 999fb72688d2..e2a00287f8eb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status) cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { + bool ndev_status = false; + struct cpsw_slave *slave = priv->slaves; + int n; + + if (priv->data.dual_emac) { + /* In dual emac mode check for all interfaces */ + for (n = priv->data.slaves; n; n--, slave++) + if (netif_running(slave->ndev)) + ndev_status = true; + } + + if (ndev_status && (status >= 0)) { + /* The packet received is for the interface which + * is already down and the other interface is up + * and running; instead of freeing it, which would + * reduce the number of rx descriptors in the + * DMA engine, requeue the skb back to cpdma. + */ + new_skb = skb; + goto requeue; + } + /* the interface is going down, skbs are purged */ dev_kfree_skb_any(skb); return; @@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status) new_skb = skb; } +requeue: ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, skb_tailroom(new_skb), 0); if (WARN_ON(ret < 0)) @@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev) struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); - if (netif_running(ndev)) - cpsw_ndo_stop(ndev); + if (priv->data.dual_emac) { + int i; - for_each_slave(priv, soft_reset_slave); + for (i = 0; i < priv->data.slaves; i++) { + if (netif_running(priv->slaves[i].ndev)) + cpsw_ndo_stop(priv->slaves[i].ndev); + soft_reset_slave(priv->slaves + i); + } + } else { + if (netif_running(ndev)) + cpsw_ndo_stop(ndev); + for_each_slave(priv, soft_reset_slave); + } pm_runtime_put_sync(&pdev->dev); @@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); pm_runtime_get_sync(&pdev->dev); /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (netif_running(ndev)) - cpsw_ndo_open(ndev); + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (netif_running(priv->slaves[i].ndev)) + cpsw_ndo_open(priv->slaves[i].ndev); + } + } else { + if (netif_running(ndev)) + cpsw_ndo_open(ndev); + } return 0; } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 6078342fe3f2..f2ff0074aac9 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -116,7 +116,7 @@ static struct board { TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ }; -static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { +static const struct pci_device_id tlan_pci_tbl[] = { { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 0282d0161859..3e38f67c6011 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \ char spider_net_driver_name[] = "spidernet"; -static
DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = { +static const struct pci_device_id spider_net_pci_tbl[] = { { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { 0, } diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index fef5573dbfca..45ac38d29ed8 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -65,7 +65,7 @@ static const struct { { "TOSHIBA TC35815/TX4939" }, }; -static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = { +static const struct pci_device_id tc35815_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU }, {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 2d72f96a9e2c..68c5260cc322 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -273,7 +273,7 @@ enum rhine_quirks { /* Beware of PCI posted writes */ #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) -static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = { +static const struct pci_device_id rhine_pci_tbl[] = { { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index d5d547aef4f7..a43e8492b1ce 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -381,7 +381,7 @@ static struct velocity_info_tbl chip_info_table[] = { * device driver. Used for hotplug autoloading. 
*/ -static DEFINE_PCI_DEVICE_TABLE(velocity_pci_id_table) = { +static const struct pci_device_id velocity_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, { } }; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 8a6e5c2d6f95..fda5891835d4 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1148,8 +1148,7 @@ static int temac_of_remove(struct platform_device *op) temac_mdio_teardown(lp); unregister_netdev(ndev); sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); - if (lp->phy_node) - of_node_put(lp->phy_node); + of_node_put(lp->phy_node); lp->phy_node = NULL; iounmap(lp->regs); if (lp->sdma_regs) @@ -1171,7 +1170,6 @@ static struct platform_driver temac_of_driver = { .probe = temac_of_probe, .remove = temac_of_remove, .driver = { - .owner = THIS_MODULE, .name = "xilinx_temac", .of_match_table = temac_of_match, }, diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 7b0a73556264..c8fd94133ecd 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1630,8 +1630,7 @@ static int axienet_of_remove(struct platform_device *op) axienet_mdio_teardown(lp); unregister_netdev(ndev); - if (lp->phy_node) - of_node_put(lp->phy_node); + of_node_put(lp->phy_node); lp->phy_node = NULL; iounmap(lp->regs); @@ -1646,7 +1645,6 @@ static struct platform_driver axienet_of_driver = { .probe = axienet_of_probe, .remove = axienet_of_remove, .driver = { - .owner = THIS_MODULE, .name = "xilinx_axienet", .of_match_table = axienet_of_match, }, diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 782bb9373cd8..28dbbdc393eb 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1245,7 +1245,6 @@ MODULE_DEVICE_TABLE(of, xemaclite_of_match); static struct platform_driver xemaclite_of_driver = { .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .of_match_table = xemaclite_of_match, }, .probe = xemaclite_of_probe, diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 7c81ffb861e8..d56f8693202b 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -266,7 +266,7 @@ static void xirc2ps_detach(struct pcmcia_device *p_dev); static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); -typedef struct local_info_t { +struct local_info { struct net_device *dev; struct pcmcia_device *p_dev; @@ -281,7 +281,7 @@ typedef struct local_info_t { unsigned last_ptr_value; /* last packets transmitted value */ const char *manf_str; struct work_struct tx_timeout_task; -} local_info_t; +}; /**************** * Some more prototypes @@ -475,12 +475,12 @@ static int xirc2ps_probe(struct pcmcia_device *link) { struct net_device *dev; - local_info_t *local; + struct local_info *local; dev_dbg(&link->dev, "attach()\n"); /* Allocate the device structure */ - dev = alloc_etherdev(sizeof(local_info_t)); + dev = alloc_etherdev(sizeof(struct local_info)); if (!dev) return -ENOMEM; local = netdev_priv(dev); @@ -536,7 +536,7 @@ static int set_card_type(struct pcmcia_device *link) { struct net_device *dev = link->priv; - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); u8 *buf; unsigned int cisrev, mediaid, prodid; size_t 
len; @@ -690,7 +690,7 @@ static int xirc2ps_config(struct pcmcia_device * link) { struct net_device *dev = link->priv; - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); unsigned int ioaddr; int err; u8 *buf; @@ -931,7 +931,7 @@ xirc2ps_release(struct pcmcia_device *link) if (link->resource[2]->end) { struct net_device *dev = link->priv; - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); if (local->dingo) iounmap(local->dingo_ccr - 0x0800); } @@ -975,7 +975,7 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); unsigned int ioaddr; u_char saved_page; unsigned bytes_rcvd; @@ -1194,8 +1194,8 @@ xirc2ps_interrupt(int irq, void *dev_id) static void xirc2ps_tx_timeout_task(struct work_struct *work) { - local_info_t *local = - container_of(work, local_info_t, tx_timeout_task); + struct local_info *local = + container_of(work, struct local_info, tx_timeout_task); struct net_device *dev = local->dev; /* reset the card */ do_reset(dev,1); @@ -1206,7 +1206,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work) static void xirc_tx_timeout(struct net_device *dev) { - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); dev->stats.tx_errors++; netdev_notice(dev, "transmit timed out\n"); schedule_work(&lp->tx_timeout_task); @@ -1215,7 +1215,7 @@ xirc_tx_timeout(struct net_device *dev) static netdev_tx_t do_start_xmit(struct sk_buff *skb, struct net_device *dev) { - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; int okay; unsigned freespace; @@ -1300,7 +1300,7 @@ static void set_address(struct set_address_info *sa_info, char *addr) static void set_addresses(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); struct netdev_hw_addr *ha; struct set_address_info sa_info; int i; @@ -1362,7 +1362,7 @@ set_multicast_list(struct net_device *dev) static int do_config(struct net_device *dev, struct ifmap *map) { - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); pr_debug("do_config(%p)\n", dev); if (map->port != 255 && map->port != dev->if_port) { @@ -1387,7 +1387,7 @@ do_config(struct net_device *dev, struct ifmap *map) static int do_open(struct net_device *dev) { - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; dev_dbg(&link->dev, "do_open(%p)\n", dev); @@ -1421,7 +1421,7 @@ static const struct ethtool_ops netdev_ethtool_ops = { static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; struct mii_ioctl_data *data = if_mii(rq); @@ -1453,7 +1453,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static void hardreset(struct net_device *dev) { - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; SelectPage(4); @@ -1470,7 +1470,7 @@ hardreset(struct net_device *dev) static void do_reset(struct net_device *dev, int full) { - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned value; @@ 
-1631,7 +1631,7 @@ do_reset(struct net_device *dev, int full) static int init_mii(struct net_device *dev) { - local_info_t *local = netdev_priv(dev); + struct local_info *local = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; unsigned control, status, linkpartner; int i; @@ -1715,7 +1715,7 @@ static int do_stop(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; - local_info_t *lp = netdev_priv(dev); + struct local_info *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; dev_dbg(&link->dev, "do_stop(%p)\n", dev); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 6eb849a56da5..c44eaf019dea 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3664,7 +3664,7 @@ static int __maybe_unused dfx_dev_unregister(struct device *); static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *); static void dfx_pci_unregister(struct pci_dev *); -static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = { +static const struct pci_device_id dfx_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) }, { } }; diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h index c1ba26c06d73..3de2f0d15fe2 100644 --- a/drivers/net/fddi/skfp/h/skfbi.h +++ b/drivers/net/fddi/skfp/h/skfbi.h @@ -147,11 +147,6 @@ #define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */ #define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */ -/* PCI_BASE_2ND 32 bit 2nd Base address */ -#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */ -#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */ -#define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */ - /* PCI_SUB_VID 16 bit Subsystem Vendor ID */ /* PCI_SUB_ID 16 bit Subsystem ID */ diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index d5f58121b2e2..51acc6d86e91 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode); extern void mac_drv_clear_rx_queue(struct s_smc *smc); extern void enable_tx_irq(struct s_smc *smc, u_short queue); -static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = { +static const struct pci_device_id skfddi_pci_tbl[] = { { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, }, { } /* Terminating entry */ }; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index e580583f196d..95c0b45a68fb 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1668,7 +1668,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } } -static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = { +static const struct pci_device_id rr_pci_tbl[] = { { PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 02a3ee282eee..d5e07def6a59 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -585,7 +585,7 @@ struct nvsp_message { #define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ -#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 16) /* 16MB */ +#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ #define NETVSC_INVALID_INDEX -1 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a9c5eaadc426..0fcb5e7eb073 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ 
b/drivers/net/hyperv/netvsc_drv.c @@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) int hdr_offset; u32 net_trans_info; u32 hash; + u32 skb_length = skb->len; /* We will atmost need two pages to describe the rndis @@ -562,7 +563,7 @@ do_send: drop: if (ret == 0) { - net->stats.tx_bytes += skb->len; + net->stats.tx_bytes += skb_length; net->stats.tx_packets++; } else { kfree(packet); diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 5f91e3e01c04..aab2cf72d025 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c @@ -18,6 +18,7 @@ * with this program; if not, see <http://www.gnu.org/licenses/>. */ +#include <linux/clk.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/interrupt.h> @@ -175,6 +176,7 @@ struct au1k_private { struct resource *ioarea; struct au1k_irda_platform_data *platdata; + struct clk *irda_clk; }; static int qos_mtt_bits = 0x07; /* 1 ms or more */ @@ -514,9 +516,39 @@ static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id) static int au1k_init(struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); - u32 enable, ring_address; + u32 enable, ring_address, phyck; + struct clk *c; int i; + c = clk_get(NULL, "irda_clk"); + if (IS_ERR(c)) + return PTR_ERR(c); + i = clk_prepare_enable(c); + if (i) { + clk_put(c); + return i; + } + + switch (clk_get_rate(c)) { + case 40000000: + phyck = IR_PHYCLK_40MHZ; + break; + case 48000000: + phyck = IR_PHYCLK_48MHZ; + break; + case 56000000: + phyck = IR_PHYCLK_56MHZ; + break; + case 64000000: + phyck = IR_PHYCLK_64MHZ; + break; + default: + clk_disable_unprepare(c); + clk_put(c); + return -EINVAL; + } + aup->irda_clk = c; + enable = IR_HC | IR_CE | IR_C; #ifndef CONFIG_CPU_LITTLE_ENDIAN enable |= IR_BE; @@ -545,7 +577,7 @@ static int au1k_init(struct net_device *dev) irda_write(aup, IR_RING_SIZE, (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12)); - irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN); + irda_write(aup, IR_CONFIG_2, phyck | IR_ONE_PIN); irda_write(aup, IR_RING_ADDR_CMPR, 0); au1k_irda_set_speed(dev, 9600); @@ -619,6 +651,9 @@ static int au1k_irda_stop(struct net_device *dev) free_irq(aup->irq_tx, dev); free_irq(aup->irq_rx, dev); + clk_disable_unprepare(aup->irda_clk); + clk_put(aup->irda_clk); + return 0; } @@ -853,6 +888,7 @@ static int au1k_irda_probe(struct platform_device *pdev) struct au1k_private *aup; struct net_device *dev; struct resource *r; + struct clk *c; int err; dev = alloc_irdadev(sizeof(struct au1k_private)); @@ -886,6 +922,14 @@ static int au1k_irda_probe(struct platform_device *pdev) if (!aup->ioarea) goto out; + /* bail out early if clock doesn't exist */ + c = clk_get(NULL, "irda_clk"); + if (IS_ERR(c)) { + err = PTR_ERR(c); + goto out; + } + clk_put(c); + aup->iobase = ioremap_nocache(r->start, resource_size(r)); if (!aup->iobase) goto out2; diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 768dfe9a9315..a87a82ca111f 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -184,7 +184,7 @@ #define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC #define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX -static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = { +static const struct pci_device_id toshoboe_pci_tbl[] = { { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, }, { } /* Terminating entry */ @@ -1755,17 +1755,4 @@ 
static struct pci_driver donauboe_pci_driver = { .resume = toshoboe_wakeup }; -static int __init -donauboe_init (void) -{ - return pci_register_driver(&donauboe_pci_driver); -} - -static void __exit -donauboe_cleanup (void) -{ - pci_unregister_driver(&donauboe_pci_driver); -} - -module_init(donauboe_init); -module_exit(donauboe_cleanup); +module_pci_driver(donauboe_pci_driver); diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 998bb89ede71..36e004288ea7 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -115,7 +115,7 @@ static void iodelay(int udelay) } } -static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = { +static const struct pci_device_id via_pci_tbl[] = { { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 }, { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 }, { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 }, diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 485006604bbc..a04af9d0f8f9 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -58,7 +58,7 @@ MODULE_LICENSE("GPL"); static /* const */ char drivername[] = DRIVER_NAME; -static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = { +static const struct pci_device_id vlsi_irda_table[] = { { .class = PCI_CLASS_WIRELESS_IRDA << 8, .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, @@ -485,13 +485,13 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev) idev->virtaddr = NULL; idev->busaddr = 0; - ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr); + ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE, + &idev->busaddr); if (!ringarea) { IRDA_ERROR("%s: insufficient memory for descriptor rings\n", __func__); goto out; } - memset(ringarea, 0, HW_RING_AREA_SIZE); hwmap = (struct ring_descr_hw *)ringarea; idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1], diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index ef8a5c20236a..726edabff26b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -36,6 +36,7 @@ #include <linux/netpoll.h> #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) +#define MACVLAN_BC_QUEUE_LEN 1000 struct macvlan_port { struct net_device *dev; @@ -45,10 +46,9 @@ struct macvlan_port { struct sk_buff_head bc_queue; struct work_struct bc_work; bool passthru; + int count; }; -#define MACVLAN_PORT_IS_EMPTY(port) list_empty(&port->vlans) - struct macvlan_skb_cb { const struct macvlan_dev *src; }; @@ -249,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, goto err; spin_lock(&port->bc_queue.lock); - if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) { + if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { __skb_queue_tail(&port->bc_queue, nskb); err = 0; } @@ -667,7 +667,8 @@ static void macvlan_uninit(struct net_device *dev) free_percpu(vlan->pcpu_stats); - if (MACVLAN_PORT_IS_EMPTY(port)) + port->count -= 1; + if (!port->count) macvlan_port_destroy(port->dev); } @@ -739,7 +740,10 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; - if (!vlan->port->passthru) + /* Support unicast filter only on passthru devices. + * Multicast filter should be allowed on all devices. 
+ */ + if (!vlan->port->passthru && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (flags & NLM_F_REPLACE) @@ -760,7 +764,10 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; - if (!vlan->port->passthru) + /* Support unicast filter only on passthru devices. + * Multicast filter should be allowed on all devices. + */ + if (!vlan->port->passthru && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (is_unicast_ether_addr(addr)) @@ -800,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, features, mask); features |= ALWAYS_ON_FEATURES; + features &= ~NETIF_F_NETNS_LOCAL; return features; } @@ -1020,12 +1028,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); if (vlan->mode == MACVLAN_MODE_PASSTHRU) { - if (!MACVLAN_PORT_IS_EMPTY(port)) + if (port->count) return -EINVAL; port->passthru = true; eth_hw_addr_inherit(dev, lowerdev); } + port->count += 1; err = register_netdevice(dev); if (err < 0) goto destroy_port; @@ -1043,7 +1052,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, unregister_netdev: unregister_netdevice(dev); destroy_port: - if (MACVLAN_PORT_IS_EMPTY(port)) + port->count -= 1; + if (!port->count) macvlan_port_destroy(lowerdev); return err; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 3381c4f91a8c..0c6adaaf898c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -112,17 +112,15 @@ out: return err; } +/* Requires RTNL */ static int macvtap_set_queue(struct net_device *dev, struct file *file, struct macvtap_queue *q) { struct macvlan_dev *vlan = netdev_priv(dev); - int err = -EBUSY; - rtnl_lock(); if (vlan->numqueues == MAX_MACVTAP_QUEUES) - goto out; + return -EBUSY; - err = 0; rcu_assign_pointer(q->vlan, vlan); rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); sock_hold(&q->sk); @@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, vlan->numvtaps++; vlan->numqueues++; -out: - rtnl_unlock(); - return err; + return 0; } static int macvtap_disable_queue(struct macvtap_queue *q) @@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk) static int macvtap_open(struct inode *inode, struct file *file) { struct net *net = current->nsproxy->net_ns; - struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); + struct net_device *dev; struct macvtap_queue *q; - int err; + int err = -ENODEV; - err = -ENODEV; + rtnl_lock(); + dev = dev_get_by_macvtap_minor(iminor(inode)); if (!dev) goto out; @@ -498,6 +495,7 @@ out: if (dev) dev_put(dev); + rtnl_unlock(); return err; } diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 526b94cea569..fdce1ea28790 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -157,6 +157,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) return bcm7xxx_28nm_afe_config_init(phydev); } +static int bcm7xxx_28nm_resume(struct phy_device *phydev) +{ + int ret; + + /* Re-apply workarounds coming out of suspend/resume */ + ret = bcm7xxx_28nm_config_init(phydev); + if (ret) + return ret; + + /* 28nm Gigabit PHYs come out of reset without any half-duplex + * or "hub" compliant advertised mode, fix that. This does not + * cause any problems with the PHY library since genphy_config_aneg() + * gracefully handles auto-negotiated and forced modes.
+ */ + return genphy_config_aneg(phydev); +} + static int phy_set_clr_bits(struct phy_device *dev, int location, int set_mask, int clr_mask) { @@ -212,7 +229,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev) } /* Workaround for putting the PHY in IDDQ mode, required - * for all BCM7XXX PHYs + * for all BCM7XXX 40nm and 65nm PHYs */ static int bcm7xxx_suspend(struct phy_device *phydev) { @@ -257,8 +274,7 @@ static struct phy_driver bcm7xxx_driver[] = { .config_init = bcm7xxx_28nm_afe_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_28nm_afe_config_init, + .resume = bcm7xxx_28nm_resume, .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM7439, @@ -270,8 +286,7 @@ static struct phy_driver bcm7xxx_driver[] = { .config_init = bcm7xxx_28nm_afe_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_28nm_afe_config_init, + .resume = bcm7xxx_28nm_resume, .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_ID_BCM7445, @@ -283,21 +298,7 @@ static struct phy_driver bcm7xxx_driver[] = { .config_init = bcm7xxx_28nm_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_28nm_config_init, - .driver = { .owner = THIS_MODULE }, -}, { - .name = "Broadcom BCM7XXX 28nm", - .phy_id = PHY_ID_BCM7XXX_28, - .phy_id_mask = PHY_BCM_OUI_MASK, - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_28nm_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_28nm_config_init, + .resume = bcm7xxx_28nm_afe_config_init, .driver = { .owner = THIS_MODULE }, }, { .phy_id = PHY_BCM_OUI_4, @@ -331,7 +332,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7366, 0xfffffff0, }, { PHY_ID_BCM7439, 0xfffffff0, }, { PHY_ID_BCM7445, 0xfffffff0, }, - { PHY_ID_BCM7XXX_28, 0xfffffc00 }, { PHY_BCM_OUI_4, 0xffff0000 }, { PHY_BCM_OUI_5, 0xffffff00 }, { } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fd0ea7c50ee6..011dbda2b2f1 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = 0x00fffff0, .name = "Micrel KSZ9031 Gigabit PHY", - .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause - | SUPPORTED_Asym_Pause), + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = ksz9031_config_init, .config_aneg = genphy_config_aneg, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c94e2a27446a..a854d38c231d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* First check if the EEE ability is supported */ eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS, phydev->addr); - if (eee_cap < 0) - return eee_cap; + if (eee_cap <= 0) + goto eee_exit_err; cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); if (!cap) - return -EPROTONOSUPPORT; + goto eee_exit_err; /* Check which link settings negotiated and verify it in * the EEE advertising registers. 
*/ eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN, phydev->addr); - if (eee_lp < 0) - return eee_lp; + if (eee_lp <= 0) + goto eee_exit_err; eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, phydev->addr); - if (eee_adv < 0) - return eee_adv; + if (eee_adv <= 0) + goto eee_exit_err; adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); idx = phy_find_setting(phydev->speed, phydev->duplex); if (!(lp & adv & settings[idx].setting)) - return -EPROTONOSUPPORT; + goto eee_exit_err; if (clk_stop_enable) { /* Configure the PHY to stop receiving xMII @@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) return 0; /* EEE supported */ } - +eee_exit_err: return -EPROTONOSUPPORT; } EXPORT_SYMBOL(phy_init_eee); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 180c49479c42..a4b08198fb9f 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -43,6 +43,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_init(struct phy_device *phydev) { + int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); + + if (rc < 0) + return rc; + + /* Enable energy detect mode for this SMSC Transceivers */ + rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, + rc | MII_LAN83C185_EDPWRDOWN); + if (rc < 0) + return rc; + + return smsc_phy_ack_interrupt(phydev); +} + +static int smsc_phy_reset(struct phy_device *phydev) +{ int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES); if (rc < 0) return rc; @@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev) rc = phy_read(phydev, MII_BMCR); } while (rc & BMCR_RESET); } - - rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); - if (rc < 0) - return rc; - - /* Enable energy detect mode for this SMSC Transceivers */ - rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, - rc | MII_LAN83C185_EDPWRDOWN); - if (rc < 0) - return rc; - - return smsc_phy_ack_interrupt (phydev); + return 0; } static int lan911x_config_init(struct phy_device *phydev) @@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = { .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .config_init = smsc_phy_config_init, + .soft_reset = smsc_phy_reset, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, @@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = { .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .config_init = smsc_phy_config_init, + .soft_reset = smsc_phy_reset, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, @@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = { .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .config_init = smsc_phy_config_init, + .soft_reset = smsc_phy_reset, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, @@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = { .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, .config_init = smsc_phy_config_init, + .soft_reset = smsc_phy_reset, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 9f194a0bef7c..37eed4d84e9c 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -5,8 +5,8 @@ comment "Host-side USB support is needed for USB Network Adapter support" depends on !USB && NET menuconfig USB_NET_DRIVERS - bool "USB Network Adapters" - default y + tristate "USB Network Adapters" + default USB if USB depends on USB && 
NET if USB_NET_DRIVERS diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 87f710476217..604ef210a4de 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -24,7 +24,7 @@ #include <net/ip6_checksum.h> /* Version Information */ -#define DRIVER_VERSION "v1.06.0 (2014/03/03)" +#define DRIVER_VERSION "v1.06.1 (2014/10/01)" #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" @@ -1949,10 +1949,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); } +static int rtl_start_rx(struct r8152 *tp) +{ + int i, ret = 0; + + INIT_LIST_HEAD(&tp->rx_done); + for (i = 0; i < RTL8152_MAX_RX; i++) { + INIT_LIST_HEAD(&tp->rx_info[i].list); + ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); + if (ret) + break; + } + + return ret; +} + +static int rtl_stop_rx(struct r8152 *tp) +{ + int i; + + for (i = 0; i < RTL8152_MAX_RX; i++) + usb_kill_urb(tp->rx_info[i].urb); + + return 0; +} + static int rtl_enable(struct r8152 *tp) { u32 ocp_data; - int i, ret; r8152b_reset_packet_filter(tp); @@ -1962,14 +1986,7 @@ static int rtl_enable(struct r8152 *tp) rxdy_gated_en(tp, false); - INIT_LIST_HEAD(&tp->rx_done); - ret = 0; - for (i = 0; i < RTL8152_MAX_RX; i++) { - INIT_LIST_HEAD(&tp->rx_info[i].list); - ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL); - } - - return ret; + return rtl_start_rx(tp); } static int rtl8152_enable(struct r8152 *tp) @@ -2019,7 +2036,7 @@ static int rtl8153_enable(struct r8152 *tp) return rtl_enable(tp); } -static void rtl8152_disable(struct r8152 *tp) +static void rtl_disable(struct r8152 *tp) { u32 ocp_data; int i; @@ -2053,8 +2070,7 @@ static void rtl8152_disable(struct r8152 *tp) mdelay(1); } - for (i = 0; i < RTL8152_MAX_RX; i++) - usb_kill_urb(tp->rx_info[i].urb); + rtl_stop_rx(tp); rtl8152_nic_reset(tp); } @@ -2185,28 +2201,6 @@ static void rtl_phy_reset(struct r8152 *tp) } } -static void rtl_clear_bp(struct r8152 *tp) -{ - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0); - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0); - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0); - ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0); - ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0); - mdelay(3); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0); - ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0); -} - -static void r8153_clear_bp(struct r8152 *tp) -{ - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0); - ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0); - rtl_clear_bp(tp); -} - static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@ -2232,6 +2226,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp) LINKENA | DIS_SDSAVE); } +static void rtl8152_disable(struct r8152 *tp) +{ + r8152b_disable_aldps(tp); + rtl_disable(tp); + r8152b_enable_aldps(tp); +} + static void r8152b_hw_phy_cfg(struct r8152 *tp) { u16 data; @@ -2242,11 +2243,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp) r8152_mdio_write(tp, MII_BMCR, data); } - r8152b_disable_aldps(tp); - - rtl_clear_bp(tp); - - r8152b_enable_aldps(tp); set_bit(PHY_RESET, &tp->flags); } @@ -2255,9 +2251,6 @@ static void r8152b_exit_oob(struct r8152 *tp) u32 ocp_data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) - return; - ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, 
PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); @@ -2347,7 +2340,7 @@ static void r8152b_enter_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); - rtl8152_disable(tp); + rtl_disable(tp); for (i = 0; i < 1000; i++) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); @@ -2400,8 +2393,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) r8152_mdio_write(tp, MII_BMCR, data); } - r8153_clear_bp(tp); - if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); data &= ~CTAP_SHORT_EN; @@ -2485,9 +2476,6 @@ static void r8153_first_init(struct r8152 *tp) u32 ocp_data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) - return; - rxdy_gated_en(tp, true); r8153_teredo_off(tp); @@ -2560,7 +2548,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); - rtl8152_disable(tp); + rtl_disable(tp); for (i = 0; i < 1000; i++) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); @@ -2624,6 +2612,13 @@ static void r8153_enable_aldps(struct r8152 *tp) ocp_reg_write(tp, OCP_POWER_CFG, data); } +static void rtl8153_disable(struct r8152 *tp) +{ + r8153_disable_aldps(tp); + rtl_disable(tp); + r8153_enable_aldps(tp); +} + static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) { u16 bmcr, anar, gbcr; @@ -2714,6 +2709,16 @@ out: return ret; } +static void rtl8152_up(struct r8152 *tp) +{ + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8152b_disable_aldps(tp); + r8152b_exit_oob(tp); + r8152b_enable_aldps(tp); +} + static void rtl8152_down(struct r8152 *tp) { if (test_bit(RTL8152_UNPLUG, &tp->flags)) { @@ -2727,6 +2732,16 @@ static void rtl8152_down(struct r8152 *tp) r8152b_enable_aldps(tp); } +static void rtl8153_up(struct r8152 *tp) +{ + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153_disable_aldps(tp); + r8153_first_init(tp); + r8153_enable_aldps(tp); +} + static void rtl8153_down(struct r8152 *tp) { if (test_bit(RTL8152_UNPLUG, &tp->flags)) { @@ -2946,6 +2961,8 @@ static void r8152b_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + r8152b_disable_aldps(tp); + if (tp->version == RTL_VER_01) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); ocp_data &= ~LED_MODE_MASK; @@ -2984,6 +3001,7 @@ static void r8153_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + r8153_disable_aldps(tp); r8153_u1u2en(tp, false); for (i = 0; i < 500; i++) { @@ -3055,13 +3073,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); + tasklet_disable(&tp->tl); if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { + rtl_stop_rx(tp); rtl_runtime_suspend_enable(tp, true); } else { - tasklet_disable(&tp->tl); tp->rtl_ops.down(tp); - tasklet_enable(&tp->tl); } + tasklet_enable(&tp->tl); } return 0; @@ -3080,17 +3099,18 @@ static int rtl8152_resume(struct usb_interface *intf) if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); + set_bit(WORK_ENABLE, &tp->flags); if (tp->speed & LINK_STATUS) - tp->rtl_ops.disable(tp); + rtl_start_rx(tp); } else { tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, tp->mii.supports_gmii ? 
SPEED_1000 : SPEED_100, DUPLEX_FULL); + tp->speed = 0; + netif_carrier_off(tp->netdev); + set_bit(WORK_ENABLE, &tp->flags); } - tp->speed = 0; - netif_carrier_off(tp->netdev); - set_bit(WORK_ENABLE, &tp->flags); usb_submit_urb(tp->intr_urb, GFP_KERNEL); } @@ -3377,7 +3397,7 @@ static void rtl8153_unload(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8153_power_cut_en(tp, true); + r8153_power_cut_en(tp, false); } static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) @@ -3392,7 +3412,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) ops->init = r8152b_init; ops->enable = rtl8152_enable; ops->disable = rtl8152_disable; - ops->up = r8152b_exit_oob; + ops->up = rtl8152_up; ops->down = rtl8152_down; ops->unload = rtl8152_unload; ret = 0; @@ -3400,8 +3420,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) case PRODUCT_ID_RTL8153: ops->init = r8153_init; ops->enable = rtl8153_enable; - ops->disable = rtl8152_disable; - ops->up = r8153_first_init; + ops->disable = rtl8153_disable; + ops->up = rtl8153_up; ops->down = rtl8153_down; ops->unload = rtl8153_unload; ret = 0; @@ -3416,8 +3436,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) case PRODUCT_ID_SAMSUNG: ops->init = r8153_init; ops->enable = rtl8153_enable; - ops->disable = rtl8152_disable; - ops->up = r8153_first_init; + ops->disable = rtl8153_disable; + ops->up = rtl8153_up; ops->down = rtl8153_down; ops->unload = rtl8153_unload; ret = 0; @@ -3530,7 +3550,11 @@ static void rtl8152_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (tp) { - set_bit(RTL8152_UNPLUG, &tp->flags); + struct usb_device *udev = tp->udev; + + if (udev->state == USB_STATE_NOTATTACHED) + set_bit(RTL8152_UNPLUG, &tp->flags); + tasklet_kill(&tp->tl); unregister_netdev(tp->netdev); tp->rtl_ops.unload(tp); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index b76f7dcde0db..6dfcbf523936 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -36,7 +36,7 @@ char vmxnet3_driver_name[] = "vmxnet3"; * PCI Device ID Table * Last entry must be all 0s */ -static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = { +static const struct pci_device_id vmxnet3_pciid_table[] = { {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, {0} }; @@ -766,7 +766,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, gdesc->dword[3] = 0; netdev_dbg(adapter->netdev, - "txd[%u]: 0x%llu %u %u\n", + "txd[%u]: 0x%llx %u %u\n", tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]); vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); @@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev) if (!netdev_mc_empty(netdev)) { new_table = vmxnet3_copy_mc(netdev); if (new_table) { - new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTableLen = cpu_to_le16( netdev_mc_count(netdev) * ETH_ALEN); new_table_pa = dma_map_single( @@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev) new_table, rxConf->mfTableLen, PCI_DMA_TODEVICE); + } + + if (new_table_pa) { + new_mode |= VMXNET3_RXM_MCAST; rxConf->mfTablePA = cpu_to_le64(new_table_pa); } else { - netdev_info(netdev, "failed to copy mcast list" - ", setting ALL_MULTI\n"); + netdev_info(netdev, + "failed to copy mcast list, setting ALL_MULTI\n"); new_mode |= VMXNET3_RXM_ALL_MULTI; } } - if (!(new_mode & VMXNET3_RXM_MCAST)) { rxConf->mfTableLen = 0; rxConf->mfTablePA = 0; @@ -2091,11 +2093,10 
@@ vmxnet3_set_mc(struct net_device *netdev) VMXNET3_CMD_UPDATE_MAC_FILTERS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); - if (new_table) { + if (new_table_pa) dma_unmap_single(&adapter->pdev->dev, new_table_pa, rxConf->mfTableLen, PCI_DMA_TODEVICE); - kfree(new_table); - } + kfree(new_table); } void diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 29ee77f2c97f..3759479f959a 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01020000 +#define VMXNET3_DRIVER_VERSION_NUM 0x01020100 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1fb7b37d1402..beb377b2d4b7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) } else if (vxlan->flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = tip, - .sa.sa_family = AF_INET, + .sin.sin_family = AF_INET, }; vxlan_ip_miss(dev, &ipa); @@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) } else if (vxlan->flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin6.sin6_addr = msg->target, - .sa.sa_family = AF_INET6, + .sin6.sin6_family = AF_INET6, }; vxlan_ip_miss(dev, &ipa); @@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = pip->daddr, - .sa.sa_family = AF_INET, + .sin.sin_family = AF_INET, }; vxlan_ip_miss(dev, &ipa); @@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin6.sin6_addr = pip6->daddr, - .sa.sa_family = AF_INET6, + .sin6.sin6_family = AF_INET6, }; vxlan_ip_miss(dev, &ipa); diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 288610df205c..08223569cebd 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -2039,7 +2039,7 @@ static int __init dscc4_setup(char *str) __setup("dscc4.setup=", dscc4_setup); #endif -static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = { +static const struct pci_device_id dscc4_pci_tbl[] = { { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 1f041271f7fe..44541dbc5c28 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -531,7 +531,7 @@ do { \ /* * PCI ID lookup table */ -static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = { +static const struct pci_device_id fst_pci_dev_id[] = { {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FST_TYPE_T2P}, diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 7cc64eac0fa3..e5c7e6165a4b 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -90,7 +90,7 @@ #define LMI_ANSI_LENGTH 14 -typedef struct { +struct fr_hdr { #if defined(__LITTLE_ENDIAN_BITFIELD) unsigned ea1: 1; unsigned cr: 1; @@ -112,14 +112,14 @@ typedef struct { unsigned de: 1; unsigned ea2: 1; #endif -}__packed fr_hdr; +} __packed; -typedef struct pvc_device_struct { +struct 
pvc_device { struct net_device *frad; struct net_device *main; struct net_device *ether; /* bridged Ethernet interface */ - struct pvc_device_struct *next; /* Sorted in ascending DLCI order */ + struct pvc_device *next; /* Sorted in ascending DLCI order */ int dlci; int open_count; @@ -132,11 +132,11 @@ typedef struct pvc_device_struct { unsigned int becn: 1; unsigned int bandwidth; /* Cisco LMI reporting only */ }state; -}pvc_device; +}; struct frad_state { fr_proto settings; - pvc_device *first_pvc; + struct pvc_device *first_pvc; int dce_pvc_count; struct timer_list timer; @@ -174,9 +174,9 @@ static inline struct frad_state* state(hdlc_device *hdlc) } -static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) +static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci) { - pvc_device *pvc = state(hdlc)->first_pvc; + struct pvc_device *pvc = state(hdlc)->first_pvc; while (pvc) { if (pvc->dlci == dlci) @@ -190,10 +190,10 @@ static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) } -static pvc_device* add_pvc(struct net_device *dev, u16 dlci) +static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci) { hdlc_device *hdlc = dev_to_hdlc(dev); - pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc; + struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc; while (*pvc_p) { if ((*pvc_p)->dlci == dlci) @@ -203,7 +203,7 @@ static pvc_device* add_pvc(struct net_device *dev, u16 dlci) pvc_p = &(*pvc_p)->next; } - pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC); + pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC); #ifdef DEBUG_PVC printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev); #endif @@ -218,13 +218,13 @@ static pvc_device* add_pvc(struct net_device *dev, u16 dlci) } -static inline int pvc_is_used(pvc_device *pvc) +static inline int pvc_is_used(struct pvc_device *pvc) { return pvc->main || pvc->ether; } -static inline void pvc_carrier(int on, pvc_device *pvc) +static inline void pvc_carrier(int on, struct pvc_device *pvc) { if (on) { if (pvc->main) @@ -246,11 +246,11 @@ static inline void pvc_carrier(int on, pvc_device *pvc) static inline void delete_unused_pvcs(hdlc_device *hdlc) { - pvc_device **pvc_p = &state(hdlc)->first_pvc; + struct pvc_device **pvc_p = &state(hdlc)->first_pvc; while (*pvc_p) { if (!pvc_is_used(*pvc_p)) { - pvc_device *pvc = *pvc_p; + struct pvc_device *pvc = *pvc_p; #ifdef DEBUG_PVC printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc); #endif @@ -263,7 +263,8 @@ static inline void delete_unused_pvcs(hdlc_device *hdlc) } -static inline struct net_device** get_dev_p(pvc_device *pvc, int type) +static inline struct net_device **get_dev_p(struct pvc_device *pvc, + int type) { if (type == ARPHRD_ETHER) return &pvc->ether; @@ -342,7 +343,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) static int pvc_open(struct net_device *dev) { - pvc_device *pvc = dev->ml_priv; + struct pvc_device *pvc = dev->ml_priv; if ((pvc->frad->flags & IFF_UP) == 0) return -EIO; /* Frad must be UP in order to activate PVC */ @@ -362,7 +363,7 @@ static int pvc_open(struct net_device *dev) static int pvc_close(struct net_device *dev) { - pvc_device *pvc = dev->ml_priv; + struct pvc_device *pvc = dev->ml_priv; if (--pvc->open_count == 0) { hdlc_device *hdlc = dev_to_hdlc(pvc->frad); @@ -381,7 +382,7 @@ static int pvc_close(struct net_device *dev) static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - pvc_device *pvc = dev->ml_priv; + struct pvc_device *pvc = dev->ml_priv; fr_proto_pvc_info info; if (ifr->ifr_settings.type == 
IF_GET_PROTO) { @@ -409,7 +410,7 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) { - pvc_device *pvc = dev->ml_priv; + struct pvc_device *pvc = dev->ml_priv; if (pvc->state.active) { if (dev->type == ARPHRD_ETHER) { @@ -444,7 +445,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static inline void fr_log_dlci_active(pvc_device *pvc) +static inline void fr_log_dlci_active(struct pvc_device *pvc) { netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n", pvc->dlci, @@ -469,7 +470,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) { hdlc_device *hdlc = dev_to_hdlc(dev); struct sk_buff *skb; - pvc_device *pvc = state(hdlc)->first_pvc; + struct pvc_device *pvc = state(hdlc)->first_pvc; int lmi = state(hdlc)->settings.lmi; int dce = state(hdlc)->settings.dce; int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH; @@ -566,7 +567,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) static void fr_set_link_state(int reliable, struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); - pvc_device *pvc = state(hdlc)->first_pvc; + struct pvc_device *pvc = state(hdlc)->first_pvc; state(hdlc)->reliable = reliable; if (reliable) { @@ -652,7 +653,7 @@ static void fr_timer(unsigned long arg) static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) { hdlc_device *hdlc = dev_to_hdlc(dev); - pvc_device *pvc; + struct pvc_device *pvc; u8 rxseq, txseq; int lmi = state(hdlc)->settings.lmi; int dce = state(hdlc)->settings.dce; @@ -869,10 +870,10 @@ static int fr_rx(struct sk_buff *skb) { struct net_device *frad = skb->dev; hdlc_device *hdlc = dev_to_hdlc(frad); - fr_hdr *fh = (fr_hdr*)skb->data; + struct fr_hdr *fh = (struct fr_hdr *)skb->data; u8 *data = skb->data; u16 dlci; - pvc_device *pvc; + struct pvc_device *pvc; struct net_device *dev = NULL; if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) @@ -1028,7 +1029,7 @@ static void fr_stop(struct net_device *dev) static void fr_close(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); - pvc_device *pvc = state(hdlc)->first_pvc; + struct pvc_device *pvc = state(hdlc)->first_pvc; while (pvc) { /* Shutdown all PVCs for this FRAD */ if (pvc->main) @@ -1060,7 +1061,7 @@ static const struct net_device_ops pvc_ops = { static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) { hdlc_device *hdlc = dev_to_hdlc(frad); - pvc_device *pvc; + struct pvc_device *pvc; struct net_device *dev; int used; @@ -1117,7 +1118,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) { - pvc_device *pvc; + struct pvc_device *pvc; struct net_device *dev; if ((pvc = find_pvc(hdlc, dlci)) == NULL) @@ -1145,13 +1146,13 @@ static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) static void fr_destroy(struct net_device *frad) { hdlc_device *hdlc = dev_to_hdlc(frad); - pvc_device *pvc = state(hdlc)->first_pvc; + struct pvc_device *pvc = state(hdlc)->first_pvc; state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */ state(hdlc)->dce_pvc_count = 0; state(hdlc)->dce_changed = 1; while (pvc) { - pvc_device *next = pvc->next; + struct pvc_device *next = pvc->next; /* destructors will free_netdev() main and ether */ if (pvc->main) unregister_netdevice(pvc->main); diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 
b2fe9bb89633..bea0f313a7a8 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -76,7 +76,7 @@ static int LMC_PKT_BUF_SZ = 1542; -static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = { +static const struct pci_device_id lmc_pci_tbl[] = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, PCI_VENDOR_ID_LMC, PCI_ANY_ID }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index 5b72f7f8c516..db363856e0b5 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -477,7 +477,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev, -static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = { +static const struct pci_device_id pc300_pci_tbl[] = { { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID, diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index fe4e3ece3c42..e8455621390e 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -414,7 +414,7 @@ static int pci200_pci_init_one(struct pci_dev *pdev, -static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = { +static const struct pci_device_id pci200_pci_tbl[] = { { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 }, { 0, } diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index f76aa9081585..e73f13857846 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -54,24 +54,24 @@ static const char* version = "wanXL serial card driver version: 0.48"; #define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */ -typedef struct { +struct port { struct net_device *dev; - struct card_t *card; + struct card *card; spinlock_t lock; /* for wanxl_xmit */ int node; /* physical port #0 - 3 */ unsigned int clock_type; int tx_in, tx_out; struct sk_buff *tx_skbs[TX_BUFFERS]; -}port_t; +}; -typedef struct { +struct card_status { desc_t rx_descs[RX_QUEUE_LENGTH]; port_status_t port_status[4]; -}card_status_t; +}; -typedef struct card_t { +struct card { int n_ports; /* 1, 2 or 4 ports */ u8 irq; @@ -79,20 +79,20 @@ typedef struct card_t { struct pci_dev *pdev; /* for pci_name(pdev) */ int rx_in; struct sk_buff *rx_skbs[RX_QUEUE_LENGTH]; - card_status_t *status; /* shared between host and card */ + struct card_status *status; /* shared between host and card */ dma_addr_t status_address; - port_t ports[0]; /* 1 - 4 port_t structures follow */ -}card_t; + struct port ports[0]; /* 1 - 4 port structures follow */ +}; -static inline port_t* dev_to_port(struct net_device *dev) +static inline struct port *dev_to_port(struct net_device *dev) { - return (port_t *)dev_to_hdlc(dev)->priv; + return (struct port *)dev_to_hdlc(dev)->priv; } -static inline port_status_t* get_status(port_t *port) +static inline port_status_t *get_status(struct port *port) { return &port->card->status->port_status[port->node]; } @@ -115,7 +115,7 @@ static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr, /* Cable and/or personality module change interrupt service */ -static inline void wanxl_cable_intr(port_t *port) +static inline void wanxl_cable_intr(struct port *port) { u32 value = get_status(port)->cable; int valid = 1; @@ -160,7 +160,7 @@ static inline void wanxl_cable_intr(port_t *port) /* Transmit complete interrupt service */ -static inline void wanxl_tx_intr(port_t *port) +static inline void wanxl_tx_intr(struct port *port) { struct net_device *dev = 
port->dev; while (1) { @@ -193,7 +193,7 @@ static inline void wanxl_tx_intr(port_t *port) /* Receive complete interrupt service */ -static inline void wanxl_rx_intr(card_t *card) +static inline void wanxl_rx_intr(struct card *card) { desc_t *desc; while (desc = &card->status->rx_descs[card->rx_in], @@ -203,7 +203,7 @@ static inline void wanxl_rx_intr(card_t *card) pci_name(card->pdev)); else { struct sk_buff *skb = card->rx_skbs[card->rx_in]; - port_t *port = &card->ports[desc->stat & + struct port *port = &card->ports[desc->stat & PACKET_PORT_MASK]; struct net_device *dev = port->dev; @@ -245,7 +245,7 @@ static inline void wanxl_rx_intr(card_t *card) static irqreturn_t wanxl_intr(int irq, void* dev_id) { - card_t *card = dev_id; + struct card *card = dev_id; int i; u32 stat; int handled = 0; @@ -272,7 +272,7 @@ static irqreturn_t wanxl_intr(int irq, void* dev_id) static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev) { - port_t *port = dev_to_port(dev); + struct port *port = dev_to_port(dev); desc_t *desc; spin_lock(&port->lock); @@ -319,7 +319,7 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev) static int wanxl_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) { - port_t *port = dev_to_port(dev); + struct port *port = dev_to_port(dev); if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) @@ -343,7 +343,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { const size_t size = sizeof(sync_serial_settings); sync_serial_settings line; - port_t *port = dev_to_port(dev); + struct port *port = dev_to_port(dev); if (cmd != SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); @@ -393,7 +393,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static int wanxl_open(struct net_device *dev) { - port_t *port = dev_to_port(dev); + struct port *port = dev_to_port(dev); u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD; unsigned long timeout; int i; @@ -429,7 +429,7 @@ static int wanxl_open(struct net_device *dev) static int wanxl_close(struct net_device *dev) { - port_t *port = dev_to_port(dev); + struct port *port = dev_to_port(dev); unsigned long timeout; int i; @@ -467,7 +467,7 @@ static int wanxl_close(struct net_device *dev) static struct net_device_stats *wanxl_get_stats(struct net_device *dev) { - port_t *port = dev_to_port(dev); + struct port *port = dev_to_port(dev); dev->stats.rx_over_errors = get_status(port)->rx_overruns; dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors; @@ -478,7 +478,7 @@ static struct net_device_stats *wanxl_get_stats(struct net_device *dev) -static int wanxl_puts_command(card_t *card, u32 cmd) +static int wanxl_puts_command(struct card *card, u32 cmd) { unsigned long timeout = jiffies + 5 * HZ; @@ -495,7 +495,7 @@ static int wanxl_puts_command(card_t *card, u32 cmd) -static void wanxl_reset(card_t *card) +static void wanxl_reset(struct card *card) { u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET; @@ -511,7 +511,7 @@ static void wanxl_reset(card_t *card) static void wanxl_pci_remove_one(struct pci_dev *pdev) { - card_t *card = pci_get_drvdata(pdev); + struct card *card = pci_get_drvdata(pdev); int i; for (i = 0; i < card->n_ports; i++) { @@ -537,7 +537,7 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev) iounmap(card->plx); if (card->status) - pci_free_consistent(pdev, sizeof(card_status_t), + pci_free_consistent(pdev, sizeof(struct card_status), card->status, card->status_address); 
pci_release_regions(pdev); @@ -560,7 +560,7 @@ static const struct net_device_ops wanxl_ops = { static int wanxl_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - card_t *card; + struct card *card; u32 ramsize, stat; unsigned long timeout; u32 plx_phy; /* PLX PCI base address */ @@ -601,7 +601,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, default: ports = 4; } - alloc_size = sizeof(card_t) + ports * sizeof(port_t); + alloc_size = sizeof(struct card) + ports * sizeof(struct port); card = kzalloc(alloc_size, GFP_KERNEL); if (card == NULL) { pci_release_regions(pdev); @@ -612,7 +612,8 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, card); card->pdev = pdev; - card->status = pci_alloc_consistent(pdev, sizeof(card_status_t), + card->status = pci_alloc_consistent(pdev, + sizeof(struct card_status), &card->status_address); if (card->status == NULL) { wanxl_pci_remove_one(pdev); @@ -766,7 +767,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, for (i = 0; i < ports; i++) { hdlc_device *hdlc; - port_t *port = &card->ports[i]; + struct port *port = &card->ports[i]; struct net_device *dev = alloc_hdlcdev(port); if (!dev) { pr_err("%s: unable to allocate memory\n", @@ -807,7 +808,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, return 0; } -static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = { +static const struct pci_device_id wanxl_pci_tbl[] = { { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID, diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index b2137e8f7ca6..16604bdf5197 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -189,6 +189,7 @@ config USB_NET_RNDIS_WLAN tristate "Wireless RNDIS USB support" depends on USB depends on CFG80211 + select USB_NET_DRIVERS select USB_USBNET select USB_NET_CDCETHER select USB_NET_RNDIS_HOST diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index f35f93c31b09..17fcaabb2687 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -41,7 +41,7 @@ static unsigned int rx_ring_size __read_mostly = 16; module_param(tx_ring_size, uint, 0); module_param(rx_ring_size, uint, 0); -static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = { +static const struct pci_device_id adm8211_pci_id_table[] = { /* ADMtek ADM8211 */ { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ { PCI_DEVICE(0x1200, 0x8201) }, /* ? 
*/ diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index b39807579a8a..e71a2ce7a448 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -57,7 +57,7 @@ #define DRV_NAME "airo" #ifdef CONFIG_PCI -static DEFINE_PCI_DEVICE_TABLE(card_ids) = { +static const struct pci_device_id card_ids[] = { { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID }, { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, }, diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c index 7e9ede6c5798..d9ed22b4cc6b 100644 --- a/drivers/net/wireless/airo_cs.c +++ b/drivers/net/wireless/airo_cs.c @@ -56,18 +56,18 @@ static void airo_release(struct pcmcia_device *link); static void airo_detach(struct pcmcia_device *p_dev); -typedef struct local_info_t { +struct local_info { struct net_device *eth_dev; -} local_info_t; +}; static int airo_probe(struct pcmcia_device *p_dev) { - local_info_t *local; + struct local_info *local; dev_dbg(&p_dev->dev, "airo_attach()\n"); /* Allocate space for private device-specific data */ - local = kzalloc(sizeof(local_info_t), GFP_KERNEL); + local = kzalloc(sizeof(*local), GFP_KERNEL); if (!local) return -ENOMEM; @@ -82,10 +82,11 @@ static void airo_detach(struct pcmcia_device *link) airo_release(link); - if (((local_info_t *)link->priv)->eth_dev) { - stop_airo_card(((local_info_t *)link->priv)->eth_dev, 0); + if (((struct local_info *)link->priv)->eth_dev) { + stop_airo_card(((struct local_info *)link->priv)->eth_dev, + 0); } - ((local_info_t *)link->priv)->eth_dev = NULL; + ((struct local_info *)link->priv)->eth_dev = NULL; kfree(link->priv); } /* airo_detach */ @@ -101,7 +102,7 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) static int airo_config(struct pcmcia_device *link) { - local_info_t *dev; + struct local_info *dev; int ret; dev = link->priv; @@ -121,10 +122,10 @@ static int airo_config(struct pcmcia_device *link) ret = pcmcia_enable_device(link); if (ret) goto failed; - ((local_info_t *)link->priv)->eth_dev = + ((struct local_info *)link->priv)->eth_dev = init_airo_card(link->irq, link->resource[0]->start, 1, &link->dev); - if (!((local_info_t *)link->priv)->eth_dev) + if (!((struct local_info *)link->priv)->eth_dev) goto failed; return 0; @@ -142,7 +143,7 @@ static void airo_release(struct pcmcia_device *link) static int airo_suspend(struct pcmcia_device *link) { - local_info_t *local = link->priv; + struct local_info *local = link->priv; netif_device_detach(local->eth_dev); @@ -151,7 +152,7 @@ static int airo_suspend(struct pcmcia_device *link) static int airo_resume(struct pcmcia_device *link) { - local_info_t *local = link->priv; + struct local_info *local = link->priv; if (link->open) { reset_airo_card(local->eth_dev); diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 334c2ece855a..da92bfa76b7c 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv) kfree_skb(priv->rx_skb); - usb_put_dev(priv->udev); - at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", __func__); ieee80211_free_hw(priv->hw); @@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface) wiphy_info(priv->hw->wiphy, "disconnecting\n"); at76_delete_device(priv); + usb_put_dev(priv->udev); dev_info(&interface->dev, "disconnected\n"); } diff --git a/drivers/net/wireless/ath/ath10k/pci.c 
b/drivers/net/wireless/ath/ath10k/pci.c index 0ffff205478d..3376963a4862 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -63,7 +63,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define QCA988X_2_0_DEVICE_ID (0x003c) -static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { +static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ {0} }; diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index f77ef36acf87..48a6a69b57bc 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -53,7 +53,7 @@ #define ATH_POLARITY(data) ((data) & 0xff) /* Devices we match on for LED config info (typically laptops) */ -static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = { +static const struct pci_device_id ath5k_led_devices[] = { /* AR5211 */ { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) }, /* HP Compaq nc6xx, nc4000, nx6000 */ diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 859db7c34f87..c6156cc38940 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -28,7 +28,7 @@ #include "reg.h" /* Known PCI ids */ -static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = { +static const struct pci_device_id ath5k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c index 733be5178481..6ad44470d0f2 100644 --- a/drivers/net/wireless/ath/ath9k/common-beacon.c +++ b/drivers/net/wireless/ath/ath9k/common-beacon.c @@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, struct ath9k_beacon_state *bs) { struct ath_common *common = ath9k_hw_common(ah); - int dtim_intval, sleepduration; + int dtim_intval; u64 tsf; /* No need to configure beacon if we are not associated */ @@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, * last beacon we received (which may be none). 
*/ dtim_intval = conf->intval * conf->dtim_period; - sleepduration = ah->hw->conf.listen_interval * conf->intval; /* * Pull nexttbtt forward to reflect the current @@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, */ bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), - sleepduration)); + conf->intval)); if (bs->bs_sleepduration > bs->bs_dtimperiod) bs->bs_sleepduration = bs->bs_dtimperiod; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index bb86eb2ffc95..f0484b1b617e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, struct ath_hw *ah = common->ah; struct ath_htc_rx_status *rxstatus; struct ath_rx_status rx_stats; - bool decrypt_error; + bool decrypt_error = false; if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e6ac8d2e610c..4b148bbb2bf6 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev) * touch anything. Note this can happen early * on if the IRQ is shared. */ - if (test_bit(ATH_OP_INVALID, &common->op_flags)) + if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) return IRQ_NONE; /* shared irq, not for us */ diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7a2b2c5caced..c018dea0b2e8 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -23,7 +23,7 @@ #include <linux/module.h> #include "ath9k.h" -static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { +static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c index 5fe29b9f8fa2..8f68426ca653 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.c +++ b/drivers/net/wireless/ath/ath9k/spectral.c @@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, if (strncmp("trigger", buf, 7) == 0) { ath9k_spectral_scan_trigger(sc->hw); - } else if (strncmp("background", buf, 9) == 0) { + } else if (strncmp("background", buf, 10) == 0) { ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); } else if (strncmp("chanscan", buf, 8) == 0) { diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 8596aba34f96..237d0cda1bcb 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -256,6 +256,7 @@ struct ar9170 { atomic_t rx_work_urbs; atomic_t rx_pool_urbs; kernel_ulong_t features; + bool usb_ep_cmd_is_bulk; /* firmware settings */ struct completion fw_load_wait; diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index f35c7f30f9a6..c9f93310c0d6 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd, goto err_free; } - usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev, - 
AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4, - carl9170_usb_cmd_complete, ar, 1); + if (ar->usb_ep_cmd_is_bulk) + usb_fill_bulk_urb(urb, ar->udev, + usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD), + cmd, cmd->hdr.len + 4, + carl9170_usb_cmd_complete, ar); + else + usb_fill_int_urb(urb, ar->udev, + usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD), + cmd, cmd->hdr.len + 4, + carl9170_usb_cmd_complete, ar, 1); if (free_buf) urb->transfer_flags |= URB_FREE_BUFFER; @@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw, static int carl9170_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_endpoint_descriptor *ep; struct ar9170 *ar; struct usb_device *udev; - int err; + int i, err; err = usb_reset_device(interface_to_usbdev(intf)); if (err) @@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf, ar->intf = intf; ar->features = id->driver_info; + /* We need to remember the type of endpoint 4 because it differs + * between high- and full-speed configuration. The high-speed + * configuration specifies it as interrupt and the full-speed + * configuration as bulk endpoint. This information is required + * later when sending urbs to that endpoint. + */ + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) { + ep = &intf->cur_altsetting->endpoint[i].desc; + + if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD && + usb_endpoint_dir_out(ep) && + usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK) + ar->usb_ep_cmd_is_bulk = true; + } + usb_set_intfdata(intf, ar); SET_IEEE80211_DEV(ar->hw, &intf->dev); diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 1fe41af81a59..9183f1cf89a7 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -2598,11 +2598,11 @@ static const iw_handler atmel_private_handler[] = NULL, /* SIOCIWFIRSTPRIV */ }; -typedef struct atmel_priv_ioctl { +struct atmel_priv_ioctl { char id[32]; unsigned char __user *data; unsigned short len; -} atmel_priv_ioctl; +}; #define ATMELFWL SIOCIWFIRSTPRIV #define ATMELIDIFC ATMELFWL + 1 @@ -2615,7 +2615,7 @@ static const struct iw_priv_args atmel_private_args[] = { .cmd = ATMELFWL, .set_args = IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED - | sizeof (atmel_priv_ioctl), + | sizeof(struct atmel_priv_ioctl), .get_args = IW_PRIV_TYPE_NONE, .name = "atmelfwl" }, { @@ -2645,7 +2645,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { int i, rc = 0; struct atmel_private *priv = netdev_priv(dev); - atmel_priv_ioctl com; + struct atmel_priv_ioctl com; struct iwreq *wrq = (struct iwreq *) rq; unsigned char *new_firmware; char domain[REGDOMAINSZ + 1]; diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c index 5cd97e3cbee3..bcf1f274a251 100644 --- a/drivers/net/wireless/atmel_pci.c +++ b/drivers/net/wireless/atmel_pci.c @@ -30,7 +30,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.") MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards"); -static DEFINE_PCI_DEVICE_TABLE(card_ids) = { +static const struct pci_device_id card_ids[] = { { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID }, { 0, } }; diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b8e2561ea645..fe3dc126b149 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -27,10 +27,17 @@ config BRCMFMAC one of the bus interface support. 
If you choose to build a module, it'll be called brcmfmac.ko. +config BRCMFMAC_PROTO_BCDC + bool + +config BRCMFMAC_PROTO_MSGBUF + bool + config BRCMFMAC_SDIO bool "SDIO bus interface support for FullMAC driver" depends on (MMC = y || MMC = BRCMFMAC) depends on BRCMFMAC + select BRCMFMAC_PROTO_BCDC select FW_LOADER default y ---help--- @@ -42,6 +49,7 @@ config BRCMFMAC_USB bool "USB bus interface support for FullMAC driver" depends on (USB = y || USB = BRCMFMAC) depends on BRCMFMAC + select BRCMFMAC_PROTO_BCDC select FW_LOADER ---help--- This option enables the USB bus interface support for Broadcom @@ -52,6 +60,8 @@ config BRCMFMAC_PCIE bool "PCIE bus interface support for FullMAC driver" depends on BRCMFMAC depends on PCI + depends on HAS_DMA + select BRCMFMAC_PROTO_MSGBUF select FW_LOADER ---help--- This option enables the PCIE bus interface support for Broadcom diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index c35adf4bc70b..90a977fe9a64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -30,16 +30,18 @@ brcmfmac-objs += \ fwsignal.o \ p2p.o \ proto.o \ - bcdc.o \ - commonring.o \ - flowring.o \ - msgbuf.o \ dhd_common.o \ dhd_linux.o \ firmware.o \ feature.o \ btcoex.o \ vendor.o +brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \ + bcdc.o +brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \ + commonring.o \ + flowring.o \ + msgbuf.o brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ dhd_sdio.o \ bcmsdh.o diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h index 17e8c039ff32..6003179c0ceb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h @@ -16,9 +16,12 @@ #ifndef BRCMFMAC_BCDC_H #define BRCMFMAC_BCDC_H - +#ifdef CONFIG_BRCMFMAC_PROTO_BCDC int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); - +#else +static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } +static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} +#endif #endif /* BRCMFMAC_BCDC_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 4f1daabc551b..44fc85f68f7a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, ifevent->action, ifevent->ifidx, ifevent->bssidx, ifevent->flags, ifevent->role); - if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) { + /* The P2P Device interface event must not be ignored + * contrary to what firmware tells us. The only way to + * distinguish the P2P Device is by looking at the ifidx + * and bssidx received. 
+ */ + if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) && + (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) { brcmf_dbg(EVENT, "event can be ignored\n"); return; } @@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, return; } - if (ifevent->action == BRCMF_E_IF_CHANGE) + if (ifp && ifevent->action == BRCMF_E_IF_CHANGE) brcmf_fws_reset_interface(ifp); err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); - if (ifevent->action == BRCMF_E_IF_DEL) { + if (ifp && ifevent->action == BRCMF_E_IF_DEL) { brcmf_fws_del_interface(ifp); brcmf_del_if(drvr, ifevent->bssidx); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index dd20b1862d44..cbf033f59109 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h @@ -172,6 +172,8 @@ enum brcmf_fweh_event_code { #define BRCMF_E_IF_ROLE_STA 0 #define BRCMF_E_IF_ROLE_AP 1 #define BRCMF_E_IF_ROLE_WDS 2 +#define BRCMF_E_IF_ROLE_P2P_GO 3 +#define BRCMF_E_IF_ROLE_P2P_CLIENT 4 /** * definitions for event packet validation. diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 535c7eb01b3a..8f8b9373de95 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -1318,6 +1318,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings; msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings * sizeof(*msgbuf->flowring_dma_handle), GFP_ATOMIC); + if (!msgbuf->flowring_dma_handle) + goto fail; msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset; msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost; @@ -1362,6 +1364,7 @@ fail: kfree(msgbuf->flow_map); kfree(msgbuf->txstatus_done_map); brcmf_msgbuf_release_pktids(msgbuf); + kfree(msgbuf->flowring_dma_handle); if (msgbuf->ioctbuf) dma_free_coherent(drvr->bus_if->dev, BRCMF_TX_IOCTL_MAX_MSG_SIZE, @@ -1391,6 +1394,7 @@ void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) BRCMF_TX_IOCTL_MAX_MSG_SIZE, msgbuf->ioctbuf, msgbuf->ioctbuf_handle); brcmf_msgbuf_release_pktids(msgbuf); + kfree(msgbuf->flowring_dma_handle); kfree(msgbuf); drvr->proto->pd = NULL; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h index f901ae52bf2b..77a51b8c1e12 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h @@ -15,6 +15,7 @@ #ifndef BRCMFMAC_MSGBUF_H #define BRCMFMAC_MSGBUF_H +#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 @@ -32,9 +33,15 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev); +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); - +#else +static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) +{ + return 0; +} +static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {} +#endif #endif /* BRCMFMAC_MSGBUF_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index bc972c0ba5f8..e5101b287e4e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ 
b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -591,12 +591,13 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) } if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE) brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n"); - if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) + if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) { brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n"); if (waitqueue_active(&devinfo->mbdata_resp_wait)) { devinfo->mbdata_completed = true; wake_up(&devinfo->mbdata_resp_wait); } + } } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 02fe706fc9ec..16a246bfc343 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) static void brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) { - struct net_device *ndev = wdev->netdev; - struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_cfg80211_vif *vif; + struct brcmf_if *ifp; + + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + ifp = vif->ifp; if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || (wdev->iftype == NL80211_IFTYPE_AP) || @@ -4918,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg, struct brcmu_chan ch; int i; - for (i = 0; i <= total; i++) { + for (i = 0; i < total; i++) { ch.chspec = (u16)le32_to_cpu(chlist->element[i]); cfg->d11inf.decchspec(&ch); @@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) ch.band = BRCMU_CHAN_BAND_2G; ch.bw = BRCMU_CHAN_BW_40; + ch.sb = BRCMU_CHAN_SB_NONE; ch.chnum = 0; cfg->d11inf.encchspec(&ch); @@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) brcmf_update_bw40_channel_flag(&band->channels[j], &ch); } + kfree(pbuf); } return err; } diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c index 91158e2e961c..c864ef4b0015 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/hostap/hostap_pci.c @@ -39,7 +39,7 @@ struct hostap_pci_priv { /* FIX: do we need mb/wmb/rmb with memory operations? 
*/ -static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = { +static const struct pci_device_id prism2_pci_id_table[] = { /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */ { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID }, /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */ diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c index 3bf530d9a40f..4901a99c6c59 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/hostap/hostap_plx.c @@ -60,7 +60,7 @@ struct hostap_plx_priv { #define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID } -static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = { +static const struct pci_device_id prism2_plx_id_table[] = { PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), PLXDEV(0x126c, 0x8030, "Nortel emobility"), diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index dfc6dfc56d52..c3d726f334e3 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -3449,8 +3449,9 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv) return -ENOMEM; for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { - v = pci_alloc_consistent(priv->pci_dev, - sizeof(struct ipw2100_cmd_header), &p); + v = pci_zalloc_consistent(priv->pci_dev, + sizeof(struct ipw2100_cmd_header), + &p); if (!v) { printk(KERN_ERR DRV_NAME ": " "%s: PCI alloc failed for msg " @@ -3459,8 +3460,6 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv) break; } - memset(v, 0, sizeof(struct ipw2100_cmd_header)); - priv->msg_buffers[i].type = COMMAND; priv->msg_buffers[i].info.c_struct.cmd = (struct ipw2100_cmd_header *)v; @@ -4336,16 +4335,12 @@ static int status_queue_allocate(struct ipw2100_priv *priv, int entries) IPW_DEBUG_INFO("enter\n"); q->size = entries * sizeof(struct ipw2100_status); - q->drv = - (struct ipw2100_status *)pci_alloc_consistent(priv->pci_dev, - q->size, &q->nic); + q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); if (!q->drv) { IPW_DEBUG_WARNING("Can not allocate status queue.\n"); return -ENOMEM; } - memset(q->drv, 0, q->size); - IPW_DEBUG_INFO("exit\n"); return 0; @@ -4374,13 +4369,12 @@ static int bd_queue_allocate(struct ipw2100_priv *priv, q->entries = entries; q->size = entries * sizeof(struct ipw2100_bd); - q->drv = pci_alloc_consistent(priv->pci_dev, q->size, &q->nic); + q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); if (!q->drv) { IPW_DEBUG_INFO ("can't allocate shared memory for buffer descriptors\n"); return -ENOMEM; } - memset(q->drv, 0, q->size); IPW_DEBUG_INFO("exit\n"); @@ -6511,7 +6505,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev) #define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x } -static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = { +static const struct pci_device_id ipw2100_pci_id_table[] = { IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */ IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */ IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */ diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index c5aa404069f3..a42f9c335090 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -9853,6 +9853,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev, strncpy(extra, "unknown", MAX_WX_STRING); break; } + extra[MAX_WX_STRING - 1] = '\0'; IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); @@ -11542,7 +11543,7 @@ 
out: } /* PCI driver stuff */ -static DEFINE_PCI_DEVICE_TABLE(card_ids) = { +static const struct pci_device_id card_ids[] = { {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c index b598e2803500..93bdf684babe 100644 --- a/drivers/net/wireless/iwlegacy/3945.c +++ b/drivers/net/wireless/iwlegacy/3945.c @@ -2728,7 +2728,7 @@ static struct il_cfg il3945_abg_cfg = { }, }; -DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = { +const struct pci_device_id il3945_hw_card_ids[] = { {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)}, {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)}, {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)}, diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index c159c05db6ef..3dcbe2cd2b28 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -6800,7 +6800,7 @@ il4965_txq_set_sched(struct il_priv *il, u32 mask) *****************************************************************************/ /* Hardware specific file defines the PCI IDs table for that hardware module */ -static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = { +static const struct pci_device_id il4965_hw_card_ids[] = { {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)}, {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)}, {0} diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 6451d2b6abcf..824f5e287783 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -51,7 +51,6 @@ config IWLWIFI_LEDS config IWLDVM tristate "Intel Wireless WiFi DVM Firmware support" - depends on m default IWLWIFI help This is the driver that supports the DVM firmware which is @@ -60,7 +59,6 @@ config IWLDVM config IWLMVM tristate "Intel Wireless WiFi MVM Firmware support" - depends on m help This is the driver that supports the MVM firmware which is currently only available for 7260 and 3160 devices. 
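The hunks immediately above (ipw2200, iwlegacy 3945/4965) and many others in this series make the same mechanical change: the deprecated DEFINE_PCI_DEVICE_TABLE() macro is dropped in favour of an open-coded const struct pci_device_id array. As a minimal sketch only, not taken from any driver in this series and using made-up vendor/device IDs, the resulting table in a hypothetical driver looks like this:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical IDs, purely for illustration. */
#define EXAMPLE_VENDOR_ID	0x1234
#define EXAMPLE_DEVICE_ID	0x5678

/* Open-coded form replacing DEFINE_PCI_DEVICE_TABLE(example_pci_tbl). */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

Only the declaration spelling changes; the table contents and the MODULE_DEVICE_TABLE() registration are left as they were, which is why each of these conversions is a one-line substitution in the hunks above.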
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index 760c45c34ef3..1513dbc79c14 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c @@ -40,7 +40,7 @@ #include "commands.h" #include "power.h" -static bool force_cam; +static bool force_cam = true; module_param(force_cam, bool, 0644); MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 6dc5dd3ced44..ed50de6362ed 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* recalculate basic rates */ iwl_calc_basic_rates(priv, ctx); + /* + * force CTS-to-self frames protection if RTS-CTS is not preferred + * one aggregation protection method + */ + if (!priv->hw_params.use_rts_for_aggregation) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; @@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, else ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + if (bss_conf->use_cts_prot) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + else + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; + memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); if (vif->type == NL80211_IFTYPE_AP || diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 48730064da73..d53adc245497 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -67,8 +67,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 9 -#define IWL3160_UCODE_API_MAX 9 +#define IWL7260_UCODE_API_MAX 10 +#define IWL3160_UCODE_API_MAX 10 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 9 @@ -83,6 +83,8 @@ #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3160_NVM_VERSION 0x709 #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL3165_NVM_VERSION 0x709 +#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265_NVM_VERSION 0x0a1d #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ @@ -92,6 +94,9 @@ #define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" +#define IWL3165_FW_PRE "iwlwifi-3165-" +#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" + #define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" @@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { {0}, }; +const struct iwl_cfg iwl3165_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 3165", + .fw_name_pre = IWL3165_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3165_NVM_VERSION, + .nvm_calib_ver = IWL3165_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, +}; + const struct iwl_cfg iwl7265_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265_FW_PRE, @@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 
+MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 44b19e015102..e93c6972290b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -67,7 +67,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 9 +#define IWL8000_UCODE_API_MAX 10 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 8 diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 8da596db9abe..3d7cc37420ae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -120,6 +120,8 @@ enum iwl_led_mode { #define IWL_LONG_WD_TIMEOUT 10000 #define IWL_MAX_WD_TIMEOUT 120000 +#define IWL_DEFAULT_MAX_TX_POWER 22 + /* Antenna presence definitions */ #define ANT_NONE 0x0 #define ANT_A BIT(0) @@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg; extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; +extern const struct iwl_cfg iwl3165_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 018af2957d3b..354255f08754 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = { #define LAST_2GHZ_HT_PLUS 9 #define LAST_5GHZ_HT 161 -#define DEFAULT_MAX_TX_POWER 16 - /* rate data (static) */ static struct ieee80211_rate iwl_cfg80211_rates[] = { { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, @@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, * Default value - highest tx power value. max_power * is not used in mvm, and is used for backwards compatibility */ - channel->max_power = DEFAULT_MAX_TX_POWER; + channel->max_power = IWL_DEFAULT_MAX_TX_POWER; is_5ghz = channel->band == IEEE80211_BAND_5GHZ; IWL_DEBUG_EEPROM(dev, "Ch. 
%d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 2291bbcaaeab..ce71625f497f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { - u32 mode; - switch (mvm->bt_force_ant_mode) { case BT_FORCE_ANT_BT: mode = BT_COEX_BT; @@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, struct iwl_bt_iterator_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_smps_mode smps_mode; + /* default smps_mode is AUTOMATIC - only used for client modes */ + enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; u32 bt_activity_grading; int ave_rssi; @@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, switch (vif->type) { case NL80211_IFTYPE_STATION: - /* default smps_mode for BSS / P2P client is AUTOMATIC */ - smps_mode = IEEE80211_SMPS_AUTOMATIC; break; case NL80211_IFTYPE_AP: if (!mvmvif->ap_ibss_active) @@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, else if (bt_activity_grading >= BT_LOW_TRAFFIC) smps_mode = IEEE80211_SMPS_DYNAMIC; - /* relax SMPS contraints for next association */ + /* relax SMPS constraints for next association */ if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 2e90ff795c13..87e517bffedc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, switch (param) { case MVM_DEBUGFS_PM_KEEP_ALIVE: { - struct ieee80211_hw *hw = mvm->hw; - int dtimper = hw->conf.ps_dtim_period ?: 1; + int dtimper = vif->bss_conf.dtim_period ?: 1; int dtimper_msec = dtimper * vif->bss_conf.beacon_int; IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 95f5b3274efb..9a922f3bd16b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -1563,14 +1563,14 @@ enum iwl_sf_scenario { /** * Smart Fifo configuration command. - * @state: smart fifo state, types listed in iwl_sf_sate. + * @state: smart fifo state, types listed in enum %iwl_sf_sate. * @watermark: Minimum allowed availabe free space in RXF for transient state. * @long_delay_timeouts: aging and idle timer values for each scenario * in long delay state. * @full_on_timeouts: timer values for each scenario in full on state. 
*/ struct iwl_sf_cfg_cmd { - enum iwl_sf_state state; + __le32 state; __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 0e523e28cabf..8242e689ddb1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, !force_assoc_off) { u32 dtim_offs; - /* Allow beacons to pass through as long as we are not - * associated, or we do not have dtim period information. - */ - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - /* * The DTIM count counts down, so when it is N that means N * more beacon intervals happen until the DTIM TBTT. Therefore @@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, ctxt_sta->is_assoc = cpu_to_le32(1); } else { ctxt_sta->is_assoc = cpu_to_le32(0); + + /* Allow beacons to pass through as long as we are not + * associated, or we do not have dtim period information. + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); } ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 0d6a8b768a68..cdc272d776e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -396,11 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; - hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; - hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; - /* we create the 802.11 header and zero length SSID IE. */ - hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; + if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) { + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; + hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; + /* we create the 802.11 header and zero length SSID IE. 
*/ + hw->wiphy->max_sched_scan_ie_len = + SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; + } hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | @@ -1523,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); - } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | - BSS_CHANGED_QOS)) { - ret = iwl_mvm_power_update_mac(mvm); - if (ret) - IWL_ERR(mvm, "failed to update power mode\n"); } if (changes & BSS_CHANGED_BEACON_INFO) { @@ -1535,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } + if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } + if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", bss_conf->txpower); diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2b2d10800a55..d9769a23c68b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { - struct ieee80211_hw *hw = mvm->hw; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; int dtimper, dtimper_msec; @@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - dtimper = hw->conf.ps_dtim_period ?: 1; + dtimper = vif->bss_conf.dtim_period; /* * Regardless of power management state the driver must set @@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, iwl_mvm_power_build_cmd(mvm, vif, &cmd); if (enable) { /* configure skip over dtim up to 300 msec */ - int dtimper = mvm->hw->conf.ps_dtim_period ?: 1; + int dtimper = vif->bss_conf.dtim_period ?: 1; int dtimper_msec = dtimper * vif->bss_conf.beacon_int; if (WARN_ON(!dtimper_msec)) diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 4b98987fc413..bf5cd8c8b0f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS; - energy_a = energy_a ? -energy_a : -256; + energy_a = energy_a ? -energy_a : S8_MIN; energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS; - energy_b = energy_b ? -energy_b : -256; + energy_b = energy_b ? -energy_b : S8_MIN; energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> IWL_RX_INFO_ENERGY_ANT_C_POS; - energy_c = energy_c ? -energy_c : -256; + energy_c = energy_c ? 
-energy_c : S8_MIN; max_energy = max(energy_a, energy_b); max_energy = max(max_energy, energy_c); diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index 7edfd15efc9d..e843b67f2201 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c @@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { - .state = new_state, + .state = cpu_to_le32(new_state), }; struct ieee80211_sta *sta; int ret = 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index dbc870713882..9ee410bf6da2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, /* * for data packets, rate info comes from the table inside the fw. This - * table is controlled by LINK_QUALITY commands + * table is controlled by LINK_QUALITY commands. Exclude ctrl port + * frames like EAPOLs which should be treated as mgmt frames. This + * avoids them being sent initially in high rates which increases the + * chances for completion of the 4-Way handshake. */ - if (ieee80211_is_data(fc) && sta) { + if (ieee80211_is_data(fc) && sta && + !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 98950e45c7b0..073a68b97a72 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -78,7 +78,7 @@ .driver_data = (kernel_ulong_t)&(cfg) /* Hardware specific file defines the PCI IDs table for that hardware module */ -static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { +static const struct pci_device_id iwl_hw_card_ids[] = { #if IS_ENABLED(CONFIG_IWLDVM) {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ @@ -352,11 +352,17 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, +/* 3165 Series */ + {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, + /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, @@ -378,6 +384,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index c16dd2cc8198..ff0545888dd0 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -257,7 
+257,7 @@ static void mwifiex_pcie_shutdown(struct pci_dev *pdev) return; } -static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = { +static const struct pci_device_id mwifiex_ids[] = { { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, PCI_ANY_ID, PCI_ANY_ID, 0, 0, diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 9a3d4d6724f7..ef1104476bd8 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1159,12 +1159,11 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size; - rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma); + rxq->rxd = pci_zalloc_consistent(priv->pdev, size, &rxq->rxd_dma); if (rxq->rxd == NULL) { wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n"); return -ENOMEM; } - memset(rxq->rxd, 0, size); rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL); if (rxq->buf == NULL) { @@ -1451,12 +1450,11 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc); - txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma); + txq->txd = pci_zalloc_consistent(priv->pdev, size, &txq->txd_dma); if (txq->txd == NULL) { wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n"); return -ENOMEM; } - memset(txq->txd, 0, size); txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { @@ -5676,7 +5674,7 @@ MODULE_FIRMWARE("mwl8k/helper_8366.fw"); MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); MODULE_FIRMWARE(MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API)); -static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { +static const struct pci_device_id mwl8k_pci_id_table[] = { { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c index ffb2469eb679..1b543e30eff7 100644 --- a/drivers/net/wireless/orinoco/orinoco_nortel.c +++ b/drivers/net/wireless/orinoco/orinoco_nortel.c @@ -272,7 +272,7 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = { +static const struct pci_device_id orinoco_nortel_id_table[] = { /* Nortel emobility PCI */ {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, /* Symbol LA-4123 PCI */ diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c index 5ae1191d2532..b6bdad632842 100644 --- a/drivers/net/wireless/orinoco/orinoco_pci.c +++ b/drivers/net/wireless/orinoco/orinoco_pci.c @@ -210,7 +210,7 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = { +static const struct pci_device_id orinoco_pci_id_table[] = { /* Intersil Prism 3 */ {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,}, /* Intersil Prism 2.5 */ diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c index bbd36d1676ff..b8f6e5c431ae 100644 --- a/drivers/net/wireless/orinoco/orinoco_plx.c +++ b/drivers/net/wireless/orinoco/orinoco_plx.c @@ -308,7 +308,7 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = { +static const struct pci_device_id orinoco_plx_id_table[] = { {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens 
SpeedStream SS1023 */ {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */ {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */ diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c index 04b08de5fd5d..79d0e33b625e 100644 --- a/drivers/net/wireless/orinoco/orinoco_tmd.c +++ b/drivers/net/wireless/orinoco/orinoco_tmd.c @@ -201,7 +201,7 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = { +static const struct pci_device_id orinoco_tmd_id_table[] = { {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */ {0,}, }; diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index d411de409050..d4aee64fb5ea 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -32,7 +32,7 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("prism54pci"); MODULE_FIRMWARE("isl3886pci"); -static DEFINE_PCI_DEVICE_TABLE(p54p_table) = { +static const struct pci_device_id p54p_table[] = { /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ { PCI_DEVICE(0x1260, 0x3890) }, /* 3COM 3CRWE154G72 Wireless LAN adapter */ diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c index 1105a12dbde8..300c846ea087 100644 --- a/drivers/net/wireless/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/prism54/islpci_hotplug.c @@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0); * driver_data * If you have an update for this please contact prism54-devel@prism54.org * The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */ -static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = { +static const struct pci_device_id prism54_id_tbl[] = { /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ { 0x1260, 0x3890, diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 4ccfef5094e0..bdf5590ba304 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1821,7 +1821,7 @@ static const struct rt2x00_ops rt2400pci_ops = { /* * RT2400pci module information. */ -static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = { +static const struct pci_device_id rt2400pci_device_table[] = { { PCI_DEVICE(0x1814, 0x0101) }, { 0, } }; diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index a511cccc9f01..79f4fe65a119 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -2120,7 +2120,7 @@ static const struct rt2x00_ops rt2500pci_ops = { /* * RT2500pci module information. */ -static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = { +static const struct pci_device_id rt2500pci_device_table[] = { { PCI_DEVICE(0x1814, 0x0201) }, { 0, } }; diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index a5b32ca2cf0f..cc1b3cc73c5a 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -400,7 +400,7 @@ static const struct rt2x00_ops rt2800pci_ops = { /* * RT2800pci module information. 
*/ -static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { +static const struct pci_device_id rt2800pci_device_table[] = { { PCI_DEVICE(0x1814, 0x0601) }, { PCI_DEVICE(0x1814, 0x0681) }, { PCI_DEVICE(0x1814, 0x0701) }, diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 9048a9cbe52c..819455009fe4 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -3075,7 +3075,7 @@ static const struct rt2x00_ops rt61pci_ops = { /* * RT61pci module information. */ -static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = { +static const struct pci_device_id rt61pci_device_table[] = { /* RT2561s */ { PCI_DEVICE(0x1814, 0x0301) }, /* RT2561 v2 */ diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c index 4b904f708184..026d912f516b 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c @@ -64,7 +64,7 @@ MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>"); MODULE_DESCRIPTION("RTL8180 / RTL8185 / RTL8187SE PCI wireless driver"); MODULE_LICENSE("GPL"); -static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = { +static const struct pci_device_id rtl8180_table[] = { /* rtl8187se */ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8199) }, @@ -972,16 +972,13 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev) else priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc); - priv->rx_ring = pci_alloc_consistent(priv->pdev, - priv->rx_ring_sz * 32, - &priv->rx_ring_dma); - + priv->rx_ring = pci_zalloc_consistent(priv->pdev, priv->rx_ring_sz * 32, + &priv->rx_ring_dma); if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) { wiphy_err(dev->wiphy, "Cannot allocate RX ring\n"); return -ENOMEM; } - memset(priv->rx_ring, 0, priv->rx_ring_sz * 32); priv->rx_idx = 0; for (i = 0; i < 32; i++) { @@ -1040,14 +1037,14 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev, dma_addr_t dma; int i; - ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); + ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, + &dma); if (!ring || (unsigned long)ring & 0xFF) { wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } - memset(ring, 0, sizeof(*ring)*entries); priv->tx_ring[prio].desc = ring; priv->tx_ring[prio].dma = dma; priv->tx_ring[prio].idx = 0; diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c index 33da3dfcfa4f..d4bd550f505c 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c @@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter) bool is_legacy = false; - if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B)) + if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G)) is_legacy = true; return is_legacy; diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index dae55257f0e8..67d1ee6edcad 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -1092,16 +1092,14 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, u32 nextdescaddress; int i; - ring = pci_alloc_consistent(rtlpci->pdev, - sizeof(*ring) * entries, &dma); - + ring = pci_zalloc_consistent(rtlpci->pdev, sizeof(*ring) * entries, + &dma); if (!ring || (unsigned long)ring & 0xFF) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Cannot allocate TX ring (prio = %d)\n", 
prio); return -ENOMEM; } - memset(ring, 0, sizeof(*ring) * entries); rtlpci->tx_ring[prio].desc = ring; rtlpci->tx_ring[prio].dma = dma; rtlpci->tx_ring[prio].idx = 0; @@ -1139,10 +1137,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE; rx_queue_idx++) { rtlpci->rx_ring[rx_queue_idx].desc = - pci_alloc_consistent(rtlpci->pdev, - sizeof(*rtlpci->rx_ring[rx_queue_idx]. - desc) * rtlpci->rxringcount, - &rtlpci->rx_ring[rx_queue_idx].dma); + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) * rtlpci->rxringcount, + &rtlpci->rx_ring[rx_queue_idx].dma); if (!rtlpci->rx_ring[rx_queue_idx].desc || (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) { @@ -1151,10 +1148,6 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) return -ENOMEM; } - memset(rtlpci->rx_ring[rx_queue_idx].desc, 0, - sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) * - rtlpci->rxringcount); - rtlpci->rx_ring[rx_queue_idx].idx = 0; /* If amsdu_8k is disabled, set buffersize to 4096. This diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c index 842d69349a37..631b6907c17d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c @@ -364,7 +364,7 @@ static struct rtl_hal_cfg rtl88ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static DEFINE_PCI_DEVICE_TABLE(rtl88ee_pci_ids) = { +static const struct pci_device_id rtl88ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179, rtl88ee_hal_cfg)}, {}, }; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 12f21f4073e8..4bbdfb2df363 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -344,7 +344,7 @@ static struct rtl_hal_cfg rtl92ce_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, }; -static DEFINE_PCI_DEVICE_TABLE(rtl92ce_pci_ids) = { +static const struct pci_device_id rtl92ce_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)}, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 361435f8608a..1ac6383e7947 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c index ff12bf41644b..532913c6622a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -348,7 +348,7 @@ static struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static DEFINE_PCI_DEVICE_TABLE(rtl8723be_pci_id) = { +static const struct 
pci_device_id rtl8723be_pci_id[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb723, rtl8723be_hal_cfg)}, {}, }; diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index ef3026f46a37..d4eb8d2e9cb7 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -165,6 +165,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ u16 dealloc_ring[MAX_PENDING_REQS]; struct task_struct *dealloc_task; wait_queue_head_t dealloc_wq; + atomic_t inflight_packets; /* Use kthread for guest RX */ struct task_struct *task; @@ -329,4 +330,8 @@ extern unsigned int xenvif_max_queues; extern struct dentry *xen_netback_dbg_root; #endif +void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, + struct sk_buff *skb); +void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue); + #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 48a55cda979b..f379689dde30 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -43,6 +43,23 @@ #define XENVIF_QUEUE_LENGTH 32 #define XENVIF_NAPI_WEIGHT 64 +/* This function is used to set SKBTX_DEV_ZEROCOPY as well as + * increasing the inflight counter. We need to increase the inflight + * counter because core driver calls into xenvif_zerocopy_callback + * which calls xenvif_skb_zerocopy_complete. + */ +void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, + struct sk_buff *skb) +{ + skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + atomic_inc(&queue->inflight_packets); +} + +void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) +{ + atomic_dec(&queue->inflight_packets); +} + static inline void xenvif_stop_queue(struct xenvif_queue *queue) { struct net_device *dev = queue->vif->dev; @@ -78,12 +95,8 @@ int xenvif_poll(struct napi_struct *napi, int budget) /* This vif is rogue, we pretend we've there is nothing to do * for this vif to deschedule it from NAPI. But this interface * will be turned off in thread context later. - * Also, if a guest doesn't post enough slots to receive data on one of - * its queues, the carrier goes down and NAPI is descheduled here so - * the guest can't send more packets until it's ready to receive. */ - if (unlikely(queue->vif->disabled || - !netif_carrier_ok(queue->vif->dev))) { + if (unlikely(queue->vif->disabled)) { napi_complete(napi); return 0; } @@ -528,9 +541,6 @@ int xenvif_init_queue(struct xenvif_queue *queue) init_timer(&queue->rx_stalled); - netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, - XENVIF_NAPI_WEIGHT); - return 0; } @@ -564,6 +574,10 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, init_waitqueue_head(&queue->wq); init_waitqueue_head(&queue->dealloc_wq); + atomic_set(&queue->inflight_packets, 0); + + netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, + XENVIF_NAPI_WEIGHT); if (tx_evtchn == rx_evtchn) { /* feature-split-event-channels == 0 */ @@ -646,25 +660,6 @@ void xenvif_carrier_off(struct xenvif *vif) rtnl_unlock(); } -static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue, - unsigned int worst_case_skb_lifetime) -{ - int i, unmap_timeout = 0; - - for (i = 0; i < MAX_PENDING_REQS; ++i) { - if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) { - unmap_timeout++; - schedule_timeout(msecs_to_jiffies(1000)); - if (unmap_timeout > worst_case_skb_lifetime && - net_ratelimit()) - netdev_err(queue->vif->dev, - "Page still granted! 
Index: %x\n", - i); - i = -1; - } - } -} - void xenvif_disconnect(struct xenvif *vif) { struct xenvif_queue *queue = NULL; @@ -676,6 +671,8 @@ void xenvif_disconnect(struct xenvif *vif) for (queue_index = 0; queue_index < num_queues; ++queue_index) { queue = &vif->queues[queue_index]; + netif_napi_del(&queue->napi); + if (queue->task) { del_timer_sync(&queue->rx_stalled); kthread_stop(queue->task); @@ -708,7 +705,6 @@ void xenvif_disconnect(struct xenvif *vif) void xenvif_deinit_queue(struct xenvif_queue *queue) { free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); - netif_napi_del(&queue->napi); } void xenvif_free(struct xenvif *vif) @@ -716,21 +712,11 @@ void xenvif_free(struct xenvif *vif) struct xenvif_queue *queue = NULL; unsigned int num_queues = vif->num_queues; unsigned int queue_index; - /* Here we want to avoid timeout messages if an skb can be legitimately - * stuck somewhere else. Realistically this could be an another vif's - * internal or QDisc queue. That another vif also has this - * rx_drain_timeout_msecs timeout, so give it time to drain out. - * Although if that other guest wakes up just before its timeout happens - * and takes only one skb from QDisc, it can hold onto other skbs for a - * longer period. - */ - unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000); unregister_netdev(vif->dev); for (queue_index = 0; queue_index < num_queues; ++queue_index) { queue = &vif->queues[queue_index]; - xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime); xenvif_deinit_queue(queue); } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index aa2093325be1..08f65996534c 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1525,10 +1525,12 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s /* remove traces of mapped pages and frag_list */ skb_frag_list_init(skb); uarg = skb_shinfo(skb)->destructor_arg; + /* increase inflight counter to offset decrement in callback */ + atomic_inc(&queue->inflight_packets); uarg->callback(uarg, true); skb_shinfo(skb)->destructor_arg = NULL; - skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + xenvif_skb_zerocopy_prepare(queue, nskb); kfree_skb(nskb); return 0; @@ -1589,7 +1591,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) if (net_ratelimit()) netdev_err(queue->vif->dev, "Not enough memory to consolidate frag_list!\n"); - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + xenvif_skb_zerocopy_prepare(queue, skb); kfree_skb(skb); continue; } @@ -1609,7 +1611,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) "Can't setup checksum in net_tx_action\n"); /* We have to set this flag to trigger the callback */ if (skb_shinfo(skb)->destructor_arg) - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + xenvif_skb_zerocopy_prepare(queue, skb); kfree_skb(skb); continue; } @@ -1641,7 +1643,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) * skb. E.g. the __pskb_pull_tail earlier can do such thing. 
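The xen-netback changes in this area pair every zerocopy skb with an atomic in-flight count: the prepare helper sets SKBTX_DEV_ZEROCOPY and increments the counter, the zerocopy callback decrements it, and the dealloc kthread may only exit once a stop has been requested and the counter has drained. A stripped-down sketch of that shutdown rule, with hypothetical example_* names standing in for the xenvif_queue fields:

#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

struct example_queue {
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;	/* bumped in the "prepare" path */
	bool work_todo;			/* stand-in for tx_dealloc_work_todo() */
};

/* TX path: mark the skb for zerocopy and account for it. */
static void example_zerocopy_prepare(struct example_queue *q,
				     struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&q->inflight_packets);
}

/* Zerocopy destructor callback: drop the in-flight reference. */
static void example_zerocopy_complete(struct example_queue *q)
{
	atomic_dec(&q->inflight_packets);
	wake_up(&q->dealloc_wq);
}

/* Only exit once stop was requested AND nothing is still in flight. */
static bool example_should_stop(struct example_queue *q)
{
	return kthread_should_stop() && !atomic_read(&q->inflight_packets);
}

static int example_dealloc_thread(void *data)
{
	struct example_queue *q = data;

	for (;;) {
		wait_event_interruptible(q->dealloc_wq,
					 q->work_todo ||
					 example_should_stop(q));
		if (example_should_stop(q))
			break;
		/* ... unmap/free completed zerocopy buffers ... */
		q->work_todo = false;
	}
	return 0;
}
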
*/ if (skb_shinfo(skb)->destructor_arg) { - skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; + xenvif_skb_zerocopy_prepare(queue, skb); queue->stats.tx_zerocopy_sent++; } @@ -1681,6 +1683,7 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) queue->stats.tx_zerocopy_success++; else queue->stats.tx_zerocopy_fail++; + xenvif_skb_zerocopy_complete(queue); } static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) @@ -2025,9 +2028,15 @@ int xenvif_kthread_guest_rx(void *data) * context so we defer it here, if this thread is * associated with queue 0. */ - if (unlikely(queue->vif->disabled && queue->id == 0)) + if (unlikely(queue->vif->disabled && queue->id == 0)) { xenvif_carrier_off(queue->vif); - else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT, + } else if (unlikely(queue->vif->disabled)) { + /* kthread_stop() would be called upon this thread soon, + * be a bit proactive + */ + skb_queue_purge(&queue->rx_queue); + queue->rx_last_skb_slots = 0; + } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status))) { xenvif_rx_purge_event(queue); } else if (!netif_carrier_ok(queue->vif->dev)) { @@ -2052,15 +2061,24 @@ int xenvif_kthread_guest_rx(void *data) return 0; } +static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) +{ + /* Dealloc thread must remain running until all inflight + * packets complete. + */ + return kthread_should_stop() && + !atomic_read(&queue->inflight_packets); +} + int xenvif_dealloc_kthread(void *data) { struct xenvif_queue *queue = data; - while (!kthread_should_stop()) { + for (;;) { wait_event_interruptible(queue->dealloc_wq, tx_dealloc_work_todo(queue) || - kthread_should_stop()); - if (kthread_should_stop()) + xenvif_dealloc_kthread_should_stop(queue)); + if (xenvif_dealloc_kthread_should_stop(queue)) break; xenvif_tx_dealloc_action(queue); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 580517d857bf..9c47b897b6d2 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -116,6 +116,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v) } #define XENVIF_KICK_STR "kick" +#define BUFFER_SIZE 32 static ssize_t xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count, @@ -124,22 +125,24 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count, struct xenvif_queue *queue = ((struct seq_file *)filp->private_data)->private; int len; - char write[sizeof(XENVIF_KICK_STR)]; + char write[BUFFER_SIZE]; /* don't allow partial writes and check the length */ if (*ppos != 0) return 0; - if (count < sizeof(XENVIF_KICK_STR) - 1) + if (count >= sizeof(write)) return -ENOSPC; len = simple_write_to_buffer(write, - sizeof(write), + sizeof(write) - 1, ppos, buf, count); if (len < 0) return len; + write[len] = '\0'; + if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1)) xenvif_interrupt(0, (void *)queue); else { @@ -171,10 +174,9 @@ static const struct file_operations xenvif_dbg_io_ring_ops_fops = { .write = xenvif_write_io_ring, }; -static void xenvif_debugfs_addif(struct xenvif_queue *queue) +static void xenvif_debugfs_addif(struct xenvif *vif) { struct dentry *pfile; - struct xenvif *vif = queue->vif; int i; if (IS_ERR_OR_NULL(xen_netback_dbg_root)) @@ -733,10 +735,11 @@ static void connect(struct backend_info *be) be->vif->num_queues = queue_index; goto err; } + } + #ifdef CONFIG_DEBUG_FS - xenvif_debugfs_addif(queue); + xenvif_debugfs_addif(be->vif); 
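The xenbus.c debugfs fix above replaces a buffer sized exactly to the kick string with a bounded copy: writes larger than the buffer are rejected, at most sizeof(buf) - 1 bytes are copied, and the result is always NUL-terminated before parsing. The same pattern in isolation, with hypothetical names, suitable for a struct file_operations .write hook:

#include <linux/fs.h>
#include <linux/string.h>

#define EXAMPLE_BUF_SIZE 32	/* mirrors BUFFER_SIZE above */

static ssize_t example_debugfs_write(struct file *filp,
				     const char __user *buf,
				     size_t count, loff_t *ppos)
{
	char kbuf[EXAMPLE_BUF_SIZE];
	ssize_t len;

	if (*ppos != 0)			/* no partial writes */
		return 0;
	if (count >= sizeof(kbuf))	/* leave room for '\0' */
		return -ENOSPC;

	len = simple_write_to_buffer(kbuf, sizeof(kbuf) - 1, ppos, buf, count);
	if (len < 0)
		return len;
	kbuf[len] = '\0';

	/* ... parse kbuf, e.g. strncmp(kbuf, "kick", 4) ... */
	return count;
}
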
#endif /* CONFIG_DEBUG_FS */ - } /* Initialisation completed, tell core driver the number of * active queues. diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 28204bc4f369..ca82f545ec2c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -628,9 +628,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + xennet_count_skb_frag_slots(skb); if (unlikely(slots > MAX_SKB_FRAGS + 1)) { - net_alert_ratelimited( - "xennet: skb rides the rocket: %d slots\n", slots); - goto drop; + net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", + slots, skb->len); + if (skb_linearize(skb)) + goto drop; } spin_lock_irqsave(&queue->tx_lock, flags); diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c index f868333271aa..963a4a5dc88e 100644 --- a/drivers/nfc/microread/microread.c +++ b/drivers/nfc/microread/microread.c @@ -501,9 +501,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, targets->sens_res = be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; - memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], - skb->data[MICROREAD_EMCF_A_LEN]); targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; + if (targets->nfcid1_len > sizeof(targets->nfcid1)) { + r = -EINVAL; + goto exit_free; + } + memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], + targets->nfcid1_len); break; case MICROREAD_GATE_ID_MREAD_ISO_A_3: targets->supported_protocols = @@ -511,9 +515,13 @@ static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, targets->sens_res = be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; - memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], - skb->data[MICROREAD_EMCF_A3_LEN]); targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; + if (targets->nfcid1_len > sizeof(targets->nfcid1)) { + r = -EINVAL; + goto exit_free; + } + memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], + targets->nfcid1_len); break; case MICROREAD_GATE_ID_MREAD_ISO_B: targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile index db7a38ae05f7..7d688f97aa27 100644 --- a/drivers/nfc/st21nfca/Makefile +++ b/drivers/nfc/st21nfca/Makefile @@ -2,7 +2,8 @@ # Makefile for ST21NFCA HCI based NFC driver # -st21nfca_i2c-objs = i2c.o +st21nfca_hci-objs = st21nfca.o st21nfca_dep.o +obj-$(CONFIG_NFC_ST21NFCA) += st21nfca_hci.o -obj-$(CONFIG_NFC_ST21NFCA) += st21nfca.o st21nfca_dep.o +st21nfca_i2c-objs = i2c.o obj-$(CONFIG_NFC_ST21NFCA_I2C) += st21nfca_i2c.o diff --git a/drivers/nfc/st21nfcb/Makefile b/drivers/nfc/st21nfcb/Makefile index 13d9f03b2fea..f4d835dd15f2 100644 --- a/drivers/nfc/st21nfcb/Makefile +++ b/drivers/nfc/st21nfcb/Makefile @@ -2,7 +2,8 @@ # Makefile for ST21NFCB NCI based NFC driver # -st21nfcb_i2c-objs = i2c.o +st21nfcb_nci-objs = ndlc.o st21nfcb.o +obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb_nci.o -obj-$(CONFIG_NFC_ST21NFCB) += st21nfcb.o ndlc.o +st21nfcb_i2c-objs = i2c.o obj-$(CONFIG_NFC_ST21NFCB_I2C) += st21nfcb_i2c.o diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 9dd63b822025..e9bf2f47b61a 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -510,7 +510,7 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, WARN_ON(nt->mw[mw_num].virt_addr == 
NULL); - if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max) + if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max) num_qps_mw = nt->max_qps / mw_max + 1; else num_qps_mw = nt->max_qps / mw_max; @@ -576,6 +576,19 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) return -ENOMEM; } + /* + * we must ensure that the memory address allocated is BAR size + * aligned in order for the XLAT register to take the value. This + * is a requirement of the hardware. It is recommended to setup CMA + * for BAR sizes equal or greater than 4MB. + */ + if (!IS_ALIGNED(mw->dma_addr, mw->size)) { + dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n", + &mw->dma_addr); + ntb_free_mw(nt, num_mw); + return -ENOMEM; + } + /* Notify HW the memory location of the receive buffer */ ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr); @@ -856,7 +869,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt, qp->client_ready = NTB_LINK_DOWN; qp->event_handler = NULL; - if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max) + if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max) num_qps_mw = nt->max_qps / mw_max + 1; else num_qps_mw = nt->max_qps / mw_max; diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 2dcb0541012d..5160c4eb73c2 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -9,7 +9,8 @@ menu "Device Tree and Open Firmware support" config OF_SELFTEST bool "Device Tree Runtime self tests" - depends on OF_IRQ + depends on OF_IRQ && OF_EARLY_FLATTREE + select OF_DYNAMIC help This option builds in test cases for the device tree infrastructure that are executed once at boot time, and the results dumped to the diff --git a/drivers/of/Makefile b/drivers/of/Makefile index 099b1fb00af4..2b6a7b129d10 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -1,11 +1,13 @@ obj-y = base.o device.o platform.o +obj-$(CONFIG_OF_DYNAMIC) += dynamic.o obj-$(CONFIG_OF_FLATTREE) += fdt.o obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o obj-$(CONFIG_OF_PROMTREE) += pdt.o obj-$(CONFIG_OF_ADDRESS) += address.o obj-$(CONFIG_OF_IRQ) += irq.o obj-$(CONFIG_OF_NET) += of_net.o -obj-$(CONFIG_OF_SELFTEST) += selftest.o +obj-$(CONFIG_OF_SELFTEST) += of_selftest.o +of_selftest-objs := selftest.o testcase-data/testcases.dtb.o obj-$(CONFIG_OF_MDIO) += of_mdio.o obj-$(CONFIG_OF_PCI) += of_pci.o obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o diff --git a/drivers/of/base.c b/drivers/of/base.c index b9864806e9b8..293ed4b687ba 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -17,6 +17,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#include <linux/console.h> #include <linux/ctype.h> #include <linux/cpu.h> #include <linux/module.h> @@ -35,15 +36,17 @@ struct device_node *of_allnodes; EXPORT_SYMBOL(of_allnodes); struct device_node *of_chosen; struct device_node *of_aliases; -static struct device_node *of_stdout; +struct device_node *of_stdout; -static struct kset *of_kset; +struct kset *of_kset; /* - * Used to protect the of_aliases; but also overloaded to hold off addition of - * nodes to sysfs + * Used to protect the of_aliases, to hold off addition of nodes to sysfs. + * This mutex must be held whenever modifications are being made to the + * device tree. The of_{attach,detach}_node() and + * of_{add,remove,update}_property() helpers make sure this happens. 
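The ntb_transport hunk above also adds an alignment guard: the XLAT register can only take a receive-buffer address that is aligned to the BAR size, so an unaligned coherent allocation has to be rejected. A minimal sketch of that check, assuming illustrative parameter names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>	/* IS_ALIGNED() */

static int example_check_mw_alignment(struct device *dev, dma_addr_t addr,
				      resource_size_t bar_size)
{
	/* The hardware cannot translate an address that is not
	 * BAR-size aligned; treat it like an allocation failure. */
	if (!IS_ALIGNED(addr, bar_size)) {
		dev_err(dev, "DMA memory %pad not aligned to BAR size\n",
			&addr);
		return -ENOMEM;
	}
	return 0;
}
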
*/ -DEFINE_MUTEX(of_aliases_mutex); +DEFINE_MUTEX(of_mutex); /* use when traversing tree through the allnext, child, sibling, * or parent members of struct device_node. @@ -89,79 +92,7 @@ int __weak of_node_to_nid(struct device_node *np) } #endif -#if defined(CONFIG_OF_DYNAMIC) -/** - * of_node_get - Increment refcount of a node - * @node: Node to inc refcount, NULL is supported to - * simplify writing of callers - * - * Returns node. - */ -struct device_node *of_node_get(struct device_node *node) -{ - if (node) - kobject_get(&node->kobj); - return node; -} -EXPORT_SYMBOL(of_node_get); - -static inline struct device_node *kobj_to_device_node(struct kobject *kobj) -{ - return container_of(kobj, struct device_node, kobj); -} - -/** - * of_node_release - release a dynamically allocated node - * @kref: kref element of the node to be released - * - * In of_node_put() this function is passed to kref_put() - * as the destructor. - */ -static void of_node_release(struct kobject *kobj) -{ - struct device_node *node = kobj_to_device_node(kobj); - struct property *prop = node->properties; - - /* We should never be releasing nodes that haven't been detached. */ - if (!of_node_check_flag(node, OF_DETACHED)) { - pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); - dump_stack(); - return; - } - - if (!of_node_check_flag(node, OF_DYNAMIC)) - return; - - while (prop) { - struct property *next = prop->next; - kfree(prop->name); - kfree(prop->value); - kfree(prop); - prop = next; - - if (!prop) { - prop = node->deadprops; - node->deadprops = NULL; - } - } - kfree(node->full_name); - kfree(node->data); - kfree(node); -} - -/** - * of_node_put - Decrement refcount of a node - * @node: Node to dec refcount, NULL is supported to - * simplify writing of callers - * - */ -void of_node_put(struct device_node *node) -{ - if (node) - kobject_put(&node->kobj); -} -EXPORT_SYMBOL(of_node_put); -#else +#ifndef CONFIG_OF_DYNAMIC static void of_node_release(struct kobject *kobj) { /* Without CONFIG_OF_DYNAMIC, no nodes gets freed */ @@ -200,13 +131,19 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name) return name; } -static int __of_add_property_sysfs(struct device_node *np, struct property *pp) +int __of_add_property_sysfs(struct device_node *np, struct property *pp) { int rc; /* Important: Don't leak passwords */ bool secure = strncmp(pp->name, "security-", 9) == 0; + if (!IS_ENABLED(CONFIG_SYSFS)) + return 0; + + if (!of_kset || !of_node_is_attached(np)) + return 0; + sysfs_bin_attr_init(&pp->attr); pp->attr.attr.name = safe_name(&np->kobj, pp->name); pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO; @@ -218,12 +155,18 @@ static int __of_add_property_sysfs(struct device_node *np, struct property *pp) return rc; } -static int __of_node_add(struct device_node *np) +int __of_attach_node_sysfs(struct device_node *np) { const char *name; struct property *pp; int rc; + if (!IS_ENABLED(CONFIG_SYSFS)) + return 0; + + if (!of_kset) + return 0; + np->kobj.kset = of_kset; if (!np->parent) { /* Nodes without parents are new top level trees */ @@ -245,59 +188,20 @@ static int __of_node_add(struct device_node *np) return 0; } -int of_node_add(struct device_node *np) -{ - int rc = 0; - - BUG_ON(!of_node_is_initialized(np)); - - /* - * Grab the mutex here so that in a race condition between of_init() and - * of_node_add(), node addition will still be consistent. 
- */ - mutex_lock(&of_aliases_mutex); - if (of_kset) - rc = __of_node_add(np); - else - /* This scenario may be perfectly valid, but report it anyway */ - pr_info("of_node_add(%s) before of_init()\n", np->full_name); - mutex_unlock(&of_aliases_mutex); - return rc; -} - -#if defined(CONFIG_OF_DYNAMIC) -static void of_node_remove(struct device_node *np) -{ - struct property *pp; - - BUG_ON(!of_node_is_initialized(np)); - - /* only remove properties if on sysfs */ - if (of_node_is_attached(np)) { - for_each_property_of_node(np, pp) - sysfs_remove_bin_file(&np->kobj, &pp->attr); - kobject_del(&np->kobj); - } - - /* finally remove the kobj_init ref */ - of_node_put(np); -} -#endif - static int __init of_init(void) { struct device_node *np; /* Create the kset, and register existing nodes */ - mutex_lock(&of_aliases_mutex); + mutex_lock(&of_mutex); of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); if (!of_kset) { - mutex_unlock(&of_aliases_mutex); + mutex_unlock(&of_mutex); return -ENOMEM; } for_each_of_allnodes(np) - __of_node_add(np); - mutex_unlock(&of_aliases_mutex); + __of_attach_node_sysfs(np); + mutex_unlock(&of_mutex); /* Symlink in /proc as required by userspace ABI */ if (of_allnodes) @@ -369,8 +273,8 @@ EXPORT_SYMBOL(of_find_all_nodes); * Find a property with a given name for a given node * and return the value. */ -static const void *__of_get_property(const struct device_node *np, - const char *name, int *lenp) +const void *__of_get_property(const struct device_node *np, + const char *name, int *lenp) { struct property *pp = __of_find_property(np, name, lenp); @@ -1748,32 +1652,10 @@ int of_count_phandle_with_args(const struct device_node *np, const char *list_na } EXPORT_SYMBOL(of_count_phandle_with_args); -#if defined(CONFIG_OF_DYNAMIC) -static int of_property_notify(int action, struct device_node *np, - struct property *prop) -{ - struct of_prop_reconfig pr; - - /* only call notifiers if the node is attached */ - if (!of_node_is_attached(np)) - return 0; - - pr.dn = np; - pr.prop = prop; - return of_reconfig_notify(action, &pr); -} -#else -static int of_property_notify(int action, struct device_node *np, - struct property *prop) -{ - return 0; -} -#endif - /** * __of_add_property - Add a property to a node without lock operations */ -static int __of_add_property(struct device_node *np, struct property *prop) +int __of_add_property(struct device_node *np, struct property *prop) { struct property **next; @@ -1799,22 +1681,52 @@ int of_add_property(struct device_node *np, struct property *prop) unsigned long flags; int rc; - rc = of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop); - if (rc) - return rc; + mutex_lock(&of_mutex); raw_spin_lock_irqsave(&devtree_lock, flags); rc = __of_add_property(np, prop); raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (rc) - return rc; - if (of_node_is_attached(np)) + if (!rc) __of_add_property_sysfs(np, prop); + mutex_unlock(&of_mutex); + + if (!rc) + of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL); + return rc; } +int __of_remove_property(struct device_node *np, struct property *prop) +{ + struct property **next; + + for (next = &np->properties; *next; next = &(*next)->next) { + if (*next == prop) + break; + } + if (*next == NULL) + return -ENODEV; + + /* found the node */ + *next = prop->next; + prop->next = np->deadprops; + np->deadprops = prop; + + return 0; +} + +void __of_remove_property_sysfs(struct device_node *np, struct property *prop) +{ + if (!IS_ENABLED(CONFIG_SYSFS)) + return; + + /* at early 
boot, bail here and defer setup to of_init() */ + if (of_kset && of_node_is_attached(np)) + sysfs_remove_bin_file(&np->kobj, &prop->attr); +} + /** * of_remove_property - Remove a property from a node. * @@ -1825,211 +1737,101 @@ int of_add_property(struct device_node *np, struct property *prop) */ int of_remove_property(struct device_node *np, struct property *prop) { - struct property **next; unsigned long flags; - int found = 0; int rc; - rc = of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop); - if (rc) - return rc; + mutex_lock(&of_mutex); raw_spin_lock_irqsave(&devtree_lock, flags); - next = &np->properties; - while (*next) { - if (*next == prop) { - /* found the node */ - *next = prop->next; - prop->next = np->deadprops; - np->deadprops = prop; - found = 1; - break; - } - next = &(*next)->next; - } + rc = __of_remove_property(np, prop); raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (!found) - return -ENODEV; + if (!rc) + __of_remove_property_sysfs(np, prop); - /* at early boot, bail hear and defer setup to of_init() */ - if (!of_kset) - return 0; + mutex_unlock(&of_mutex); - sysfs_remove_bin_file(&np->kobj, &prop->attr); + if (!rc) + of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL); - return 0; + return rc; } -/* - * of_update_property - Update a property in a node, if the property does - * not exist, add it. - * - * Note that we don't actually remove it, since we have given out - * who-knows-how-many pointers to the data using get-property. - * Instead we just move the property to the "dead properties" list, - * and add the new property to the property list - */ -int of_update_property(struct device_node *np, struct property *newprop) +int __of_update_property(struct device_node *np, struct property *newprop, + struct property **oldpropp) { struct property **next, *oldprop; - unsigned long flags; - int rc; - - rc = of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop); - if (rc) - return rc; - if (!newprop->name) - return -EINVAL; + for (next = &np->properties; *next; next = &(*next)->next) { + if (of_prop_cmp((*next)->name, newprop->name) == 0) + break; + } + *oldpropp = oldprop = *next; - raw_spin_lock_irqsave(&devtree_lock, flags); - next = &np->properties; - oldprop = __of_find_property(np, newprop->name, NULL); - if (!oldprop) { - /* add the new node */ - rc = __of_add_property(np, newprop); - } else while (*next) { + if (oldprop) { /* replace the node */ - if (*next == oldprop) { - newprop->next = oldprop->next; - *next = newprop; - oldprop->next = np->deadprops; - np->deadprops = oldprop; - break; - } - next = &(*next)->next; + newprop->next = oldprop->next; + *next = newprop; + oldprop->next = np->deadprops; + np->deadprops = oldprop; + } else { + /* new node */ + newprop->next = NULL; + *next = newprop; } - raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (rc) - return rc; + + return 0; +} + +void __of_update_property_sysfs(struct device_node *np, struct property *newprop, + struct property *oldprop) +{ + if (!IS_ENABLED(CONFIG_SYSFS)) + return; /* At early boot, bail out and defer setup to of_init() */ if (!of_kset) - return 0; + return; - /* Update the sysfs attribute */ if (oldprop) sysfs_remove_bin_file(&np->kobj, &oldprop->attr); __of_add_property_sysfs(np, newprop); - - return 0; } -#if defined(CONFIG_OF_DYNAMIC) /* - * Support for dynamic device trees. + * of_update_property - Update a property in a node, if the property does + * not exist, add it. * - * On some platforms, the device tree can be manipulated at runtime. 
- * The routines in this section support adding, removing and changing - * device tree nodes. - */ - -static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain); - -int of_reconfig_notifier_register(struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&of_reconfig_chain, nb); -} -EXPORT_SYMBOL_GPL(of_reconfig_notifier_register); - -int of_reconfig_notifier_unregister(struct notifier_block *nb) -{ - return blocking_notifier_chain_unregister(&of_reconfig_chain, nb); -} -EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); - -int of_reconfig_notify(unsigned long action, void *p) -{ - int rc; - - rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p); - return notifier_to_errno(rc); -} - -/** - * of_attach_node - Plug a device node into the tree and global list. + * Note that we don't actually remove it, since we have given out + * who-knows-how-many pointers to the data using get-property. + * Instead we just move the property to the "dead properties" list, + * and add the new property to the property list */ -int of_attach_node(struct device_node *np) +int of_update_property(struct device_node *np, struct property *newprop) { + struct property *oldprop; unsigned long flags; int rc; - rc = of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np); - if (rc) - return rc; - - raw_spin_lock_irqsave(&devtree_lock, flags); - np->sibling = np->parent->child; - np->allnext = np->parent->allnext; - np->parent->allnext = np; - np->parent->child = np; - of_node_clear_flag(np, OF_DETACHED); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - - of_node_add(np); - return 0; -} - -/** - * of_detach_node - "Unplug" a node from the device tree. - * - * The caller must hold a reference to the node. The memory associated with - * the node is not freed until its refcount goes to zero. 
- */ -int of_detach_node(struct device_node *np) -{ - struct device_node *parent; - unsigned long flags; - int rc = 0; + if (!newprop->name) + return -EINVAL; - rc = of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np); - if (rc) - return rc; + mutex_lock(&of_mutex); raw_spin_lock_irqsave(&devtree_lock, flags); + rc = __of_update_property(np, newprop, &oldprop); + raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (of_node_check_flag(np, OF_DETACHED)) { - /* someone already detached it */ - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return rc; - } - - parent = np->parent; - if (!parent) { - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return rc; - } - - if (of_allnodes == np) - of_allnodes = np->allnext; - else { - struct device_node *prev; - for (prev = of_allnodes; - prev->allnext != np; - prev = prev->allnext) - ; - prev->allnext = np->allnext; - } + if (!rc) + __of_update_property_sysfs(np, newprop, oldprop); - if (parent->child == np) - parent->child = np->sibling; - else { - struct device_node *prevsib; - for (prevsib = np->parent->child; - prevsib->sibling != np; - prevsib = prevsib->sibling) - ; - prevsib->sibling = np->sibling; - } + mutex_unlock(&of_mutex); - of_node_set_flag(np, OF_DETACHED); - raw_spin_unlock_irqrestore(&devtree_lock, flags); + if (!rc) + of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop); - of_node_remove(np); return rc; } -#endif /* defined(CONFIG_OF_DYNAMIC) */ static void of_alias_add(struct alias_prop *ap, struct device_node *np, int id, const char *stem, int stem_len) @@ -2057,19 +1859,22 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) { struct property *pp; + of_aliases = of_find_node_by_path("/aliases"); of_chosen = of_find_node_by_path("/chosen"); if (of_chosen == NULL) of_chosen = of_find_node_by_path("/chosen@0"); if (of_chosen) { + /* linux,stdout-path and /aliases/stdout are for legacy compatibility */ const char *name = of_get_property(of_chosen, "stdout-path", NULL); if (!name) name = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (IS_ENABLED(CONFIG_PPC) && !name) + name = of_get_property(of_aliases, "stdout", NULL); if (name) of_stdout = of_find_node_by_path(name); } - of_aliases = of_find_node_by_path("/aliases"); if (!of_aliases) return; @@ -2122,7 +1927,7 @@ int of_alias_get_id(struct device_node *np, const char *stem) struct alias_prop *app; int id = -ENODEV; - mutex_lock(&of_aliases_mutex); + mutex_lock(&of_mutex); list_for_each_entry(app, &aliases_lookup, link) { if (strcmp(app->stem, stem) != 0) continue; @@ -2132,7 +1937,7 @@ int of_alias_get_id(struct device_node *np, const char *stem) break; } } - mutex_unlock(&of_aliases_mutex); + mutex_unlock(&of_mutex); return id; } @@ -2180,20 +1985,22 @@ const char *of_prop_next_string(struct property *prop, const char *cur) EXPORT_SYMBOL_GPL(of_prop_next_string); /** - * of_device_is_stdout_path - check if a device node matches the - * linux,stdout-path property - * - * Check if this device node matches the linux,stdout-path property - * in the chosen node. return true if yes, false otherwise. + * of_console_check() - Test and setup console for DT setup + * @dn - Pointer to device node + * @name - Name to use for preferred console without index. ex. "ttyS" + * @index - Index to use for preferred console. + * + * Check if the given device node matches the stdout-path property in the + * /chosen node. If it does then register it as the preferred console and return + * TRUE. Otherwise return FALSE. 
*/ -int of_device_is_stdout_path(struct device_node *dn) +bool of_console_check(struct device_node *dn, char *name, int index) { - if (!of_stdout) + if (!dn || dn != of_stdout || console_set_on_cmdline) return false; - - return of_stdout == dn; + return !add_preferred_console(name, index, NULL); } -EXPORT_SYMBOL_GPL(of_device_is_stdout_path); +EXPORT_SYMBOL_GPL(of_console_check); /** * of_find_next_cache_node - Find a node's subsidiary cache diff --git a/drivers/of/device.c b/drivers/of/device.c index dafb9736ab9b..46d6c75c1404 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -160,7 +160,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); seen = 0; - mutex_lock(&of_aliases_mutex); + mutex_lock(&of_mutex); list_for_each_entry(app, &aliases_lookup, link) { if (dev->of_node == app->np) { add_uevent_var(env, "OF_ALIAS_%d=%s", seen, @@ -168,7 +168,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) seen++; } } - mutex_unlock(&of_aliases_mutex); + mutex_unlock(&of_mutex); } int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c new file mode 100644 index 000000000000..f297891d8529 --- /dev/null +++ b/drivers/of/dynamic.c @@ -0,0 +1,663 @@ +/* + * Support for dynamic device trees. + * + * On some platforms, the device tree can be manipulated at runtime. + * The routines in this section support adding, removing and changing + * device tree nodes. + */ + +#include <linux/of.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/proc_fs.h> + +#include "of_private.h" + +/** + * of_node_get() - Increment refcount of a node + * @node: Node to inc refcount, NULL is supported to simplify writing of + * callers + * + * Returns node. 
+ */ +struct device_node *of_node_get(struct device_node *node) +{ + if (node) + kobject_get(&node->kobj); + return node; +} +EXPORT_SYMBOL(of_node_get); + +/** + * of_node_put() - Decrement refcount of a node + * @node: Node to dec refcount, NULL is supported to simplify writing of + * callers + */ +void of_node_put(struct device_node *node) +{ + if (node) + kobject_put(&node->kobj); +} +EXPORT_SYMBOL(of_node_put); + +void __of_detach_node_sysfs(struct device_node *np) +{ + struct property *pp; + + if (!IS_ENABLED(CONFIG_SYSFS)) + return; + + BUG_ON(!of_node_is_initialized(np)); + if (!of_kset) + return; + + /* only remove properties if on sysfs */ + if (of_node_is_attached(np)) { + for_each_property_of_node(np, pp) + sysfs_remove_bin_file(&np->kobj, &pp->attr); + kobject_del(&np->kobj); + } + + /* finally remove the kobj_init ref */ + of_node_put(np); +} + +static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain); + +int of_reconfig_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&of_reconfig_chain, nb); +} +EXPORT_SYMBOL_GPL(of_reconfig_notifier_register); + +int of_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&of_reconfig_chain, nb); +} +EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); + +int of_reconfig_notify(unsigned long action, void *p) +{ + int rc; + + rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p); + return notifier_to_errno(rc); +} + +int of_property_notify(int action, struct device_node *np, + struct property *prop, struct property *oldprop) +{ + struct of_prop_reconfig pr; + + /* only call notifiers if the node is attached */ + if (!of_node_is_attached(np)) + return 0; + + pr.dn = np; + pr.prop = prop; + pr.old_prop = oldprop; + return of_reconfig_notify(action, &pr); +} + +void __of_attach_node(struct device_node *np) +{ + const __be32 *phandle; + int sz; + + np->name = __of_get_property(np, "name", NULL) ? : "<NULL>"; + np->type = __of_get_property(np, "device_type", NULL) ? : "<NULL>"; + + phandle = __of_get_property(np, "phandle", &sz); + if (!phandle) + phandle = __of_get_property(np, "linux,phandle", &sz); + if (IS_ENABLED(PPC_PSERIES) && !phandle) + phandle = __of_get_property(np, "ibm,phandle", &sz); + np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; + + np->child = NULL; + np->sibling = np->parent->child; + np->allnext = np->parent->allnext; + np->parent->allnext = np; + np->parent->child = np; + of_node_clear_flag(np, OF_DETACHED); +} + +/** + * of_attach_node() - Plug a device node into the tree and global list. 
+ */ +int of_attach_node(struct device_node *np) +{ + unsigned long flags; + + mutex_lock(&of_mutex); + raw_spin_lock_irqsave(&devtree_lock, flags); + __of_attach_node(np); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + __of_attach_node_sysfs(np); + mutex_unlock(&of_mutex); + + of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np); + + return 0; +} + +void __of_detach_node(struct device_node *np) +{ + struct device_node *parent; + + if (WARN_ON(of_node_check_flag(np, OF_DETACHED))) + return; + + parent = np->parent; + if (WARN_ON(!parent)) + return; + + if (of_allnodes == np) + of_allnodes = np->allnext; + else { + struct device_node *prev; + for (prev = of_allnodes; + prev->allnext != np; + prev = prev->allnext) + ; + prev->allnext = np->allnext; + } + + if (parent->child == np) + parent->child = np->sibling; + else { + struct device_node *prevsib; + for (prevsib = np->parent->child; + prevsib->sibling != np; + prevsib = prevsib->sibling) + ; + prevsib->sibling = np->sibling; + } + + of_node_set_flag(np, OF_DETACHED); +} + +/** + * of_detach_node() - "Unplug" a node from the device tree. + * + * The caller must hold a reference to the node. The memory associated with + * the node is not freed until its refcount goes to zero. + */ +int of_detach_node(struct device_node *np) +{ + unsigned long flags; + int rc = 0; + + mutex_lock(&of_mutex); + raw_spin_lock_irqsave(&devtree_lock, flags); + __of_detach_node(np); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + __of_detach_node_sysfs(np); + mutex_unlock(&of_mutex); + + of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np); + + return rc; +} + +/** + * of_node_release() - release a dynamically allocated node + * @kref: kref element of the node to be released + * + * In of_node_put() this function is passed to kref_put() as the destructor. + */ +void of_node_release(struct kobject *kobj) +{ + struct device_node *node = kobj_to_device_node(kobj); + struct property *prop = node->properties; + + /* We should never be releasing nodes that haven't been detached. */ + if (!of_node_check_flag(node, OF_DETACHED)) { + pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); + dump_stack(); + return; + } + + if (!of_node_check_flag(node, OF_DYNAMIC)) + return; + + while (prop) { + struct property *next = prop->next; + kfree(prop->name); + kfree(prop->value); + kfree(prop); + prop = next; + + if (!prop) { + prop = node->deadprops; + node->deadprops = NULL; + } + } + kfree(node->full_name); + kfree(node->data); + kfree(node); +} + +/** + * __of_prop_dup - Copy a property dynamically. + * @prop: Property to copy + * @allocflags: Allocation flags (typically pass GFP_KERNEL) + * + * Copy a property by dynamically allocating the memory of both the + * property stucture and the property name & contents. The property's + * flags have the OF_DYNAMIC bit set so that we can differentiate between + * dynamically allocated properties and not. + * Returns the newly allocated property or NULL on out of memory error. + */ +struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags) +{ + struct property *new; + + new = kzalloc(sizeof(*new), allocflags); + if (!new) + return NULL; + + /* + * NOTE: There is no check for zero length value. + * In case of a boolean property, this will allocate a value + * of zero bytes. We do this to work around the use + * of of_get_property() calls on boolean values. 
+ */ + new->name = kstrdup(prop->name, allocflags); + new->value = kmemdup(prop->value, prop->length, allocflags); + new->length = prop->length; + if (!new->name || !new->value) + goto err_free; + + /* mark the property as dynamic */ + of_property_set_flag(new, OF_DYNAMIC); + + return new; + + err_free: + kfree(new->name); + kfree(new->value); + kfree(new); + return NULL; +} + +/** + * __of_node_alloc() - Create an empty device node dynamically. + * @full_name: Full name of the new device node + * @allocflags: Allocation flags (typically pass GFP_KERNEL) + * + * Create an empty device tree node, suitable for further modification. + * The node data are dynamically allocated and all the node flags + * have the OF_DYNAMIC & OF_DETACHED bits set. + * Returns the newly allocated node or NULL on out of memory error. + */ +struct device_node *__of_node_alloc(const char *full_name, gfp_t allocflags) +{ + struct device_node *node; + + node = kzalloc(sizeof(*node), allocflags); + if (!node) + return NULL; + + node->full_name = kstrdup(full_name, allocflags); + of_node_set_flag(node, OF_DYNAMIC); + of_node_set_flag(node, OF_DETACHED); + if (!node->full_name) + goto err_free; + + of_node_init(node); + + return node; + + err_free: + kfree(node->full_name); + kfree(node); + return NULL; +} + +static void __of_changeset_entry_destroy(struct of_changeset_entry *ce) +{ + of_node_put(ce->np); + list_del(&ce->node); + kfree(ce); +} + +#ifdef DEBUG +static void __of_changeset_entry_dump(struct of_changeset_entry *ce) +{ + switch (ce->action) { + case OF_RECONFIG_ADD_PROPERTY: + pr_debug("%p: %s %s/%s\n", + ce, "ADD_PROPERTY ", ce->np->full_name, + ce->prop->name); + break; + case OF_RECONFIG_REMOVE_PROPERTY: + pr_debug("%p: %s %s/%s\n", + ce, "REMOVE_PROPERTY", ce->np->full_name, + ce->prop->name); + break; + case OF_RECONFIG_UPDATE_PROPERTY: + pr_debug("%p: %s %s/%s\n", + ce, "UPDATE_PROPERTY", ce->np->full_name, + ce->prop->name); + break; + case OF_RECONFIG_ATTACH_NODE: + pr_debug("%p: %s %s\n", + ce, "ATTACH_NODE ", ce->np->full_name); + break; + case OF_RECONFIG_DETACH_NODE: + pr_debug("%p: %s %s\n", + ce, "DETACH_NODE ", ce->np->full_name); + break; + } +} +#else +static inline void __of_changeset_entry_dump(struct of_changeset_entry *ce) +{ + /* empty */ +} +#endif + +static void __of_changeset_entry_invert(struct of_changeset_entry *ce, + struct of_changeset_entry *rce) +{ + memcpy(rce, ce, sizeof(*rce)); + + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + rce->action = OF_RECONFIG_DETACH_NODE; + break; + case OF_RECONFIG_DETACH_NODE: + rce->action = OF_RECONFIG_ATTACH_NODE; + break; + case OF_RECONFIG_ADD_PROPERTY: + rce->action = OF_RECONFIG_REMOVE_PROPERTY; + break; + case OF_RECONFIG_REMOVE_PROPERTY: + rce->action = OF_RECONFIG_ADD_PROPERTY; + break; + case OF_RECONFIG_UPDATE_PROPERTY: + rce->old_prop = ce->prop; + rce->prop = ce->old_prop; + break; + } +} + +static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool revert) +{ + struct of_changeset_entry ce_inverted; + int ret; + + if (revert) { + __of_changeset_entry_invert(ce, &ce_inverted); + ce = &ce_inverted; + } + + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + case OF_RECONFIG_DETACH_NODE: + ret = of_reconfig_notify(ce->action, ce->np); + break; + case OF_RECONFIG_ADD_PROPERTY: + case OF_RECONFIG_REMOVE_PROPERTY: + case OF_RECONFIG_UPDATE_PROPERTY: + ret = of_property_notify(ce->action, ce->np, ce->prop, ce->old_prop); + break; + default: + pr_err("%s: invalid devicetree changeset action: %i\n", __func__, 
+ (int)ce->action); + return; + } + + if (ret) + pr_err("%s: notifier error @%s\n", __func__, ce->np->full_name); +} + +static int __of_changeset_entry_apply(struct of_changeset_entry *ce) +{ + struct property *old_prop, **propp; + unsigned long flags; + int ret = 0; + + __of_changeset_entry_dump(ce); + + raw_spin_lock_irqsave(&devtree_lock, flags); + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + __of_attach_node(ce->np); + break; + case OF_RECONFIG_DETACH_NODE: + __of_detach_node(ce->np); + break; + case OF_RECONFIG_ADD_PROPERTY: + /* If the property is in deadprops then it must be removed */ + for (propp = &ce->np->deadprops; *propp; propp = &(*propp)->next) { + if (*propp == ce->prop) { + *propp = ce->prop->next; + ce->prop->next = NULL; + break; + } + } + + ret = __of_add_property(ce->np, ce->prop); + if (ret) { + pr_err("%s: add_property failed @%s/%s\n", + __func__, ce->np->full_name, + ce->prop->name); + break; + } + break; + case OF_RECONFIG_REMOVE_PROPERTY: + ret = __of_remove_property(ce->np, ce->prop); + if (ret) { + pr_err("%s: remove_property failed @%s/%s\n", + __func__, ce->np->full_name, + ce->prop->name); + break; + } + break; + + case OF_RECONFIG_UPDATE_PROPERTY: + /* If the property is in deadprops then it must be removed */ + for (propp = &ce->np->deadprops; *propp; propp = &(*propp)->next) { + if (*propp == ce->prop) { + *propp = ce->prop->next; + ce->prop->next = NULL; + break; + } + } + + ret = __of_update_property(ce->np, ce->prop, &old_prop); + if (ret) { + pr_err("%s: update_property failed @%s/%s\n", + __func__, ce->np->full_name, + ce->prop->name); + break; + } + break; + default: + ret = -EINVAL; + } + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + if (ret) + return ret; + + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + __of_attach_node_sysfs(ce->np); + break; + case OF_RECONFIG_DETACH_NODE: + __of_detach_node_sysfs(ce->np); + break; + case OF_RECONFIG_ADD_PROPERTY: + /* ignore duplicate names */ + __of_add_property_sysfs(ce->np, ce->prop); + break; + case OF_RECONFIG_REMOVE_PROPERTY: + __of_remove_property_sysfs(ce->np, ce->prop); + break; + case OF_RECONFIG_UPDATE_PROPERTY: + __of_update_property_sysfs(ce->np, ce->prop, ce->old_prop); + break; + } + + return 0; +} + +static inline int __of_changeset_entry_revert(struct of_changeset_entry *ce) +{ + struct of_changeset_entry ce_inverted; + + __of_changeset_entry_invert(ce, &ce_inverted); + return __of_changeset_entry_apply(&ce_inverted); +} + +/** + * of_changeset_init - Initialize a changeset for use + * + * @ocs: changeset pointer + * + * Initialize a changeset structure + */ +void of_changeset_init(struct of_changeset *ocs) +{ + memset(ocs, 0, sizeof(*ocs)); + INIT_LIST_HEAD(&ocs->entries); +} + +/** + * of_changeset_destroy - Destroy a changeset + * + * @ocs: changeset pointer + * + * Destroys a changeset. Note that if a changeset is applied, + * its changes to the tree cannot be reverted. + */ +void of_changeset_destroy(struct of_changeset *ocs) +{ + struct of_changeset_entry *ce, *cen; + + list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node) + __of_changeset_entry_destroy(ce); +} + +/** + * of_changeset_apply - Applies a changeset + * + * @ocs: changeset pointer + * + * Applies a changeset to the live tree. + * Any side-effects of live tree state changes are applied here on + * sucess, like creation/destruction of devices and side-effects + * like creation of sysfs properties and directories. + * Returns 0 on success, a negative error value in case of an error. 
+ * On error the partially applied effects are reverted. + */ +int of_changeset_apply(struct of_changeset *ocs) +{ + struct of_changeset_entry *ce; + int ret; + + /* perform the rest of the work */ + pr_debug("of_changeset: applying...\n"); + list_for_each_entry(ce, &ocs->entries, node) { + ret = __of_changeset_entry_apply(ce); + if (ret) { + pr_err("%s: Error applying changeset (%d)\n", __func__, ret); + list_for_each_entry_continue_reverse(ce, &ocs->entries, node) + __of_changeset_entry_revert(ce); + return ret; + } + } + pr_debug("of_changeset: applied, emitting notifiers.\n"); + + /* drop the global lock while emitting notifiers */ + mutex_unlock(&of_mutex); + list_for_each_entry(ce, &ocs->entries, node) + __of_changeset_entry_notify(ce, 0); + mutex_lock(&of_mutex); + pr_debug("of_changeset: notifiers sent.\n"); + + return 0; +} + +/** + * of_changeset_revert - Reverts an applied changeset + * + * @ocs: changeset pointer + * + * Reverts a changeset returning the state of the tree to what it + * was before the application. + * Any side-effects like creation/destruction of devices and + * removal of sysfs properties and directories are applied. + * Returns 0 on success, a negative error value in case of an error. + */ +int of_changeset_revert(struct of_changeset *ocs) +{ + struct of_changeset_entry *ce; + int ret; + + pr_debug("of_changeset: reverting...\n"); + list_for_each_entry_reverse(ce, &ocs->entries, node) { + ret = __of_changeset_entry_revert(ce); + if (ret) { + pr_err("%s: Error reverting changeset (%d)\n", __func__, ret); + list_for_each_entry_continue(ce, &ocs->entries, node) + __of_changeset_entry_apply(ce); + return ret; + } + } + pr_debug("of_changeset: reverted, emitting notifiers.\n"); + + /* drop the global lock while emitting notifiers */ + mutex_unlock(&of_mutex); + list_for_each_entry_reverse(ce, &ocs->entries, node) + __of_changeset_entry_notify(ce, 1); + mutex_lock(&of_mutex); + pr_debug("of_changeset: notifiers sent.\n"); + + return 0; +} + +/** + * of_changeset_action - Perform a changeset action + * + * @ocs: changeset pointer + * @action: action to perform + * @np: Pointer to device node + * @prop: Pointer to property + * + * On action being one of: + * + OF_RECONFIG_ATTACH_NODE + * + OF_RECONFIG_DETACH_NODE, + * + OF_RECONFIG_ADD_PROPERTY + * + OF_RECONFIG_REMOVE_PROPERTY, + * + OF_RECONFIG_UPDATE_PROPERTY + * Returns 0 on success, a negative error value in case of an error. 
+ */ +int of_changeset_action(struct of_changeset *ocs, unsigned long action, + struct device_node *np, struct property *prop) +{ + struct of_changeset_entry *ce; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) { + pr_err("%s: Failed to allocate\n", __func__); + return -ENOMEM; + } + /* get a reference to the node */ + ce->action = action; + ce->np = of_node_get(np); + ce->prop = prop; + + if (action == OF_RECONFIG_UPDATE_PROPERTY && prop) + ce->old_prop = of_find_property(np, prop->name, NULL); + + /* add it to the list */ + list_add_tail(&ce->node, &ocs->entries); + return 0; +} diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 9aa012e6ea0a..d1ffca8b34ea 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -453,7 +453,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, base = dt_mem_next_cell(dt_root_addr_cells, &prop); size = dt_mem_next_cell(dt_root_size_cells, &prop); - if (base && size && + if (size && early_init_dt_reserve_memory_arch(base, size, nomap) == 0) pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n", uname, &base, (unsigned long)size / SZ_1M); @@ -923,24 +923,28 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, } #ifdef CONFIG_HAVE_MEMBLOCK +#define MAX_PHYS_ADDR ((phys_addr_t)~0) + void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) { const u64 phys_offset = __pa(PAGE_OFFSET); - base &= PAGE_MASK; + + if (!PAGE_ALIGNED(base)) { + size -= PAGE_SIZE - (base & ~PAGE_MASK); + base = PAGE_ALIGN(base); + } size &= PAGE_MASK; - if (sizeof(phys_addr_t) < sizeof(u64)) { - if (base > ULONG_MAX) { - pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", - base, base + size); - return; - } + if (base > MAX_PHYS_ADDR) { + pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } - if (base + size > ULONG_MAX) { - pr_warning("Ignoring memory range 0x%lx - 0x%llx\n", - ULONG_MAX, base + size); - size = ULONG_MAX - base; - } + if (base + size - 1 > MAX_PHYS_ADDR) { + pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", + ((u64)MAX_PHYS_ADDR) + 1, base + size); + size = MAX_PHYS_ADDR - base + 1; } if (base + size < phys_offset) { diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 3e06a699352d..1471e0a223a5 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -301,16 +301,17 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar /* Get the reg property (if any) */ addr = of_get_property(device, "reg", NULL); + /* Try the new-style interrupts-extended first */ + res = of_parse_phandle_with_args(device, "interrupts-extended", + "#interrupt-cells", index, out_irq); + if (!res) + return of_irq_parse_raw(addr, out_irq); + /* Get the interrupts property */ intspec = of_get_property(device, "interrupts", &intlen); - if (intspec == NULL) { - /* Try the new-style interrupts-extended */ - res = of_parse_phandle_with_args(device, "interrupts-extended", - "#interrupt-cells", index, out_irq); - if (res) - return -EINVAL; - return of_irq_parse_raw(addr, out_irq); - } + if (intspec == NULL) + return -EINVAL; + intlen /= sizeof(*intspec); pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen); diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index ff350c8fa7ac..858e0a5d9a11 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -31,6 +31,63 @@ struct alias_prop { char stem[0]; }; -extern struct mutex of_aliases_mutex; +extern struct mutex of_mutex; extern struct list_head aliases_lookup; 
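A minimal usage sketch of the changeset API added above (not part of the patch): it mirrors the calling sequence of the of_selftest_changeset() test later in this series. The node path, property name and example_* identifiers are illustrative only; the caller is assumed to live under drivers/of/ so it can include of_private.h for of_mutex, __of_node_alloc() and __of_prop_dup(), since of_changeset_apply()/of_changeset_revert() expect of_mutex to be held.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "of_private.h"	/* of_mutex, __of_node_alloc(), __of_prop_dup() */

static int example_changeset(struct device_node *parent)
{
	struct property pattr = { .name = "example-prop", .length = 5, .value = "abcd" };
	struct device_node *np;
	struct property *pp;
	struct of_changeset ocs;
	int ret;

	/* build detached objects first; error unwinding kept minimal for brevity */
	np = __of_node_alloc("/example-node", GFP_KERNEL);
	pp = __of_prop_dup(&pattr, GFP_KERNEL);
	if (!np || !pp)
		return -ENOMEM;
	np->parent = parent;

	of_changeset_init(&ocs);

	/* queue the operations; the live tree is not touched yet */
	ret = of_changeset_attach_node(&ocs, np);
	if (!ret)
		ret = of_changeset_add_property(&ocs, parent, pp);
	if (ret)
		goto out;

	/* apply all entries at once, holding the global of_mutex */
	mutex_lock(&of_mutex);
	ret = of_changeset_apply(&ocs);
	mutex_unlock(&of_mutex);
out:
	of_changeset_destroy(&ocs);
	return ret;
}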
+extern struct kset *of_kset; + + +static inline struct device_node *kobj_to_device_node(struct kobject *kobj) +{ + return container_of(kobj, struct device_node, kobj); +} + +#if defined(CONFIG_OF_DYNAMIC) +extern int of_property_notify(int action, struct device_node *np, + struct property *prop, struct property *old_prop); +extern void of_node_release(struct kobject *kobj); +#else /* CONFIG_OF_DYNAMIC */ +static inline int of_property_notify(int action, struct device_node *np, + struct property *prop, struct property *old_prop) +{ + return 0; +} +#endif /* CONFIG_OF_DYNAMIC */ + +/** + * General utilities for working with live trees. + * + * All functions with two leading underscores operate + * without taking node references, so you either have to + * own the devtree lock or work on detached trees only. + */ +struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags); +struct device_node *__of_node_alloc(const char *full_name, gfp_t allocflags); + +extern const void *__of_get_property(const struct device_node *np, + const char *name, int *lenp); +extern int __of_add_property(struct device_node *np, struct property *prop); +extern int __of_add_property_sysfs(struct device_node *np, + struct property *prop); +extern int __of_remove_property(struct device_node *np, struct property *prop); +extern void __of_remove_property_sysfs(struct device_node *np, + struct property *prop); +extern int __of_update_property(struct device_node *np, + struct property *newprop, struct property **oldprop); +extern void __of_update_property_sysfs(struct device_node *np, + struct property *newprop, struct property *oldprop); + +extern void __of_attach_node(struct device_node *np); +extern int __of_attach_node_sysfs(struct device_node *np); +extern void __of_detach_node(struct device_node *np); +extern void __of_detach_node_sysfs(struct device_node *np); + +/* iterators for transactions, used for overlays */ +/* forward iterator */ +#define for_each_transaction_entry(_oft, _te) \ + list_for_each_entry(_te, &(_oft)->te_list, node) + +/* reverse iterator */ +#define for_each_transaction_entry_reverse(_oft, _te) \ + list_for_each_entry_reverse(_te, &(_oft)->te_list, node) + #endif /* _LINUX_OF_PRIVATE_H */ diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 632aae861375..59fb12e84e6b 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -206,8 +206,16 @@ void __init fdt_init_reserved_mem(void) for (i = 0; i < reserved_mem_count; i++) { struct reserved_mem *rmem = &reserved_mem[i]; unsigned long node = rmem->fdt_node; + int len; + const __be32 *prop; int err = 0; + prop = of_get_flat_dt_prop(node, "phandle", &len); + if (!prop) + prop = of_get_flat_dt_prop(node, "linux,phandle", &len); + if (prop) + rmem->phandle = of_read_number(prop, len/4); + if (rmem->size == 0) err = __reserved_mem_alloc_size(node, rmem->name, &rmem->base, &rmem->size); @@ -215,3 +223,65 @@ void __init fdt_init_reserved_mem(void) __reserved_mem_init_node(rmem); } } + +static inline struct reserved_mem *__find_rmem(struct device_node *node) +{ + unsigned int i; + + if (!node->phandle) + return NULL; + + for (i = 0; i < reserved_mem_count; i++) + if (reserved_mem[i].phandle == node->phandle) + return &reserved_mem[i]; + return NULL; +} + +/** + * of_reserved_mem_device_init() - assign reserved memory region to given device + * + * This function assign memory region pointed by "memory-region" device tree + * property to the given device. 
+ */ +void of_reserved_mem_device_init(struct device *dev) +{ + struct reserved_mem *rmem; + struct device_node *np; + + np = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!np) + return; + + rmem = __find_rmem(np); + of_node_put(np); + + if (!rmem || !rmem->ops || !rmem->ops->device_init) + return; + + rmem->ops->device_init(rmem, dev); + dev_info(dev, "assigned reserved memory node %s\n", rmem->name); +} + +/** + * of_reserved_mem_device_release() - release reserved memory device structures + * + * This function releases structures allocated for memory region handling for + * the given device. + */ +void of_reserved_mem_device_release(struct device *dev) +{ + struct reserved_mem *rmem; + struct device_node *np; + + np = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!np) + return; + + rmem = __find_rmem(np); + of_node_put(np); + + if (!rmem || !rmem->ops || !rmem->ops->device_release) + return; + + rmem->ops->device_release(rmem, dev); +} diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 500436f9be7f..0197725e033a 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -422,6 +422,7 @@ static int of_platform_bus_create(struct device_node *bus, break; } } + of_node_set_flag(bus, OF_POPULATED_BUS); return rc; } @@ -508,19 +509,13 @@ EXPORT_SYMBOL_GPL(of_platform_populate); static int of_platform_device_destroy(struct device *dev, void *data) { - bool *children_left = data; - /* Do not touch devices not populated from the device tree */ - if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) { - *children_left = true; + if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) return 0; - } - /* Recurse, but don't touch this device if it has any children left */ - if (of_platform_depopulate(dev) != 0) { - *children_left = true; - return 0; - } + /* Recurse for any nodes that were treated as busses */ + if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS)) + device_for_each_child(dev, NULL, of_platform_device_destroy); if (dev->bus == &platform_bus_type) platform_device_unregister(to_platform_device(dev)); @@ -528,19 +523,15 @@ static int of_platform_device_destroy(struct device *dev, void *data) else if (dev->bus == &amba_bustype) amba_device_unregister(to_amba_device(dev)); #endif - else { - *children_left = true; - return 0; - } of_node_clear_flag(dev->of_node, OF_POPULATED); - + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; } /** * of_platform_depopulate() - Remove devices populated from device tree - * @parent: device which childred will be removed + * @parent: device which children will be removed * * Complementary to of_platform_populate(), this function removes children * of the given device (and, recurrently, their children) that have been @@ -550,14 +541,9 @@ static int of_platform_device_destroy(struct device *dev, void *data) * Returns 0 when all children devices have been removed or * -EBUSY when some children remained. */ -int of_platform_depopulate(struct device *parent) +void of_platform_depopulate(struct device *parent) { - bool children_left = false; - - device_for_each_child(parent, &children_left, - of_platform_device_destroy); - - return children_left ? 
-EBUSY : 0; + device_for_each_child(parent, NULL, of_platform_device_destroy); } EXPORT_SYMBOL_GPL(of_platform_depopulate); diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c index 077314eebb95..a737cb5974de 100644 --- a/drivers/of/selftest.c +++ b/drivers/of/selftest.c @@ -9,6 +9,7 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_fdt.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/list.h> @@ -16,11 +17,18 @@ #include <linux/slab.h> #include <linux/device.h> +#include "of_private.h" + static struct selftest_results { int passed; int failed; } selftest_results; +#define NO_OF_NODES 2 +static struct device_node *nodes[NO_OF_NODES]; +static int last_node_index; +static bool selftest_live_tree; + #define selftest(result, fmt, ...) { \ if (!(result)) { \ selftest_results.failed++; \ @@ -266,6 +274,81 @@ static void __init of_selftest_property_match_string(void) selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc); } +#define propcmp(p1, p2) (((p1)->length == (p2)->length) && \ + (p1)->value && (p2)->value && \ + !memcmp((p1)->value, (p2)->value, (p1)->length) && \ + !strcmp((p1)->name, (p2)->name)) +static void __init of_selftest_property_copy(void) +{ +#ifdef CONFIG_OF_DYNAMIC + struct property p1 = { .name = "p1", .length = 0, .value = "" }; + struct property p2 = { .name = "p2", .length = 5, .value = "abcd" }; + struct property *new; + + new = __of_prop_dup(&p1, GFP_KERNEL); + selftest(new && propcmp(&p1, new), "empty property didn't copy correctly\n"); + kfree(new->value); + kfree(new->name); + kfree(new); + + new = __of_prop_dup(&p2, GFP_KERNEL); + selftest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n"); + kfree(new->value); + kfree(new->name); + kfree(new); +#endif +} + +static void __init of_selftest_changeset(void) +{ +#ifdef CONFIG_OF_DYNAMIC + struct property *ppadd, padd = { .name = "prop-add", .length = 0, .value = "" }; + struct property *ppupdate, pupdate = { .name = "prop-update", .length = 5, .value = "abcd" }; + struct property *ppremove; + struct device_node *n1, *n2, *n21, *nremove, *parent; + struct of_changeset chgset; + + of_changeset_init(&chgset); + n1 = __of_node_alloc("/testcase-data/changeset/n1", GFP_KERNEL); + selftest(n1, "testcase setup failure\n"); + n2 = __of_node_alloc("/testcase-data/changeset/n2", GFP_KERNEL); + selftest(n2, "testcase setup failure\n"); + n21 = __of_node_alloc("/testcase-data/changeset/n2/n21", GFP_KERNEL); + selftest(n21, "testcase setup failure %p\n", n21); + nremove = of_find_node_by_path("/testcase-data/changeset/node-remove"); + selftest(nremove, "testcase setup failure\n"); + ppadd = __of_prop_dup(&padd, GFP_KERNEL); + selftest(ppadd, "testcase setup failure\n"); + ppupdate = __of_prop_dup(&pupdate, GFP_KERNEL); + selftest(ppupdate, "testcase setup failure\n"); + parent = nremove->parent; + n1->parent = parent; + n2->parent = parent; + n21->parent = n2; + n2->child = n21; + ppremove = of_find_property(parent, "prop-remove", NULL); + selftest(ppremove, "failed to find removal prop"); + + of_changeset_init(&chgset); + selftest(!of_changeset_attach_node(&chgset, n1), "fail attach n1\n"); + selftest(!of_changeset_attach_node(&chgset, n2), "fail attach n2\n"); + selftest(!of_changeset_detach_node(&chgset, nremove), "fail remove node\n"); + selftest(!of_changeset_attach_node(&chgset, n21), "fail attach n21\n"); + selftest(!of_changeset_add_property(&chgset, parent, ppadd), "fail add prop\n"); + 
selftest(!of_changeset_update_property(&chgset, parent, ppupdate), "fail update prop\n"); + selftest(!of_changeset_remove_property(&chgset, parent, ppremove), "fail remove prop\n"); + mutex_lock(&of_mutex); + selftest(!of_changeset_apply(&chgset), "apply failed\n"); + mutex_unlock(&of_mutex); + + mutex_lock(&of_mutex); + selftest(!of_changeset_revert(&chgset), "revert failed\n"); + mutex_unlock(&of_mutex); + + of_changeset_destroy(&chgset); +#endif +} + static void __init of_selftest_parse_interrupts(void) { struct device_node *np; @@ -517,9 +600,177 @@ static void __init of_selftest_platform_populate(void) } } +/** + * update_node_properties - adds the properties + * of np into dup node (present in live tree) and + * updates parent of children of np to dup. + * + * @np: node already present in live tree + * @dup: node present in live tree to be updated + */ +static void update_node_properties(struct device_node *np, + struct device_node *dup) +{ + struct property *prop; + struct device_node *child; + + for_each_property_of_node(np, prop) + of_add_property(dup, prop); + + for_each_child_of_node(np, child) + child->parent = dup; +} + +/** + * attach_node_and_children - attaches nodes + * and its children to live tree + * + * @np: Node to attach to live tree + */ +static int attach_node_and_children(struct device_node *np) +{ + struct device_node *next, *root = np, *dup; + + /* skip root node */ + np = np->child; + /* storing a copy in temporary node */ + dup = np; + + while (dup) { + nodes[last_node_index++] = dup; + dup = dup->sibling; + } + dup = NULL; + + while (np) { + next = np->allnext; + dup = of_find_node_by_path(np->full_name); + if (dup) + update_node_properties(np, dup); + else { + np->child = NULL; + if (np->parent == root) + np->parent = of_allnodes; + of_attach_node(np); + } + np = next; + } + + return 0; +} + +/** + * selftest_data_add - Reads, copies data from + * linked tree and attaches it to the live tree + */ +static int __init selftest_data_add(void) +{ + void *selftest_data; + struct device_node *selftest_data_node, *np; + extern uint8_t __dtb_testcases_begin[]; + extern uint8_t __dtb_testcases_end[]; + const int size = __dtb_testcases_end - __dtb_testcases_begin; + + if (!size) { + pr_warn("%s: No testcase data to attach; not running tests\n", + __func__); + return -ENODATA; + } + + /* creating copy */ + selftest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL); + + if (!selftest_data) { + pr_warn("%s: Failed to allocate memory for selftest_data; " + "not running tests\n", __func__); + return -ENOMEM; + } + of_fdt_unflatten_tree(selftest_data, &selftest_data_node); + if (!selftest_data_node) { + pr_warn("%s: No tree to attach; not running tests\n", __func__); + return -ENODATA; + } + + if (!of_allnodes) { + /* enabling flag for removing nodes */ + selftest_live_tree = true; + of_allnodes = selftest_data_node; + + for_each_of_allnodes(np) + __of_attach_node_sysfs(np); + of_aliases = of_find_node_by_path("/aliases"); + of_chosen = of_find_node_by_path("/chosen"); + return 0; + } + + /* attach the sub-tree to live tree */ + return attach_node_and_children(selftest_data_node); +} + +/** + * detach_node_and_children - detaches node + * and its children from live tree + * + * @np: Node to detach from live tree + */ +static void detach_node_and_children(struct device_node *np) +{ + while (np->child) + detach_node_and_children(np->child); + + while (np->sibling) + detach_node_and_children(np->sibling); + + of_detach_node(np); +} + +/** + * selftest_data_remove - 
removes the selftest data + * nodes from the live tree + */ +static void selftest_data_remove(void) +{ + struct device_node *np; + struct property *prop; + + if (selftest_live_tree) { + of_node_put(of_aliases); + of_node_put(of_chosen); + of_aliases = NULL; + of_chosen = NULL; + for_each_child_of_node(of_allnodes, np) + detach_node_and_children(np); + __of_detach_node_sysfs(of_allnodes); + of_allnodes = NULL; + return; + } + + while (last_node_index >= 0) { + if (nodes[last_node_index]) { + np = of_find_node_by_path(nodes[last_node_index]->full_name); + if (strcmp(np->full_name, "/aliases") != 0) { + detach_node_and_children(np->child); + of_detach_node(np); + } else { + for_each_property_of_node(np, prop) { + if (strcmp(prop->name, "testcase-alias") == 0) + of_remove_property(np, prop); + } + } + } + last_node_index--; + } +} + static int __init of_selftest(void) { struct device_node *np; + int res; + + /* adding data for selftest */ + res = selftest_data_add(); + if (res) + return res; np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); if (!np) { @@ -533,12 +784,18 @@ static int __init of_selftest(void) of_selftest_dynamic(); of_selftest_parse_phandle_with_args(); of_selftest_property_match_string(); + of_selftest_property_copy(); + of_selftest_changeset(); of_selftest_parse_interrupts(); of_selftest_parse_interrupts_extended(); of_selftest_match_node(); of_selftest_platform_populate(); pr_info("end of selftest - %i passed, %i failed\n", selftest_results.passed, selftest_results.failed); + + /* removing selftest data from live tree */ + selftest_data_remove(); + return 0; } late_initcall(of_selftest); diff --git a/drivers/of/testcase-data/testcases.dts b/drivers/of/testcase-data/testcases.dts new file mode 100644 index 000000000000..219ef9324e9c --- /dev/null +++ b/drivers/of/testcase-data/testcases.dts @@ -0,0 +1,15 @@ +/dts-v1/; +/ { + testcase-data { + changeset { + prop-update = "hello"; + prop-remove = "world"; + node-remove { + }; + }; + }; +}; +#include "tests-phandle.dtsi" +#include "tests-interrupts.dtsi" +#include "tests-match.dtsi" +#include "tests-platform.dtsi" diff --git a/drivers/of/testcase-data/testcases.dtsi b/drivers/of/testcase-data/testcases.dtsi deleted file mode 100644 index 6d8d980ac858..000000000000 --- a/drivers/of/testcase-data/testcases.dtsi +++ /dev/null @@ -1,4 +0,0 @@ -#include "tests-phandle.dtsi" -#include "tests-interrupts.dtsi" -#include "tests-match.dtsi" -#include "tests-platform.dtsi" diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 9eae9834bcc7..a0580afe1713 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -913,7 +913,7 @@ static int __init dino_probe(struct parisc_device *dev) printk("%s version %s found at 0x%lx\n", name, version, hpa); if (!request_mem_region(hpa, PAGE_SIZE, name)) { - printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n", + printk(KERN_ERR "DINO: Hey! 
Someone took my MMIO space (0x%lx)!\n", hpa); return 1; } diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 0f54ab6260df..3651c3871d5b 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -278,7 +278,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun { struct hardware_path hwpath; unsigned short i; - char in[count+1], *temp; + char in[64], *temp; struct device *dev; int ret; @@ -286,8 +286,9 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun return -EINVAL; /* We'll use a local copy of buf */ - memset(in, 0, count+1); + count = min_t(size_t, count, sizeof(in)-1); strncpy(in, buf, count); + in[count] = '\0'; /* Let's clean up the target. 0xff is a blank pattern */ memset(&hwpath, 0xff, sizeof(hwpath)); @@ -393,14 +394,15 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count { unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */ unsigned short i; - char in[count+1], *temp; + char in[64], *temp; if (!entry || !buf || !count) return -EINVAL; /* We'll use a local copy of buf */ - memset(in, 0, count+1); + count = min_t(size_t, count, sizeof(in)-1); strncpy(in, buf, count); + in[count] = '\0'; /* Let's clean up the target. 0 is a blank pattern */ memset(&layers, 0, sizeof(layers)); @@ -755,7 +757,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, { struct pdcspath_entry *pathentry; unsigned char flags; - char in[count+1], *temp; + char in[8], *temp; char c; if (!capable(CAP_SYS_ADMIN)) @@ -765,8 +767,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, return -EINVAL; /* We'll use a local copy of buf */ - memset(in, 0, count+1); + count = min_t(size_t, count, sizeof(in)-1); strncpy(in, buf, count); + in[count] = '\0'; /* Current flags are stored in primary boot path entry */ pathentry = &pdcspath_entry_primary; diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a042d065a0c7..8be2096c8423 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -395,7 +395,8 @@ static void __init superio_serial_init(void) serial_port.iotype = UPIO_PORT; serial_port.type = PORT_16550A; serial_port.uartclk = 115200*16; - serial_port.fifosize = 16; + serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | + UPF_BOOT_AUTOCONF; /* serial port #1 */ serial_port.iobase = sio_dev.sp1_base; diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c index c864f82bd37d..30e981be14c2 100644 --- a/drivers/parport/parport_ip32.c +++ b/drivers/parport/parport_ip32.c @@ -2204,7 +2204,7 @@ static int __init parport_ip32_init(void) { pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n"); this_port = parport_ip32_probe_port(); - return IS_ERR(this_port) ? PTR_ERR(this_port) : 0; + return PTR_ERR_OR_ZERO(this_port); } /** diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 21df477be0c8..90f5ccacce4b 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -1,9 +1,18 @@ menu "PCI host controller drivers" depends on PCI +config PCI_DRA7XX + bool "TI DRA7xx PCIe controller" + select PCIE_DW + depends on OF && HAS_IOMEM && TI_PIPE3 + help + Enables support for the PCIe controller in the DRA7xx SoC. There + are two instances of PCIe controller in DRA7xx. This controller can + act both as EP and RC. This reuses the Designware core. 
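As an aside on the parisc/pdc_stable hunks earlier in this patch, which drop the variable-length "char in[count+1]" stack arrays: below is a minimal sketch of the same bounded-copy idiom for a sysfs store handler. It is not part of the patch; the buffer size, attribute handler name and example_* identifiers are illustrative only.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	char in[64];	/* fixed-size local copy instead of a VLA */

	if (!buf || !count)
		return -EINVAL;

	/* copy at most sizeof(in) - 1 bytes and always NUL-terminate */
	count = min_t(size_t, count, sizeof(in) - 1);
	strncpy(in, buf, count);
	in[count] = '\0';

	/* ... parse 'in' here, as pdcspath_hwpath_write() does ... */

	return count;
}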
+ config PCI_MVEBU bool "Marvell EBU PCIe controller" - depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD + depends on ARCH_MVEBU || ARCH_DOVE depends on OF config PCIE_DW @@ -46,4 +55,12 @@ config PCI_HOST_GENERIC Say Y here if you want to support a simple generic PCI host controller, such as the one emulated by kvmtool. +config PCIE_SPEAR13XX + bool "STMicroelectronics SPEAr PCIe controller" + depends on ARCH_SPEAR13XX + select PCIEPORTBUS + select PCIE_DW + help + Say Y here if you want PCIe support on SPEAr13XX SoCs. + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 611ba4b48c94..d0e88f114ff9 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o +obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o @@ -6,3 +7,4 @@ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o +obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c new file mode 100644 index 000000000000..52b34fee07fd --- /dev/null +++ b/drivers/pci/host/pci-dra7xx.c @@ -0,0 +1,458 @@ +/* + * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs + * + * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com + * + * Authors: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/resource.h> +#include <linux/types.h> + +#include "pcie-designware.h" + +/* PCIe controller wrapper DRA7XX configuration registers */ + +#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 +#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 +#define ERR_SYS BIT(0) +#define ERR_FATAL BIT(1) +#define ERR_NONFATAL BIT(2) +#define ERR_COR BIT(3) +#define ERR_AXI BIT(4) +#define ERR_ECRC BIT(5) +#define PME_TURN_OFF BIT(8) +#define PME_TO_ACK BIT(9) +#define PM_PME BIT(10) +#define LINK_REQ_RST BIT(11) +#define LINK_UP_EVT BIT(12) +#define CFG_BME_EVT BIT(13) +#define CFG_MSE_EVT BIT(14) +#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ + ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ + LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) + +#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 +#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 +#define INTA BIT(0) +#define INTB BIT(1) +#define INTC BIT(2) +#define INTD BIT(3) +#define MSI BIT(4) +#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) + +#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 +#define LTSSM_EN 0x1 + +#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C +#define LINK_UP BIT(16) + +struct dra7xx_pcie { + void __iomem *base; + struct phy **phy; + int phy_count; + struct device *dev; + struct pcie_port pp; +}; + +#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp) + +static inline u32 dra7xx_pcie_readl(struct 
dra7xx_pcie *pcie, u32 offset) +{ + return readl(pcie->base + offset); +} + +static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->base + offset); +} + +static int dra7xx_pcie_link_up(struct pcie_port *pp) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); + + return !!(reg & LINK_UP); +} + +static int dra7xx_pcie_establish_link(struct pcie_port *pp) +{ + u32 reg; + unsigned int retries = 1000; + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + + if (dw_pcie_link_up(pp)) { + dev_err(pp->dev, "link is already up\n"); + return 0; + } + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg |= LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); + + while (retries--) { + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); + if (reg & LINK_UP) + break; + usleep_range(10, 20); + } + + if (retries == 0) { + dev_err(pp->dev, "link is not up\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) +{ + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, + ~INTERRUPTS); + dra7xx_pcie_writel(dra7xx, + PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS); + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, + ~LEG_EP_INTERRUPTS & ~MSI); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dra7xx_pcie_writel(dra7xx, + PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI); + else + dra7xx_pcie_writel(dra7xx, + PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, + LEG_EP_INTERRUPTS); +} + +static void dra7xx_pcie_host_init(struct pcie_port *pp) +{ + dw_pcie_setup_rc(pp); + dra7xx_pcie_establish_link(pp); + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + dra7xx_pcie_enable_interrupts(pp); +} + +static struct pcie_host_ops dra7xx_pcie_host_ops = { + .link_up = dra7xx_pcie_link_up, + .host_init = dra7xx_pcie_host_init, +}; + +static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + set_irq_flags(irq, IRQF_VALID); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = dra7xx_pcie_intx_map, +}; + +static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) +{ + struct device *dev = pp->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node = of_get_next_child(node, NULL); + + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return PTR_ERR(pcie_intc_node); + } + + pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, + &intx_domain_ops, pp); + if (!pp->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + return PTR_ERR(pp->irq_domain); + } + + return 0; +} + +static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + u32 reg; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); + + switch (reg) { + case MSI: + dw_handle_msi_irq(pp); + break; + case INTA: + case INTB: + case INTC: + case INTD: + generic_handle_irq(irq_find_mapping(pp->irq_domain, ffs(reg))); + break; + } + + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); + + return IRQ_HANDLED; +} + + +static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) +{ + struct 
dra7xx_pcie *dra7xx = arg; + u32 reg; + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); + + if (reg & ERR_SYS) + dev_dbg(dra7xx->dev, "System Error\n"); + + if (reg & ERR_FATAL) + dev_dbg(dra7xx->dev, "Fatal Error\n"); + + if (reg & ERR_NONFATAL) + dev_dbg(dra7xx->dev, "Non Fatal Error\n"); + + if (reg & ERR_COR) + dev_dbg(dra7xx->dev, "Correctable Error\n"); + + if (reg & ERR_AXI) + dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n"); + + if (reg & ERR_ECRC) + dev_dbg(dra7xx->dev, "ECRC Error\n"); + + if (reg & PME_TURN_OFF) + dev_dbg(dra7xx->dev, + "Power Management Event Turn-Off message received\n"); + + if (reg & PME_TO_ACK) + dev_dbg(dra7xx->dev, + "Power Management Turn-Off Ack message received\n"); + + if (reg & PM_PME) + dev_dbg(dra7xx->dev, + "PM Power Management Event message received\n"); + + if (reg & LINK_REQ_RST) + dev_dbg(dra7xx->dev, "Link Request Reset\n"); + + if (reg & LINK_UP_EVT) + dev_dbg(dra7xx->dev, "Link-up state change\n"); + + if (reg & CFG_BME_EVT) + dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n"); + + if (reg & CFG_MSE_EVT) + dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n"); + + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); + + return IRQ_HANDLED; +} + +static int add_pcie_port(struct dra7xx_pcie *dra7xx, + struct platform_device *pdev) +{ + int ret; + struct pcie_port *pp; + struct resource *res; + struct device *dev = &pdev->dev; + + pp = &dra7xx->pp; + pp->dev = dev; + pp->ops = &dra7xx_pcie_host_ops; + + pp->irq = platform_get_irq(pdev, 1); + if (pp->irq < 0) { + dev_err(dev, "missing IRQ resource\n"); + return -EINVAL; + } + + ret = devm_request_irq(&pdev->dev, pp->irq, + dra7xx_pcie_msi_irq_handler, IRQF_SHARED, + "dra7-pcie-msi", pp); + if (ret) { + dev_err(&pdev->dev, "failed to request irq\n"); + return ret; + } + + if (!IS_ENABLED(CONFIG_PCI_MSI)) { + ret = dra7xx_pcie_init_irq_domain(pp); + if (ret < 0) + return ret; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); + pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res)); + if (!pp->dbi_base) + return -ENOMEM; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dra7xx->dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int __init dra7xx_pcie_probe(struct platform_device *pdev) +{ + u32 reg; + int ret; + int irq; + int i; + int phy_count; + struct phy **phy; + void __iomem *base; + struct resource *res; + struct dra7xx_pcie *dra7xx; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + char name[10]; + + dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); + if (!dra7xx) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "missing IRQ resource\n"); + return -EINVAL; + } + + ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, + IRQF_SHARED, "dra7xx-pcie-main", dra7xx); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); + base = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!base) + return -ENOMEM; + + phy_count = of_property_count_strings(np, "phy-names"); + if (phy_count < 0) { + dev_err(dev, "unable to find the strings\n"); + return phy_count; + } + + phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); + if (!phy) + return -ENOMEM; + + for (i = 0; i < phy_count; i++) { + snprintf(name, sizeof(name), "pcie-phy%d", i); + phy[i] = devm_phy_get(dev, name); + if 
(IS_ERR(phy[i])) + return PTR_ERR(phy[i]); + + ret = phy_init(phy[i]); + if (ret < 0) + goto err_phy; + + ret = phy_power_on(phy[i]); + if (ret < 0) { + phy_exit(phy[i]); + goto err_phy; + } + } + + dra7xx->base = base; + dra7xx->phy = phy; + dra7xx->dev = dev; + dra7xx->phy_count = phy_count; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (IS_ERR_VALUE(ret)) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_phy; + } + + reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); + reg &= ~LTSSM_EN; + dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); + + platform_set_drvdata(pdev, dra7xx); + + ret = add_pcie_port(dra7xx, pdev); + if (ret < 0) + goto err_add_port; + + return 0; + +err_add_port: + pm_runtime_put(dev); + pm_runtime_disable(dev); + +err_phy: + while (--i >= 0) { + phy_power_off(phy[i]); + phy_exit(phy[i]); + } + + return ret; +} + +static int __exit dra7xx_pcie_remove(struct platform_device *pdev) +{ + struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev); + struct pcie_port *pp = &dra7xx->pp; + struct device *dev = &pdev->dev; + int count = dra7xx->phy_count; + + if (pp->irq_domain) + irq_domain_remove(pp->irq_domain); + pm_runtime_put(dev); + pm_runtime_disable(dev); + while (count--) { + phy_power_off(dra7xx->phy[count]); + phy_exit(dra7xx->phy[count]); + } + + return 0; +} + +static const struct of_device_id of_dra7xx_pcie_match[] = { + { .compatible = "ti,dra7-pcie", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match); + +static struct platform_driver dra7xx_pcie_driver = { + .remove = __exit_p(dra7xx_pcie_remove), + .driver = { + .name = "dra7-pcie", + .owner = THIS_MODULE, + .of_match_table = of_dra7xx_pcie_match, + }, +}; + +module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); + +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); +MODULE_DESCRIPTION("TI PCIe controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index a568efaa331c..35fc73a8d0b3 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -49,6 +49,9 @@ struct imx6_pcie { /* PCIe Port Logic registers (memory-mapped) */ #define PL_OFFSET 0x700 +#define PCIE_PL_PFLR (PL_OFFSET + 0x08) +#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) +#define PCIE_PL_PFLR_FORCE_LINK (1 << 15) #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29) @@ -214,6 +217,32 @@ static int imx6q_pcie_abort_handler(unsigned long addr, static int imx6_pcie_assert_core_reset(struct pcie_port *pp) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); + u32 val, gpr1, gpr12; + + /* + * If the bootloader already enabled the link we need some special + * handling to get the core back into a state where it is safe to + * touch it for configuration. As there is no dedicated reset signal + * wired up for MX6QDL, we need to manually force LTSSM into "detect" + * state before completely disabling LTSSM, which is a prerequisite + * for core configuration. + * + * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong + * indication that the bootloader activated the link. 
+ */ + regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1); + regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12); + + if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) && + (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) { + val = readl(pp->dbi_base + PCIE_PL_PFLR); + val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; + val |= PCIE_PL_PFLR_FORCE_LINK; + writel(val, pp->dbi_base + PCIE_PL_PFLR); + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); + } regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); @@ -589,6 +618,14 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) return 0; } +static void imx6_pcie_shutdown(struct platform_device *pdev) +{ + struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); + + /* bring down link, so bootloader gets clean state in case of reboot */ + imx6_pcie_assert_core_reset(&imx6_pcie->pp); +} + static const struct of_device_id imx6_pcie_of_match[] = { { .compatible = "fsl,imx6q-pcie", }, {}, @@ -601,6 +638,7 @@ static struct platform_driver imx6_pcie_driver = { .owner = THIS_MODULE, .of_match_table = imx6_pcie_of_match, }, + .shutdown = imx6_pcie_shutdown, }; /* Freescale PCIe driver does not allow module unload */ diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index c284e841e3ea..0fb0fdb223d5 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -25,6 +25,7 @@ */ #include <linux/clk.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/interrupt.h> @@ -41,11 +42,12 @@ #include <linux/reset.h> #include <linux/sizes.h> #include <linux/slab.h> -#include <linux/tegra-cpuidle.h> -#include <linux/tegra-powergate.h> #include <linux/vmalloc.h> #include <linux/regulator/consumer.h> +#include <soc/tegra/cpuidle.h> +#include <soc/tegra/pmc.h> + #include <asm/mach/irq.h> #include <asm/mach/map.h> #include <asm/mach/pci.h> @@ -233,7 +235,6 @@ struct tegra_pcie_soc_data { bool has_pex_clkreq_en; bool has_pex_bias_ctrl; bool has_intr_prsnt_sense; - bool has_avdd_supply; bool has_cml_clk; }; @@ -272,11 +273,11 @@ struct tegra_pcie { unsigned int num_ports; u32 xbar_config; - struct regulator *pex_clk_supply; - struct regulator *vdd_supply; - struct regulator *avdd_supply; + struct regulator_bulk_data *supplies; + unsigned int num_supplies; const struct tegra_pcie_soc_data *soc_data; + struct dentry *debugfs; }; struct tegra_pcie_port { @@ -894,7 +895,6 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) static void tegra_pcie_power_off(struct tegra_pcie *pcie) { - const struct tegra_pcie_soc_data *soc = pcie->soc_data; int err; /* TODO: disable and unprepare clocks? 
*/ @@ -905,23 +905,9 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie) tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); - if (soc->has_avdd_supply) { - err = regulator_disable(pcie->avdd_supply); - if (err < 0) - dev_warn(pcie->dev, - "failed to disable AVDD regulator: %d\n", - err); - } - - err = regulator_disable(pcie->pex_clk_supply); + err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies); if (err < 0) - dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n", - err); - - err = regulator_disable(pcie->vdd_supply); - if (err < 0) - dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n", - err); + dev_warn(pcie->dev, "failed to disable regulators: %d\n", err); } static int tegra_pcie_power_on(struct tegra_pcie *pcie) @@ -936,28 +922,9 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie) tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); /* enable regulators */ - err = regulator_enable(pcie->vdd_supply); - if (err < 0) { - dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err); - return err; - } - - err = regulator_enable(pcie->pex_clk_supply); - if (err < 0) { - dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n", - err); - return err; - } - - if (soc->has_avdd_supply) { - err = regulator_enable(pcie->avdd_supply); - if (err < 0) { - dev_err(pcie->dev, - "failed to enable AVDD regulator: %d\n", - err); - return err; - } - } + err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies); + if (err < 0) + dev_err(pcie->dev, "failed to enable regulators: %d\n", err); err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, pcie->pex_clk, @@ -1394,14 +1361,157 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, return -EINVAL; } +/* + * Check whether a given set of supplies is available in a device tree node. + * This is used to check whether the new or the legacy device tree bindings + * should be used. + */ +static bool of_regulator_bulk_available(struct device_node *np, + struct regulator_bulk_data *supplies, + unsigned int num_supplies) +{ + char property[32]; + unsigned int i; + + for (i = 0; i < num_supplies; i++) { + snprintf(property, 32, "%s-supply", supplies[i].supply); + + if (of_find_property(np, property, NULL) == NULL) + return false; + } + + return true; +} + +/* + * Old versions of the device tree binding for this device used a set of power + * supplies that didn't match the hardware inputs. This happened to work for a + * number of cases but is not future proof. However to preserve backwards- + * compatibility with old device trees, this function will try to use the old + * set of supplies. 
+ */ +static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) +{ + struct device_node *np = pcie->dev->of_node; + + if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) + pcie->num_supplies = 3; + else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) + pcie->num_supplies = 2; + + if (pcie->num_supplies == 0) { + dev_err(pcie->dev, "device %s not supported in legacy mode\n", + np->full_name); + return -ENODEV; + } + + pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[0].supply = "pex-clk"; + pcie->supplies[1].supply = "vdd"; + + if (pcie->num_supplies > 2) + pcie->supplies[2].supply = "avdd"; + + return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies, + pcie->supplies); +} + +/* + * Obtains the list of regulators required for a particular generation of the + * IP block. + * + * This would've been nice to do simply by providing static tables for use + * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky + * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB) + * and either seems to be optional depending on which ports are being used. + */ +static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) +{ + struct device_node *np = pcie->dev->of_node; + unsigned int i = 0; + + if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { + bool need_pexa = false, need_pexb = false; + + /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ + if (lane_mask & 0x0f) + need_pexa = true; + + /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */ + if (lane_mask & 0x30) + need_pexb = true; + + pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + + (need_pexb ? 2 : 0); + + pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[i++].supply = "avdd-pex-pll"; + pcie->supplies[i++].supply = "hvdd-pex"; + pcie->supplies[i++].supply = "vddio-pex-ctl"; + pcie->supplies[i++].supply = "avdd-plle"; + + if (need_pexa) { + pcie->supplies[i++].supply = "avdd-pexa"; + pcie->supplies[i++].supply = "vdd-pexa"; + } + + if (need_pexb) { + pcie->supplies[i++].supply = "avdd-pexb"; + pcie->supplies[i++].supply = "vdd-pexb"; + } + } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { + pcie->num_supplies = 5; + + pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, + sizeof(*pcie->supplies), + GFP_KERNEL); + if (!pcie->supplies) + return -ENOMEM; + + pcie->supplies[0].supply = "avdd-pex"; + pcie->supplies[1].supply = "vdd-pex"; + pcie->supplies[2].supply = "avdd-pex-pll"; + pcie->supplies[3].supply = "avdd-plle"; + pcie->supplies[4].supply = "vddio-pex-clk"; + } + + if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies, + pcie->num_supplies)) + return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies, + pcie->supplies); + + /* + * If not all regulators are available for this new scheme, assume + * that the device tree complies with an older version of the device + * tree binding. 
+ */ + dev_info(pcie->dev, "using legacy DT binding for power supplies\n"); + + devm_kfree(pcie->dev, pcie->supplies); + pcie->num_supplies = 0; + + return tegra_pcie_get_legacy_regulators(pcie); +} + static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) { const struct tegra_pcie_soc_data *soc = pcie->soc_data; struct device_node *np = pcie->dev->of_node, *port; struct of_pci_range_parser parser; struct of_pci_range range; + u32 lanes = 0, mask = 0; + unsigned int lane = 0; struct resource res; - u32 lanes = 0; int err; if (of_pci_range_parser_init(&parser, np)) { @@ -1409,20 +1519,6 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) return -EINVAL; } - pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd"); - if (IS_ERR(pcie->vdd_supply)) - return PTR_ERR(pcie->vdd_supply); - - pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk"); - if (IS_ERR(pcie->pex_clk_supply)) - return PTR_ERR(pcie->pex_clk_supply); - - if (soc->has_avdd_supply) { - pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd"); - if (IS_ERR(pcie->avdd_supply)) - return PTR_ERR(pcie->avdd_supply); - } - for_each_of_pci_range(&parser, &range) { of_pci_range_to_resource(&range, np, &res); @@ -1490,8 +1586,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) lanes |= value << (index << 3); - if (!of_device_is_available(port)) + if (!of_device_is_available(port)) { + lane += value; continue; + } + + mask |= ((1 << value) - 1) << lane; + lane += value; rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL); if (!rp) @@ -1522,6 +1623,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) return err; } + err = tegra_pcie_get_regulators(pcie, mask); + if (err < 0) + return err; + return 0; } @@ -1615,7 +1720,6 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = { .has_pex_clkreq_en = false, .has_pex_bias_ctrl = false, .has_intr_prsnt_sense = false, - .has_avdd_supply = false, .has_cml_clk = false, }; @@ -1627,7 +1731,6 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = { .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, - .has_avdd_supply = true, .has_cml_clk = true, }; @@ -1638,6 +1741,115 @@ static const struct of_device_id tegra_pcie_of_match[] = { }; MODULE_DEVICE_TABLE(of, tegra_pcie_of_match); +static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos) +{ + struct tegra_pcie *pcie = s->private; + + if (list_empty(&pcie->ports)) + return NULL; + + seq_printf(s, "Index Status\n"); + + return seq_list_start(&pcie->ports, *pos); +} + +static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct tegra_pcie *pcie = s->private; + + return seq_list_next(v, &pcie->ports, pos); +} + +static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v) +{ +} + +static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v) +{ + bool up = false, active = false; + struct tegra_pcie_port *port; + unsigned int value; + + port = list_entry(v, struct tegra_pcie_port, list); + + value = readl(port->base + RP_VEND_XP); + + if (value & RP_VEND_XP_DL_UP) + up = true; + + value = readl(port->base + RP_LINK_CONTROL_STATUS); + + if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) + active = true; + + seq_printf(s, "%2u ", port->index); + + if (up) + seq_printf(s, "up"); + + if (active) { + if (up) + seq_printf(s, ", "); + + seq_printf(s, "active"); + } + + seq_printf(s, "\n"); + return 0; +} + +static const struct seq_operations tegra_pcie_ports_seq_ops = { + .start = tegra_pcie_ports_seq_start, + .next = 
tegra_pcie_ports_seq_next, + .stop = tegra_pcie_ports_seq_stop, + .show = tegra_pcie_ports_seq_show, +}; + +static int tegra_pcie_ports_open(struct inode *inode, struct file *file) +{ + struct tegra_pcie *pcie = inode->i_private; + struct seq_file *s; + int err; + + err = seq_open(file, &tegra_pcie_ports_seq_ops); + if (err) + return err; + + s = file->private_data; + s->private = pcie; + + return 0; +} + +static const struct file_operations tegra_pcie_ports_ops = { + .owner = THIS_MODULE, + .open = tegra_pcie_ports_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie) +{ + struct dentry *file; + + pcie->debugfs = debugfs_create_dir("pcie", NULL); + if (!pcie->debugfs) + return -ENOMEM; + + file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, + pcie, &tegra_pcie_ports_ops); + if (!file) + goto remove; + + return 0; + +remove: + debugfs_remove_recursive(pcie->debugfs); + pcie->debugfs = NULL; + return -ENOMEM; +} + static int tegra_pcie_probe(struct platform_device *pdev) { const struct of_device_id *match; @@ -1692,6 +1904,13 @@ static int tegra_pcie_probe(struct platform_device *pdev) goto disable_msi; } + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_pcie_debugfs_init(pcie); + if (err < 0) + dev_err(&pdev->dev, "failed to setup debugfs: %d\n", + err); + } + platform_set_drvdata(pdev, pcie); return 0; diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 1eaf4df3618a..52bd3a143563 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -20,6 +20,7 @@ #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pci_regs.h> +#include <linux/platform_device.h> #include <linux/types.h> #include "pcie-designware.h" @@ -217,27 +218,47 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0) return 0; } +static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) +{ + unsigned int res, bit, val; + + res = (irq / 32) * 12; + bit = irq % 32; + dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); + val &= ~(1 << bit); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); +} + static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base, unsigned int nvec, unsigned int pos) { - unsigned int i, res, bit, val; + unsigned int i; for (i = 0; i < nvec; i++) { irq_set_msi_desc_off(irq_base, i, NULL); clear_bit(pos + i, pp->msi_irq_in_use); /* Disable corresponding interrupt on MSI controller */ - res = ((pos + i) / 32) * 12; - bit = (pos + i) % 32; - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); - val &= ~(1 << bit); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); + if (pp->ops->msi_clear_irq) + pp->ops->msi_clear_irq(pp, pos + i); + else + dw_pcie_msi_clear_irq(pp, pos + i); } } +static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) +{ + unsigned int res, bit, val; + + res = (irq / 32) * 12; + bit = irq % 32; + dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); + val |= 1 << bit; + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); +} + static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) { - int res, bit, irq, pos0, pos1, i; - u32 val; + int irq, pos0, pos1, i; struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); if (!pp) { @@ -281,11 +302,10 @@ static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) } set_bit(pos0 + i, pp->msi_irq_in_use); /*Enable 
corresponding interrupt in MSI interrupt controller */ - res = ((pos0 + i) / 32) * 12; - bit = (pos0 + i) % 32; - dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); - val |= 1 << bit; - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); + if (pp->ops->msi_set_irq) + pp->ops->msi_set_irq(pp, pos0 + i); + else + dw_pcie_msi_set_irq(pp, pos0 + i); } *pos = pos0; @@ -353,7 +373,10 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, */ desc->msi_attrib.multiple = msgvec; - msg.address_lo = virt_to_phys((void *)pp->msi_data); + if (pp->ops->get_msi_data) + msg.address_lo = pp->ops->get_msi_data(pp); + else + msg.address_lo = virt_to_phys((void *)pp->msi_data); msg.address_hi = 0x0; msg.data = pos; write_msi_msg(irq, &msg); @@ -396,10 +419,35 @@ static const struct irq_domain_ops msi_domain_ops = { int __init dw_pcie_host_init(struct pcie_port *pp) { struct device_node *np = pp->dev->of_node; + struct platform_device *pdev = to_platform_device(pp->dev); struct of_pci_range range; struct of_pci_range_parser parser; - u32 val; - int i; + struct resource *cfg_res; + u32 val, na, ns; + const __be32 *addrp; + int i, index; + + /* Find the address cell size and the number of cells in order to get + * the untranslated address. + */ + of_property_read_u32(np, "#address-cells", &na); + ns = of_n_size_cells(np); + + cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); + if (cfg_res) { + pp->config.cfg0_size = resource_size(cfg_res)/2; + pp->config.cfg1_size = resource_size(cfg_res)/2; + pp->cfg0_base = cfg_res->start; + pp->cfg1_base = cfg_res->start + pp->config.cfg0_size; + + /* Find the untranslated configuration space address */ + index = of_property_match_string(np, "reg-names", "config"); + addrp = of_get_address(np, index, false, false); + pp->cfg0_mod_base = of_read_number(addrp, ns); + pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size; + } else { + dev_err(pp->dev, "missing *config* reg space\n"); + } if (of_pci_range_parser_init(&parser, np)) { dev_err(pp->dev, "missing ranges property\n"); @@ -422,17 +470,33 @@ int __init dw_pcie_host_init(struct pcie_port *pp) pp->config.io_size = resource_size(&pp->io); pp->config.io_bus_addr = range.pci_addr; pp->io_base = range.cpu_addr; + + /* Find the untranslated IO space address */ + pp->io_mod_base = of_read_number(parser.range - + parser.np + na, ns); } if (restype == IORESOURCE_MEM) { of_pci_range_to_resource(&range, np, &pp->mem); pp->mem.name = "MEM"; pp->config.mem_size = resource_size(&pp->mem); pp->config.mem_bus_addr = range.pci_addr; + + /* Find the untranslated MEM space address */ + pp->mem_mod_base = of_read_number(parser.range - + parser.np + na, ns); } if (restype == 0) { of_pci_range_to_resource(&range, np, &pp->cfg); pp->config.cfg0_size = resource_size(&pp->cfg)/2; pp->config.cfg1_size = resource_size(&pp->cfg)/2; + pp->cfg0_base = pp->cfg.start; + pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; + + /* Find the untranslated configuration space address */ + pp->cfg0_mod_base = of_read_number(parser.range - + parser.np + na, ns); + pp->cfg1_mod_base = pp->cfg0_mod_base + + pp->config.cfg0_size; } } @@ -445,8 +509,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp) } } - pp->cfg0_base = pp->cfg.start; - pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size; pp->mem_base = pp->mem.start; pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, @@ -509,9 +571,9 @@ static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) /* Program viewport 0 
: OUTBOUND : CFG0 */ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1, + dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1, PCIE_ATU_LIMIT); dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); @@ -525,9 +587,9 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, PCIE_ATU_VIEWPORT); dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, + dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1, PCIE_ATU_LIMIT); dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); @@ -540,9 +602,9 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, PCIE_ATU_VIEWPORT); dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, + dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1, PCIE_ATU_LIMIT); dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET); dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), @@ -556,9 +618,9 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, PCIE_ATU_VIEWPORT); dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1, + dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1, PCIE_ATU_LIMIT); dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET); dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), @@ -656,7 +718,11 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, } if (bus->number != pp->root_bus_nr) - ret = dw_pcie_rd_other_conf(pp, bus, devfn, + if (pp->ops->rd_other_conf) + ret = pp->ops->rd_other_conf(pp, bus, devfn, + where, size, val); + else + ret = dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); else ret = dw_pcie_rd_own_conf(pp, where, size, val); @@ -679,7 +745,11 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, return PCIBIOS_DEVICE_NOT_FOUND; if (bus->number != pp->root_bus_nr) - ret = dw_pcie_wr_other_conf(pp, bus, devfn, 
+ if (pp->ops->wr_other_conf) + ret = pp->ops->wr_other_conf(pp, bus, devfn, + where, size, val); + else + ret = dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); else ret = dw_pcie_wr_own_conf(pp, where, size, val); diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h index 77f592faa7bf..daf81f922cda 100644 --- a/drivers/pci/host/pcie-designware.h +++ b/drivers/pci/host/pcie-designware.h @@ -36,11 +36,15 @@ struct pcie_port { u8 root_bus_nr; void __iomem *dbi_base; u64 cfg0_base; + u64 cfg0_mod_base; void __iomem *va_cfg0_base; u64 cfg1_base; + u64 cfg1_mod_base; void __iomem *va_cfg1_base; u64 io_base; + u64 io_mod_base; u64 mem_base; + u64 mem_mod_base; struct resource cfg; struct resource io; struct resource mem; @@ -61,8 +65,15 @@ struct pcie_host_ops { u32 val, void __iomem *dbi_base); int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val); + int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 *val); + int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, u32 val); int (*link_up)(struct pcie_port *pp); void (*host_init)(struct pcie_port *pp); + void (*msi_set_irq)(struct pcie_port *pp, int irq); + void (*msi_clear_irq)(struct pcie_port *pp, int irq); + u32 (*get_msi_data)(struct pcie_port *pp); }; int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c new file mode 100644 index 000000000000..6dea9e43a75c --- /dev/null +++ b/drivers/pci/host/pcie-spear13xx.c @@ -0,0 +1,393 @@ +/* + * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs + * + * SPEAr13xx PCIe Glue Layer Source Code + * + * Copyright (C) 2010-2014 ST Microelectronics + * Pratyush Anand <pratyush.anand@st.com> + * Mohit Kumar <mohit.kumar@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
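Editorial note: the two new pcie_host_ops callbacks added above (rd_other_conf/wr_other_conf) let a DesignWare-based glue driver take over config accesses to devices behind the root port instead of falling back to the default ATU-based dw_pcie_rd/wr_other_conf() paths. A minimal sketch of how a hypothetical glue driver might wire this up follows; every my_* name and the flat va_cfg0_base layout are illustrative assumptions, not part of the patch. Only dw_pcie_cfg_read() and the structures shown above are taken from the diff itself.

static int my_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
			    unsigned int devfn, int where, int size, u32 *val)
{
	/*
	 * Hypothetical hardware: downstream config space is exposed as a
	 * flat window at va_cfg0_base, one 4 KiB page per devfn, so no ATU
	 * viewport reprogramming is needed per access.
	 */
	void __iomem *addr = pp->va_cfg0_base + (devfn << 12) + (where & ~0x3);

	return dw_pcie_cfg_read(addr, where, size, val);
}

static struct pcie_host_ops my_host_ops = {
	.rd_other_conf	= my_rd_other_conf,
	/* .wr_other_conf, .link_up and .host_init would be filled the same way */
};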
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/resource.h> + +#include "pcie-designware.h" + +struct spear13xx_pcie { + void __iomem *app_base; + struct phy *phy; + struct clk *clk; + struct pcie_port pp; + bool is_gen1; +}; + +struct pcie_app_reg { + u32 app_ctrl_0; /* cr0 */ + u32 app_ctrl_1; /* cr1 */ + u32 app_status_0; /* cr2 */ + u32 app_status_1; /* cr3 */ + u32 msg_status; /* cr4 */ + u32 msg_payload; /* cr5 */ + u32 int_sts; /* cr6 */ + u32 int_clr; /* cr7 */ + u32 int_mask; /* cr8 */ + u32 mst_bmisc; /* cr9 */ + u32 phy_ctrl; /* cr10 */ + u32 phy_status; /* cr11 */ + u32 cxpl_debug_info_0; /* cr12 */ + u32 cxpl_debug_info_1; /* cr13 */ + u32 ven_msg_ctrl_0; /* cr14 */ + u32 ven_msg_ctrl_1; /* cr15 */ + u32 ven_msg_data_0; /* cr16 */ + u32 ven_msg_data_1; /* cr17 */ + u32 ven_msi_0; /* cr18 */ + u32 ven_msi_1; /* cr19 */ + u32 mst_rmisc; /* cr20 */ +}; + +/* CR0 ID */ +#define RX_LANE_FLIP_EN_ID 0 +#define TX_LANE_FLIP_EN_ID 1 +#define SYS_AUX_PWR_DET_ID 2 +#define APP_LTSSM_ENABLE_ID 3 +#define SYS_ATTEN_BUTTON_PRESSED_ID 4 +#define SYS_MRL_SENSOR_STATE_ID 5 +#define SYS_PWR_FAULT_DET_ID 6 +#define SYS_MRL_SENSOR_CHGED_ID 7 +#define SYS_PRE_DET_CHGED_ID 8 +#define SYS_CMD_CPLED_INT_ID 9 +#define APP_INIT_RST_0_ID 11 +#define APP_REQ_ENTR_L1_ID 12 +#define APP_READY_ENTR_L23_ID 13 +#define APP_REQ_EXIT_L1_ID 14 +#define DEVICE_TYPE_EP (0 << 25) +#define DEVICE_TYPE_LEP (1 << 25) +#define DEVICE_TYPE_RC (4 << 25) +#define SYS_INT_ID 29 +#define MISCTRL_EN_ID 30 +#define REG_TRANSLATION_ENABLE 31 + +/* CR1 ID */ +#define APPS_PM_XMT_TURNOFF_ID 2 +#define APPS_PM_XMT_PME_ID 5 + +/* CR3 ID */ +#define XMLH_LTSSM_STATE_DETECT_QUIET 0x00 +#define XMLH_LTSSM_STATE_DETECT_ACT 0x01 +#define XMLH_LTSSM_STATE_POLL_ACTIVE 0x02 +#define XMLH_LTSSM_STATE_POLL_COMPLIANCE 0x03 +#define XMLH_LTSSM_STATE_POLL_CONFIG 0x04 +#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET 0x05 +#define XMLH_LTSSM_STATE_DETECT_WAIT 0x06 +#define XMLH_LTSSM_STATE_CFG_LINKWD_START 0x07 +#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT 0x08 +#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT 0x09 +#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT 0x0A +#define XMLH_LTSSM_STATE_CFG_COMPLETE 0x0B +#define XMLH_LTSSM_STATE_CFG_IDLE 0x0C +#define XMLH_LTSSM_STATE_RCVRY_LOCK 0x0D +#define XMLH_LTSSM_STATE_RCVRY_SPEED 0x0E +#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG 0x0F +#define XMLH_LTSSM_STATE_RCVRY_IDLE 0x10 +#define XMLH_LTSSM_STATE_L0 0x11 +#define XMLH_LTSSM_STATE_L0S 0x12 +#define XMLH_LTSSM_STATE_L123_SEND_EIDLE 0x13 +#define XMLH_LTSSM_STATE_L1_IDLE 0x14 +#define XMLH_LTSSM_STATE_L2_IDLE 0x15 +#define XMLH_LTSSM_STATE_L2_WAKE 0x16 +#define XMLH_LTSSM_STATE_DISABLED_ENTRY 0x17 +#define XMLH_LTSSM_STATE_DISABLED_IDLE 0x18 +#define XMLH_LTSSM_STATE_DISABLED 0x19 +#define XMLH_LTSSM_STATE_LPBK_ENTRY 0x1A +#define XMLH_LTSSM_STATE_LPBK_ACTIVE 0x1B +#define XMLH_LTSSM_STATE_LPBK_EXIT 0x1C +#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT 0x1D +#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY 0x1E +#define XMLH_LTSSM_STATE_HOT_RESET 0x1F +#define XMLH_LTSSM_STATE_MASK 0x3F +#define XMLH_LINK_UP (1 << 6) + +/* CR4 ID */ +#define CFG_MSI_EN_ID 18 + +/* CR6 */ +#define INTA_CTRL_INT (1 << 7) +#define INTB_CTRL_INT (1 << 8) +#define INTC_CTRL_INT (1 << 9) +#define INTD_CTRL_INT (1 << 10) +#define MSI_CTRL_INT (1 << 26) + +/* CR19 ID */ +#define 
VEN_MSI_REQ_ID 11 +#define VEN_MSI_FUN_NUM_ID 8 +#define VEN_MSI_TC_ID 5 +#define VEN_MSI_VECTOR_ID 0 +#define VEN_MSI_REQ_EN ((u32)0x1 << VEN_MSI_REQ_ID) +#define VEN_MSI_FUN_NUM_MASK ((u32)0x7 << VEN_MSI_FUN_NUM_ID) +#define VEN_MSI_TC_MASK ((u32)0x7 << VEN_MSI_TC_ID) +#define VEN_MSI_VECTOR_MASK ((u32)0x1F << VEN_MSI_VECTOR_ID) + +#define EXP_CAP_ID_OFFSET 0x70 + +#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp) + +static int spear13xx_pcie_establish_link(struct pcie_port *pp) +{ + u32 val; + int count = 0; + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + u32 exp_cap_off = EXP_CAP_ID_OFFSET; + + if (dw_pcie_link_up(pp)) { + dev_err(pp->dev, "link already up\n"); + return 0; + } + + dw_pcie_setup_rc(pp); + + /* + * this controller support only 128 bytes read size, however its + * default value in capability register is 512 bytes. So force + * it to 128 here. + */ + dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val); + val &= ~PCI_EXP_DEVCTL_READRQ; + dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val); + + dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A); + dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80); + + /* + * if is_gen1 is set then handle it, so that some buggy card + * also works + */ + if (spear13xx_pcie->is_gen1) { + dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4, + &val); + if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + val &= ~((u32)PCI_EXP_LNKCAP_SLS); + val |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + + PCI_EXP_LNKCAP, 4, val); + } + + dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4, + &val); + if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { + val &= ~((u32)PCI_EXP_LNKCAP_SLS); + val |= PCI_EXP_LNKCAP_SLS_2_5GB; + dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + + PCI_EXP_LNKCTL2, 4, val); + } + } + + /* enable ltssm */ + writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) + | (1 << APP_LTSSM_ENABLE_ID) + | ((u32)1 << REG_TRANSLATION_ENABLE), + &app_reg->app_ctrl_0); + + /* check if the link is up or not */ + while (!dw_pcie_link_up(pp)) { + mdelay(100); + count++; + if (count == 10) { + dev_err(pp->dev, "link Fail\n"); + return -EINVAL; + } + } + dev_info(pp->dev, "link up\n"); + + return 0; +} + +static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + unsigned int status; + + status = readl(&app_reg->int_sts); + + if (status & MSI_CTRL_INT) { + if (!IS_ENABLED(CONFIG_PCI_MSI)) + BUG(); + dw_handle_msi_irq(pp); + } + + writel(status, &app_reg->int_clr); + + return IRQ_HANDLED; +} + +static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp) +{ + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + + /* Enable MSI interrupt */ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + dw_pcie_msi_init(pp); + writel(readl(&app_reg->int_mask) | + MSI_CTRL_INT, &app_reg->int_mask); + } +} + +static int spear13xx_pcie_link_up(struct pcie_port *pp) +{ + struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); + struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; + + if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) + return 1; + + return 0; +} + +static void spear13xx_pcie_host_init(struct pcie_port *pp) +{ + 
spear13xx_pcie_establish_link(pp); + spear13xx_pcie_enable_interrupts(pp); +} + +static struct pcie_host_ops spear13xx_pcie_host_ops = { + .link_up = spear13xx_pcie_link_up, + .host_init = spear13xx_pcie_host_init, +}; + +static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + pp->irq = platform_get_irq(pdev, 0); + if (!pp->irq) { + dev_err(dev, "failed to get irq\n"); + return -ENODEV; + } + ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, + IRQF_SHARED, "spear1340-pcie", pp); + if (ret) { + dev_err(dev, "failed to request irq %d\n", pp->irq); + return ret; + } + + pp->root_bus_nr = -1; + pp->ops = &spear13xx_pcie_host_ops; + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host\n"); + return ret; + } + + return 0; +} + +static int __init spear13xx_pcie_probe(struct platform_device *pdev) +{ + struct spear13xx_pcie *spear13xx_pcie; + struct pcie_port *pp; + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct resource *dbi_base; + int ret; + + spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); + if (!spear13xx_pcie) { + dev_err(dev, "no memory for SPEAr13xx pcie\n"); + return -ENOMEM; + } + + spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); + if (IS_ERR(spear13xx_pcie->phy)) { + ret = PTR_ERR(spear13xx_pcie->phy); + if (ret == -EPROBE_DEFER) + dev_info(dev, "probe deferred\n"); + else + dev_err(dev, "couldn't get pcie-phy\n"); + return ret; + } + + phy_init(spear13xx_pcie->phy); + + spear13xx_pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(spear13xx_pcie->clk)) { + dev_err(dev, "couldn't get clk for pcie\n"); + return PTR_ERR(spear13xx_pcie->clk); + } + ret = clk_prepare_enable(spear13xx_pcie->clk); + if (ret) { + dev_err(dev, "couldn't enable clk for pcie\n"); + return ret; + } + + pp = &spear13xx_pcie->pp; + + pp->dev = dev; + + dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pp->dbi_base = devm_ioremap_resource(dev, dbi_base); + if (IS_ERR(pp->dbi_base)) { + dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); + ret = PTR_ERR(pp->dbi_base); + goto fail_clk; + } + spear13xx_pcie->app_base = pp->dbi_base + 0x2000; + + if (of_property_read_bool(np, "st,pcie-is-gen1")) + spear13xx_pcie->is_gen1 = true; + + ret = add_pcie_port(pp, pdev); + if (ret < 0) + goto fail_clk; + + platform_set_drvdata(pdev, spear13xx_pcie); + return 0; + +fail_clk: + clk_disable_unprepare(spear13xx_pcie->clk); + + return ret; +} + +static const struct of_device_id spear13xx_pcie_of_match[] = { + { .compatible = "st,spear1340-pcie", }, + {}, +}; +MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match); + +static struct platform_driver spear13xx_pcie_driver __initdata = { + .probe = spear13xx_pcie_probe, + .driver = { + .name = "spear-pcie", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(spear13xx_pcie_of_match), + }, +}; + +/* SPEAr13xx PCIe driver does not allow module unload */ + +static int __init pcie_init(void) +{ + return platform_driver_register(&spear13xx_pcie_driver); +} +module_init(pcie_init); + +MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver"); +MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 70741c8c46a0..6cd5160fc057 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -560,19 +560,15 @@ static void 
disable_slot(struct acpiphp_slot *slot) slot->flags &= (~SLOT_ENABLED); } -static bool acpiphp_no_hotplug(struct acpi_device *adev) -{ - return adev && adev->flags.no_hotplug; -} - static bool slot_no_hotplug(struct acpiphp_slot *slot) { - struct acpiphp_func *func; + struct pci_bus *bus = slot->bus; + struct pci_dev *dev; - list_for_each_entry(func, &slot->funcs, sibling) - if (acpiphp_no_hotplug(func_to_acpi_device(func))) + list_for_each_entry(dev, &bus->devices, bus_list) { + if (PCI_SLOT(dev->devfn) == slot->device && dev->ignore_hotplug) return true; - + } return false; } @@ -645,7 +641,7 @@ static void trim_stale_devices(struct pci_dev *dev) status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); alive = (ACPI_SUCCESS(status) && device_status_valid(sta)) - || acpiphp_no_hotplug(adev); + || dev->ignore_hotplug; } if (!alive) alive = pci_device_is_present(dev); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 9da84b8b27d8..2a412fa3b338 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -160,7 +160,7 @@ static void pcie_wait_cmd(struct controller *ctrl) ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE) rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); else - rc = pcie_poll_cmd(ctrl, timeout); + rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout)); /* * Controllers with errata like Intel CF118 don't generate @@ -506,6 +506,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) { struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); + struct pci_bus *subordinate = pdev->subordinate; + struct pci_dev *dev; struct slot *slot = ctrl->slot; u16 detected, intr_loc; @@ -539,6 +541,16 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) wake_up(&ctrl->queue); } + if (subordinate) { + list_for_each_entry(dev, &subordinate->devices, bus_list) { + if (dev->ignore_hotplug) { + ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n", + intr_loc, pci_name(dev)); + return IRQ_HANDLED; + } + } + } + if (!(intr_loc & ~PCI_EXP_SLTSTA_CC)) return IRQ_HANDLED; diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index e246a10a0d2c..3e36ec8d708a 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c @@ -46,7 +46,6 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) */ if (pci_is_pcie(dev)) return; - dev_info(&dev->dev, "using default PCI settings\n"); hpp = &pci_default_type0; } @@ -153,7 +152,6 @@ void pci_configure_slot(struct pci_dev *dev) { struct pci_dev *cdev; struct hotplug_params hpp; - int ret; if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && @@ -163,9 +161,7 @@ void pci_configure_slot(struct pci_dev *dev) pcie_bus_configure_settings(dev->bus); memset(&hpp, 0, sizeof(hpp)); - ret = pci_get_hp_params(dev, &hpp); - if (ret) - dev_warn(&dev->dev, "no hotplug settings from platform\n"); + pci_get_hp_params(dev, &hpp); program_hpp_type2(dev, hpp.t2); program_hpp_type1(dev, hpp.t1); diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 93aa29f6d39c..f2945fa73d4f 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -375,11 +375,11 @@ static void __exit cleanup_slots(void) static int __init rpaphp_init(void) { - struct device_node *dn = NULL; + struct device_node *dn; info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); - while ((dn = of_find_node_by_name(dn, "pci"))) 
+ for_each_node_by_name(dn, "pci") rpaphp_add_slot(dn); return 0; diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index d1332d2f8730..d77e46bca54c 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -7,8 +7,8 @@ * Jan Glauber <jang@linux.vnet.ibm.com> */ -#define COMPONENT "zPCI hpc" -#define pr_fmt(fmt) COMPONENT ": " fmt +#define KMSG_COMPONENT "zpci" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c index 6b2b7dddbbdb..f6219d36227f 100644 --- a/drivers/pci/ioapic.c +++ b/drivers/pci/ioapic.c @@ -98,7 +98,7 @@ static void ioapic_remove(struct pci_dev *dev) } -static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = { +static const struct pci_device_id ioapic_devices[] = { { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) }, { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) }, { } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e3cf8a2e6292..4170113cde61 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -775,7 +775,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) /* Check if setup is sensible at all */ if (!pass && (primary != bus->number || secondary <= bus->number || - secondary > subordinate || subordinate > bus->busn_res.end)) { + secondary > subordinate)) { dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", secondary, subordinate); broken = 1; @@ -838,23 +838,18 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) goto out; } - if (max >= bus->busn_res.end) { - dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n", - max, &bus->busn_res); - goto out; - } - /* Clear errors */ pci_write_config_word(dev, PCI_STATUS, 0xffff); - /* The bus will already exist if we are rescanning */ + /* Prevent assigning a bus number that already exists. + * This can happen when a bridge is hot-plugged, so in + * this case we only re-scan this bus. */ child = pci_find_bus(pci_domain_nr(bus), max+1); if (!child) { child = pci_add_new_bus(bus, dev, max+1); if (!child) goto out; - pci_bus_insert_busn_res(child, max+1, - bus->busn_res.end); + pci_bus_insert_busn_res(child, max+1, 0xff); } max++; buses = (buses & 0xff000000) @@ -913,11 +908,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) /* * Set the subordinate bus number to its real value. */ - if (max > bus->busn_res.end) { - dev_warn(&dev->dev, "max busn %02x is outside %pR\n", - max, &bus->busn_res); - max = bus->busn_res.end; - } pci_bus_update_busn_res_end(child, max); pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); } diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 51cf8083b299..b0ce7cdee0c2 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -144,16 +144,6 @@ config TCIC "Bridge" is the name used for the hardware inside your computer that PCMCIA cards are plugged into. If unsure, say N. -config PCMCIA_M8XX - tristate "MPC8xx PCMCIA support" - depends on PCCARD && PPC && 8xx - select PCCARD_IODYN if PCMCIA != n - help - Say Y here to include support for PowerPC 8xx series PCMCIA - controller. - - This driver is also available as a module called m8xx_pcmcia. 
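Editorial note on the pciehp_isr/acpiphp hunks above: both now key off the pci_dev.ignore_hotplug flag, which a driver sets before deliberately taking its device off the bus so the hotplug services do not treat the resulting presence change as a real hot-unplug. A minimal hedged sketch of the intended usage; the driver and function names are hypothetical, only the ignore_hotplug field comes from the diff.

static void my_gpu_runtime_suspend(struct pci_dev *pdev)
{
	/* Tell pciehp/acpiphp that the upcoming link-down is intentional. */
	pdev->ignore_hotplug = 1;

	/* ... power the device down / disable the link here ... */
}

static void my_gpu_runtime_resume(struct pci_dev *pdev)
{
	/* ... restore power and retrain the link ... */

	/* Presence changes should be handled normally again. */
	pdev->ignore_hotplug = 0;
}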
- config PCMCIA_ALCHEMY_DEVBOARD tristate "Alchemy Db/Pb1xxx PCMCIA socket services" depends on MIPS_ALCHEMY && PCMCIA diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index fd55a6951402..27e94b30cf96 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_PD6729) += pd6729.o obj-$(CONFIG_I82365) += i82365.o obj-$(CONFIG_I82092) += i82092.o obj-$(CONFIG_TCIC) += tcic.o -obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o obj-$(CONFIG_PCMCIA_SOC_COMMON) += soc_common.o obj-$(CONFIG_PCMCIA_SA11XX_BASE) += sa11xx_base.o obj-$(CONFIG_PCMCIA_SA1100) += sa1100_cs.o diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c deleted file mode 100644 index 182034d2ef58..000000000000 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ /dev/null @@ -1,1168 +0,0 @@ -/* - * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series. - * - * (C) 1999-2000 Magnus Damm <damm@opensource.se> - * (C) 2001-2002 Montavista Software, Inc. - * <mlocke@mvista.com> - * - * Support for two slots by Cyclades Corporation - * <oliver.kurth@cyclades.de> - * Further fixes, v2.6 kernel port - * <marcelo.tosatti@cyclades.com> - * - * Some fixes, additions (C) 2005-2007 Montavista Software, Inc. - * <vbordug@ru.mvista.com> - * - * "The ExCA standard specifies that socket controllers should provide - * two IO and five memory windows per socket, which can be independently - * configured and positioned in the host address space and mapped to - * arbitrary segments of card address space. " - David A Hinds. 1999 - * - * This controller does _not_ meet the ExCA standard. - * - * m8xx pcmcia controller brief info: - * + 8 windows (attrib, mem, i/o) - * + up to two slots (SLOT_A and SLOT_B) - * + inputpins, outputpins, event and mask registers. - * - no offset register. sigh. - * - * Because of the lacking offset register we must map the whole card. - * We assign each memory window PCMCIA_MEM_WIN_SIZE address space. - * Make sure there is (PCMCIA_MEM_WIN_SIZE * PCMCIA_MEM_WIN_NO - * * PCMCIA_SOCKETS_NO) bytes at PCMCIA_MEM_WIN_BASE. - * The i/o windows are dynamically allocated at PCMCIA_IO_WIN_BASE. - * They are maximum 64KByte each... - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/string.h> - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/timer.h> -#include <linux/ioport.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/fsl_devices.h> -#include <linux/bitops.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> - -#include <asm/io.h> -#include <asm/time.h> -#include <asm/mpc8xx.h> -#include <asm/8xx_immap.h> -#include <asm/irq.h> -#include <asm/fs_pd.h> - -#include <pcmcia/ss.h> - -#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args) -#define pcmcia_error(args...) 
printk(KERN_ERR "m8xx_pcmcia: "args) - -static const char *version = "Version 0.06, Aug 2005"; -MODULE_LICENSE("Dual MPL/GPL"); - -#if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) - -/* The ADS board use SLOT_A */ -#ifdef CONFIG_ADS -#define CONFIG_PCMCIA_SLOT_A -#define CONFIG_BD_IS_MHZ -#endif - -/* The FADS series are a mess */ -#ifdef CONFIG_FADS -#if defined(CONFIG_MPC860T) || defined(CONFIG_MPC860) || defined(CONFIG_MPC821) -#define CONFIG_PCMCIA_SLOT_A -#else -#define CONFIG_PCMCIA_SLOT_B -#endif -#endif - -#if defined(CONFIG_MPC885ADS) -#define CONFIG_PCMCIA_SLOT_A -#define PCMCIA_GLITCHY_CD -#endif - -/* Cyclades ACS uses both slots */ -#ifdef CONFIG_PRxK -#define CONFIG_PCMCIA_SLOT_A -#define CONFIG_PCMCIA_SLOT_B -#endif - -#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */ - -#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B) - -#define PCMCIA_SOCKETS_NO 2 -/* We have only 8 windows, dualsocket support will be limited. */ -#define PCMCIA_MEM_WIN_NO 2 -#define PCMCIA_IO_WIN_NO 2 -#define PCMCIA_SLOT_MSG "SLOT_A and SLOT_B" - -#elif defined(CONFIG_PCMCIA_SLOT_A) || defined(CONFIG_PCMCIA_SLOT_B) - -#define PCMCIA_SOCKETS_NO 1 -/* full support for one slot */ -#define PCMCIA_MEM_WIN_NO 5 -#define PCMCIA_IO_WIN_NO 2 - -/* define _slot_ to be able to optimize macros */ - -#ifdef CONFIG_PCMCIA_SLOT_A -#define _slot_ 0 -#define PCMCIA_SLOT_MSG "SLOT_A" -#else -#define _slot_ 1 -#define PCMCIA_SLOT_MSG "SLOT_B" -#endif - -#else -#error m8xx_pcmcia: Bad configuration! -#endif - -/* ------------------------------------------------------------------------- */ - -#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */ -#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */ -#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */ -/* ------------------------------------------------------------------------- */ - -static int pcmcia_schlvl; - -static DEFINE_SPINLOCK(events_lock); - -#define PCMCIA_SOCKET_KEY_5V 1 -#define PCMCIA_SOCKET_KEY_LV 2 - -/* look up table for pgcrx registers */ -static u32 *m8xx_pgcrx[2]; - -/* - * This structure is used to address each window in the PCMCIA controller. - * - * Keep in mind that we assume that pcmcia_win[n+1] is mapped directly - * after pcmcia_win[n]... - */ - -struct pcmcia_win { - u32 br; - u32 or; -}; - -/* - * For some reason the hardware guys decided to make both slots share - * some registers. - * - * Could someone invent object oriented hardware ? - * - * The macros are used to get the right bit from the registers. 
- * SLOT_A : slot = 0 - * SLOT_B : slot = 1 - */ - -#define M8XX_PCMCIA_VS1(slot) (0x80000000 >> (slot << 4)) -#define M8XX_PCMCIA_VS2(slot) (0x40000000 >> (slot << 4)) -#define M8XX_PCMCIA_VS_MASK(slot) (0xc0000000 >> (slot << 4)) -#define M8XX_PCMCIA_VS_SHIFT(slot) (30 - (slot << 4)) - -#define M8XX_PCMCIA_WP(slot) (0x20000000 >> (slot << 4)) -#define M8XX_PCMCIA_CD2(slot) (0x10000000 >> (slot << 4)) -#define M8XX_PCMCIA_CD1(slot) (0x08000000 >> (slot << 4)) -#define M8XX_PCMCIA_BVD2(slot) (0x04000000 >> (slot << 4)) -#define M8XX_PCMCIA_BVD1(slot) (0x02000000 >> (slot << 4)) -#define M8XX_PCMCIA_RDY(slot) (0x01000000 >> (slot << 4)) -#define M8XX_PCMCIA_RDY_L(slot) (0x00800000 >> (slot << 4)) -#define M8XX_PCMCIA_RDY_H(slot) (0x00400000 >> (slot << 4)) -#define M8XX_PCMCIA_RDY_R(slot) (0x00200000 >> (slot << 4)) -#define M8XX_PCMCIA_RDY_F(slot) (0x00100000 >> (slot << 4)) -#define M8XX_PCMCIA_MASK(slot) (0xFFFF0000 >> (slot << 4)) - -#define M8XX_PCMCIA_POR_VALID 0x00000001 -#define M8XX_PCMCIA_POR_WRPROT 0x00000002 -#define M8XX_PCMCIA_POR_ATTRMEM 0x00000010 -#define M8XX_PCMCIA_POR_IO 0x00000018 -#define M8XX_PCMCIA_POR_16BIT 0x00000040 - -#define M8XX_PGCRX(slot) m8xx_pgcrx[slot] - -#define M8XX_PGCRX_CXOE 0x00000080 -#define M8XX_PGCRX_CXRESET 0x00000040 - -/* we keep one lookup table per socket to check flags */ - -#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */ - -struct event_table { - u32 regbit; - u32 eventbit; -}; - -static const char driver_name[] = "m8xx-pcmcia"; - -struct socket_info { - void (*handler) (void *info, u32 events); - void *info; - - u32 slot; - pcmconf8xx_t *pcmcia; - u32 bus_freq; - int hwirq; - - socket_state_t state; - struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO]; - struct pccard_io_map io_win[PCMCIA_IO_WIN_NO]; - struct event_table events[PCMCIA_EVENTS_MAX]; - struct pcmcia_socket socket; -}; - -static struct socket_info socket[PCMCIA_SOCKETS_NO]; - -/* - * Search this table to see if the windowsize is - * supported... 
- */ - -#define M8XX_SIZES_NO 32 - -static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = { - 0x00000001, 0x00000002, 0x00000008, 0x00000004, - 0x00000080, 0x00000040, 0x00000010, 0x00000020, - 0x00008000, 0x00004000, 0x00001000, 0x00002000, - 0x00000100, 0x00000200, 0x00000800, 0x00000400, - - 0x0fffffff, 0xffffffff, 0xffffffff, 0xffffffff, - 0x01000000, 0x02000000, 0xffffffff, 0x04000000, - 0x00010000, 0x00020000, 0x00080000, 0x00040000, - 0x00800000, 0x00400000, 0x00100000, 0x00200000 -}; - -/* ------------------------------------------------------------------------- */ - -static irqreturn_t m8xx_interrupt(int irq, void *dev); - -#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ - -/* FADS Boards from Motorola */ - -#if defined(CONFIG_FADS) - -#define PCMCIA_BOARD_MSG "FADS" - -static int voltage_set(int slot, int vcc, int vpp) -{ - u32 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= BCSR1_PCCVCC0; - break; - case 50: - reg |= BCSR1_PCCVCC1; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= BCSR1_PCCVPP1; - else - return 1; - break; - case 120: - if ((vcc == 33) || (vcc == 50)) - reg |= BCSR1_PCCVPP0; - else - return 1; - default: - return 1; - } - - /* first, turn off all power */ - out_be32((u32 *) BCSR1, - in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK | - BCSR1_PCCVPP_MASK)); - - /* enable new powersettings */ - out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V - -static void hardware_enable(int slot) -{ - out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN); -} - -static void hardware_disable(int slot) -{ - out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN); -} - -#endif - -/* MPC885ADS Boards */ - -#if defined(CONFIG_MPC885ADS) - -#define PCMCIA_BOARD_MSG "MPC885ADS" -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V - -static inline void hardware_enable(int slot) -{ - m8xx_pcmcia_ops.hw_ctrl(slot, 1); -} - -static inline void hardware_disable(int slot) -{ - m8xx_pcmcia_ops.hw_ctrl(slot, 0); -} - -static inline int voltage_set(int slot, int vcc, int vpp) -{ - return m8xx_pcmcia_ops.voltage_set(slot, vcc, vpp); -} - -#endif - -#if defined(CONFIG_PRxK) -#include <asm/cpld.h> -extern volatile fpga_pc_regs *fpga_pc; - -#define PCMCIA_BOARD_MSG "MPC855T" - -static int voltage_set(int slot, int vcc, int vpp) -{ - u8 reg = 0; - u8 regread; - cpld_regs *ccpld = get_cpld(); - - switch (vcc) { - case 0: - break; - case 33: - reg |= PCMCIA_VCC_33; - break; - case 50: - reg |= PCMCIA_VCC_50; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= PCMCIA_VPP_VCC; - else - return 1; - break; - case 120: - if ((vcc == 33) || (vcc == 50)) - reg |= PCMCIA_VPP_12; - else - return 1; - default: - return 1; - } - - reg = reg >> (slot << 2); - regread = in_8(&ccpld->fpga_pc_ctl); - if (reg != - (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) { - /* enable new powersettings */ - regread = - regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> - (slot << 2)); - out_8(&ccpld->fpga_pc_ctl, reg | regread); - msleep(100); - } - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_PRxK */ - -static u32 pending_events[PCMCIA_SOCKETS_NO]; -static DEFINE_SPINLOCK(pending_event_lock); - 
-static irqreturn_t m8xx_interrupt(int irq, void *dev) -{ - struct socket_info *s; - struct event_table *e; - unsigned int i, events, pscr, pipr, per; - pcmconf8xx_t *pcmcia = socket[0].pcmcia; - - pr_debug("m8xx_pcmcia: Interrupt!\n"); - /* get interrupt sources */ - - pscr = in_be32(&pcmcia->pcmc_pscr); - pipr = in_be32(&pcmcia->pcmc_pipr); - per = in_be32(&pcmcia->pcmc_per); - - for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { - s = &socket[i]; - e = &s->events[0]; - events = 0; - - while (e->regbit) { - if (pscr & e->regbit) - events |= e->eventbit; - - e++; - } - - /* - * report only if both card detect signals are the same - * not too nice done, - * we depend on that CD2 is the bit to the left of CD1... - */ - if (events & SS_DETECT) - if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^ - (pipr & M8XX_PCMCIA_CD1(i))) { - events &= ~SS_DETECT; - } -#ifdef PCMCIA_GLITCHY_CD - /* - * I've experienced CD problems with my ADS board. - * We make an extra check to see if there was a - * real change of Card detection. - */ - - if ((events & SS_DETECT) && - ((pipr & - (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) && - (s->state.Vcc | s->state.Vpp)) { - events &= ~SS_DETECT; - /*printk( "CD glitch workaround - CD = 0x%08x!\n", - (pipr & (M8XX_PCMCIA_CD2(i) - | M8XX_PCMCIA_CD1(i)))); */ - } -#endif - - /* call the handler */ - - pr_debug("m8xx_pcmcia: slot %u: events = 0x%02x, pscr = 0x%08x, " - "pipr = 0x%08x\n", i, events, pscr, pipr); - - if (events) { - spin_lock(&pending_event_lock); - pending_events[i] |= events; - spin_unlock(&pending_event_lock); - /* - * Turn off RDY_L bits in the PER mask on - * CD interrupt receival. - * - * They can generate bad interrupts on the - * ACS4,8,16,32. - marcelo - */ - per &= ~M8XX_PCMCIA_RDY_L(0); - per &= ~M8XX_PCMCIA_RDY_L(1); - - out_be32(&pcmcia->pcmc_per, per); - - if (events) - pcmcia_parse_events(&socket[i].socket, events); - } - } - - /* clear the interrupt sources */ - out_be32(&pcmcia->pcmc_pscr, pscr); - - pr_debug("m8xx_pcmcia: Interrupt done.\n"); - - return IRQ_HANDLED; -} - -static u32 m8xx_get_graycode(u32 size) -{ - u32 k; - - for (k = 0; k < M8XX_SIZES_NO; k++) - if (m8xx_size_to_gray[k] == size) - break; - - if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1)) - k = -1; - - return k; -} - -static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq) -{ - u32 reg, clocks, psst, psl, psht; - - if (!ns) { - - /* - * We get called with IO maps setup to 0ns - * if not specified by the user. - * They should be 255ns. - */ - - if (is_io) - ns = 255; - else - ns = 100; /* fast memory if 0 */ - } - - /* - * In PSST, PSL, PSHT fields we tell the controller - * timing parameters in CLKOUT clock cycles. - * CLKOUT is the same as GCLK2_50. 
- */ - -/* how we want to adjust the timing - in percent */ - -#define ADJ 180 /* 80 % longer accesstime - to be sure */ - - clocks = ((bus_freq / 1000) * ns) / 1000; - clocks = (clocks * ADJ) / (100 * 1000); - if (clocks >= PCMCIA_BMT_LIMIT) { - printk("Max access time limit reached\n"); - clocks = PCMCIA_BMT_LIMIT - 1; - } - - psst = clocks / 7; /* setup time */ - psht = clocks / 7; /* hold time */ - psl = (clocks * 5) / 7; /* strobe length */ - - psst += clocks - (psst + psht + psl); - - reg = psst << 12; - reg |= psl << 7; - reg |= psht << 16; - - return reg; -} - -static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value) -{ - int lsock = container_of(sock, struct socket_info, socket)->slot; - struct socket_info *s = &socket[lsock]; - unsigned int pipr, reg; - pcmconf8xx_t *pcmcia = s->pcmcia; - - pipr = in_be32(&pcmcia->pcmc_pipr); - - *value = ((pipr & (M8XX_PCMCIA_CD1(lsock) - | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0; - *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0; - - if (s->state.flags & SS_IOCARD) - *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_STSCHG : 0; - else { - *value |= (pipr & M8XX_PCMCIA_RDY(lsock)) ? SS_READY : 0; - *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_BATDEAD : 0; - *value |= (pipr & M8XX_PCMCIA_BVD2(lsock)) ? SS_BATWARN : 0; - } - - if (s->state.Vcc | s->state.Vpp) - *value |= SS_POWERON; - - /* - * Voltage detection: - * This driver only supports 16-Bit pc-cards. - * Cardbus is not handled here. - * - * To determine what voltage to use we must read the VS1 and VS2 pin. - * Depending on what socket type is present, - * different combinations mean different things. - * - * Card Key Socket Key VS1 VS2 Card Vcc for CIS parse - * - * 5V 5V, LV* NC NC 5V only 5V (if available) - * - * 5V 5V, LV* GND NC 5 or 3.3V as low as possible - * - * 5V 5V, LV* GND GND 5, 3.3, x.xV as low as possible - * - * LV* 5V - - shall not fit into socket - * - * LV* LV* GND NC 3.3V only 3.3V - * - * LV* LV* NC GND x.xV x.xV (if avail.) - * - * LV* LV* GND GND 3.3 or x.xV as low as possible - * - * *LV means Low Voltage - * - * - * That gives us the following table: - * - * Socket VS1 VS2 Voltage - * - * 5V NC NC 5V - * 5V NC GND none (should not be possible) - * 5V GND NC >= 3.3V - * 5V GND GND >= x.xV - * - * LV NC NC 5V (if available) - * LV NC GND x.xV (if available) - * LV GND NC 3.3V - * LV GND GND >= x.xV - * - * So, how do I determine if I have a 5V or a LV - * socket on my board? Look at the socket! - * - * - * Socket with 5V key: - * ++--------------------------------------------+ - * || | - * || || - * || || - * | | - * +---------------------------------------------+ - * - * Socket with LV key: - * ++--------------------------------------------+ - * || | - * | || - * | || - * | | - * +---------------------------------------------+ - * - * - * With other words - LV only cards does not fit - * into the 5V socket! - */ - - /* read out VS1 and VS2 */ - - reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock)) - >> M8XX_PCMCIA_VS_SHIFT(lsock); - - if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) { - switch (reg) { - case 1: - *value |= SS_3VCARD; - break; /* GND, NC - 3.3V only */ - case 2: - *value |= SS_XVCARD; - break; /* NC. 
GND - x.xV only */ - }; - } - - pr_debug("m8xx_pcmcia: GetStatus(%d) = %#2.2x\n", lsock, *value); - return 0; -} - -static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state) -{ - int lsock = container_of(sock, struct socket_info, socket)->slot; - struct socket_info *s = &socket[lsock]; - struct event_table *e; - unsigned int reg; - unsigned long flags; - pcmconf8xx_t *pcmcia = socket[0].pcmcia; - - pr_debug("m8xx_pcmcia: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " - "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, - state->Vcc, state->Vpp, state->io_irq, state->csc_mask); - - /* First, set voltage - bail out if invalid */ - if (voltage_set(lsock, state->Vcc, state->Vpp)) - return -EINVAL; - - /* Take care of reset... */ - if (state->flags & SS_RESET) - out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */ - else - out_be32(M8XX_PGCRX(lsock), - in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET); - - /* ... and output enable. */ - - /* The CxOE signal is connected to a 74541 on the ADS. - I guess most other boards used the ADS as a reference. - I tried to control the CxOE signal with SS_OUTPUT_ENA, - but the reset signal seems connected via the 541. - If the CxOE is left high are some signals tristated and - no pullups are present -> the cards act weird. - So right now the buffers are enabled if the power is on. */ - - if (state->Vcc || state->Vpp) - out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */ - else - out_be32(M8XX_PGCRX(lsock), - in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE); - - /* - * We'd better turn off interrupts before - * we mess with the events-table.. - */ - - spin_lock_irqsave(&events_lock, flags); - - /* - * Play around with the interrupt mask to be able to - * give the events the generic pcmcia driver wants us to. - */ - - e = &s->events[0]; - reg = 0; - - if (state->csc_mask & SS_DETECT) { - e->eventbit = SS_DETECT; - reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock) - | M8XX_PCMCIA_CD1(lsock)); - e++; - } - if (state->flags & SS_IOCARD) { - /* - * I/O card - */ - if (state->csc_mask & SS_STSCHG) { - e->eventbit = SS_STSCHG; - reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); - e++; - } - /* - * If io_irq is non-zero we should enable irq. - */ - if (state->io_irq) { - out_be32(M8XX_PGCRX(lsock), - in_be32(M8XX_PGCRX(lsock)) | - mk_int_int_mask(s->hwirq) << 24); - /* - * Strange thing here: - * The manual does not tell us which interrupt - * the sources generate. - * Anyhow, I found out that RDY_L generates IREQLVL. - * - * We use level triggerd interrupts, and they don't - * have to be cleared in PSCR in the interrupt handler. - */ - reg |= M8XX_PCMCIA_RDY_L(lsock); - } else - out_be32(M8XX_PGCRX(lsock), - in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff); - } else { - /* - * Memory card - */ - if (state->csc_mask & SS_BATDEAD) { - e->eventbit = SS_BATDEAD; - reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); - e++; - } - if (state->csc_mask & SS_BATWARN) { - e->eventbit = SS_BATWARN; - reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock); - e++; - } - /* What should I trigger on - low/high,raise,fall? */ - if (state->csc_mask & SS_READY) { - e->eventbit = SS_READY; - reg |= e->regbit = 0; //?? - e++; - } - } - - e->regbit = 0; /* terminate list */ - - /* - * Clear the status changed . - * Port A and Port B share the same port. - * Writing ones will clear the bits. - */ - - out_be32(&pcmcia->pcmc_pscr, reg); - - /* - * Write the mask. - * Port A and Port B share the same port. 
- * Need for read-modify-write. - * Ones will enable the interrupt. - */ - - reg |= - in_be32(&pcmcia-> - pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); - out_be32(&pcmcia->pcmc_per, reg); - - spin_unlock_irqrestore(&events_lock, flags); - - /* copy the struct and modify the copy */ - - s->state = *state; - - return 0; -} - -static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) -{ - int lsock = container_of(sock, struct socket_info, socket)->slot; - - struct socket_info *s = &socket[lsock]; - struct pcmcia_win *w; - unsigned int reg, winnr; - pcmconf8xx_t *pcmcia = s->pcmcia; - -#define M8XX_SIZE (io->stop - io->start + 1) -#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) - - pr_debug("m8xx_pcmcia: SetIOMap(%d, %d, %#2.2x, %d ns, " - "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags, - io->speed, (unsigned long long)io->start, - (unsigned long long)io->stop); - - if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff) - || (io->stop > 0xffff) || (io->stop < io->start)) - return -EINVAL; - - if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1) - return -EINVAL; - - if (io->flags & MAP_ACTIVE) { - - pr_debug("m8xx_pcmcia: io->flags & MAP_ACTIVE\n"); - - winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) - + (lsock * PCMCIA_IO_WIN_NO) + io->map; - - /* setup registers */ - - w = (void *)&pcmcia->pcmc_pbr0; - w += winnr; - - out_be32(&w->or, 0); /* turn off window first */ - out_be32(&w->br, M8XX_BASE); - - reg <<= 27; - reg |= M8XX_PCMCIA_POR_IO | (lsock << 2); - - reg |= m8xx_get_speed(io->speed, 1, s->bus_freq); - - if (io->flags & MAP_WRPROT) - reg |= M8XX_PCMCIA_POR_WRPROT; - - /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */ - if (io->flags & MAP_16BIT) - reg |= M8XX_PCMCIA_POR_16BIT; - - if (io->flags & MAP_ACTIVE) - reg |= M8XX_PCMCIA_POR_VALID; - - out_be32(&w->or, reg); - - pr_debug("m8xx_pcmcia: Socket %u: Mapped io window %u at " - "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or); - } else { - /* shutdown IO window */ - winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) - + (lsock * PCMCIA_IO_WIN_NO) + io->map; - - /* setup registers */ - - w = (void *)&pcmcia->pcmc_pbr0; - w += winnr; - - out_be32(&w->or, 0); /* turn off window */ - out_be32(&w->br, 0); /* turn off base address */ - - pr_debug("m8xx_pcmcia: Socket %u: Unmapped io window %u at " - "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or); - } - - /* copy the struct and modify the copy */ - s->io_win[io->map] = *io; - s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); - pr_debug("m8xx_pcmcia: SetIOMap exit\n"); - - return 0; -} - -static int m8xx_set_mem_map(struct pcmcia_socket *sock, - struct pccard_mem_map *mem) -{ - int lsock = container_of(sock, struct socket_info, socket)->slot; - struct socket_info *s = &socket[lsock]; - struct pcmcia_win *w; - struct pccard_mem_map *old; - unsigned int reg, winnr; - pcmconf8xx_t *pcmcia = s->pcmcia; - - pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, " - "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, - mem->speed, (unsigned long long)mem->static_start, - mem->card_start); - - if ((mem->map >= PCMCIA_MEM_WIN_NO) -// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE) - || (mem->card_start >= 0x04000000) - || (mem->static_start & 0xfff) /* 4KByte resolution */ - ||(mem->card_start & 0xfff)) - return -EINVAL; - - if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) { - printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE); - return -EINVAL; - } - reg <<= 27; - - winnr = (lsock * PCMCIA_MEM_WIN_NO) + mem->map; - - /* Setup 
the window in the pcmcia controller */ - - w = (void *)&pcmcia->pcmc_pbr0; - w += winnr; - - reg |= lsock << 2; - - reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq); - - if (mem->flags & MAP_ATTRIB) - reg |= M8XX_PCMCIA_POR_ATTRMEM; - - if (mem->flags & MAP_WRPROT) - reg |= M8XX_PCMCIA_POR_WRPROT; - - if (mem->flags & MAP_16BIT) - reg |= M8XX_PCMCIA_POR_16BIT; - - if (mem->flags & MAP_ACTIVE) - reg |= M8XX_PCMCIA_POR_VALID; - - out_be32(&w->or, reg); - - pr_debug("m8xx_pcmcia: Socket %u: Mapped memory window %u at %#8.8x, " - "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); - - if (mem->flags & MAP_ACTIVE) { - /* get the new base address */ - mem->static_start = PCMCIA_MEM_WIN_BASE + - (PCMCIA_MEM_WIN_SIZE * winnr) - + mem->card_start; - } - - pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, " - "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, - mem->speed, (unsigned long long)mem->static_start, - mem->card_start); - - /* copy the struct and modify the copy */ - - old = &s->mem_win[mem->map]; - - *old = *mem; - old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); - - return 0; -} - -static int m8xx_sock_init(struct pcmcia_socket *sock) -{ - int i; - pccard_io_map io = { 0, 0, 0, 0, 1 }; - pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; - - pr_debug("m8xx_pcmcia: sock_init(%d)\n", s); - - m8xx_set_socket(sock, &dead_socket); - for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { - io.map = i; - m8xx_set_io_map(sock, &io); - } - for (i = 0; i < PCMCIA_MEM_WIN_NO; i++) { - mem.map = i; - m8xx_set_mem_map(sock, &mem); - } - - return 0; - -} - -static int m8xx_sock_suspend(struct pcmcia_socket *sock) -{ - return m8xx_set_socket(sock, &dead_socket); -} - -static struct pccard_operations m8xx_services = { - .init = m8xx_sock_init, - .suspend = m8xx_sock_suspend, - .get_status = m8xx_get_status, - .set_socket = m8xx_set_socket, - .set_io_map = m8xx_set_io_map, - .set_mem_map = m8xx_set_mem_map, -}; - -static int __init m8xx_probe(struct platform_device *ofdev) -{ - struct pcmcia_win *w; - unsigned int i, m, hwirq; - pcmconf8xx_t *pcmcia; - int status; - struct device_node *np = ofdev->dev.of_node; - - pcmcia_info("%s\n", version); - - pcmcia = of_iomap(np, 0); - if (pcmcia == NULL) - return -EINVAL; - - pcmcia_schlvl = irq_of_parse_and_map(np, 0); - hwirq = irq_map[pcmcia_schlvl].hwirq; - if (pcmcia_schlvl < 0) { - iounmap(pcmcia); - return -EINVAL; - } - - m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra; - m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb; - - pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG - " with IRQ %u (%d). 
\n", pcmcia_schlvl, hwirq); - - /* Configure Status change interrupt */ - - if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED, - driver_name, socket)) { - pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n", - pcmcia_schlvl); - iounmap(pcmcia); - return -1; - } - - w = (void *)&pcmcia->pcmc_pbr0; - - out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); - clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); - - /* connect interrupt and disable CxOE */ - - out_be32(M8XX_PGCRX(0), - M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); - out_be32(M8XX_PGCRX(1), - M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); - - /* initialize the fixed memory windows */ - - for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { - for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { - out_be32(&w->br, PCMCIA_MEM_WIN_BASE + - (PCMCIA_MEM_WIN_SIZE - * (m + i * PCMCIA_MEM_WIN_NO))); - - out_be32(&w->or, 0); /* set to not valid */ - - w++; - } - } - - /* turn off voltage */ - voltage_set(0, 0, 0); - voltage_set(1, 0, 0); - - /* Enable external hardware */ - hardware_enable(0); - hardware_enable(1); - - for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { - socket[i].slot = i; - socket[i].socket.owner = THIS_MODULE; - socket[i].socket.features = - SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP; - socket[i].socket.irq_mask = 0x000; - socket[i].socket.map_size = 0x1000; - socket[i].socket.io_offset = 0; - socket[i].socket.pci_irq = pcmcia_schlvl; - socket[i].socket.ops = &m8xx_services; - socket[i].socket.resource_ops = &pccard_iodyn_ops; - socket[i].socket.cb_dev = NULL; - socket[i].socket.dev.parent = &ofdev->dev; - socket[i].pcmcia = pcmcia; - socket[i].bus_freq = ppc_proc_freq; - socket[i].hwirq = hwirq; - - } - - for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { - status = pcmcia_register_socket(&socket[i].socket); - if (status < 0) - pcmcia_error("Socket register failed\n"); - } - - return 0; -} - -static int m8xx_remove(struct platform_device *ofdev) -{ - u32 m, i; - struct pcmcia_win *w; - pcmconf8xx_t *pcmcia = socket[0].pcmcia; - - for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { - w = (void *)&pcmcia->pcmc_pbr0; - - out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i)); - out_be32(&pcmcia->pcmc_per, - in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i)); - - /* turn off interrupt and disable CxOE */ - out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE); - - /* turn off memory windows */ - for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { - out_be32(&w->or, 0); /* set to not valid */ - w++; - } - - /* turn off voltage */ - voltage_set(i, 0, 0); - - /* disable external hardware */ - hardware_disable(i); - } - for (i = 0; i < PCMCIA_SOCKETS_NO; i++) - pcmcia_unregister_socket(&socket[i].socket); - iounmap(pcmcia); - - free_irq(pcmcia_schlvl, NULL); - - return 0; -} - -static const struct of_device_id m8xx_pcmcia_match[] = { - { - .type = "pcmcia", - .compatible = "fsl,pq-pcmcia", - }, - {}, -}; - -MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match); - -static struct platform_driver m8xx_pcmcia_driver = { - .driver = { - .name = driver_name, - .owner = THIS_MODULE, - .of_match_table = m8xx_pcmcia_match, - }, - .probe = m8xx_probe, - .remove = m8xx_remove, -}; - -module_platform_driver(m8xx_pcmcia_driver); diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index cc97c897945a..f833aa271a2e 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -41,9 +41,9 @@ config PHY_MVEBU_SATA config PHY_MIPHY365X tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series" depends on ARCH_STI - depends on GENERIC_PHY depends on 
HAS_IOMEM depends on OF + select GENERIC_PHY help Enable this to support the miphy transceiver (for SATA/PCIE) that is part of STMicroelectronics STiH41x SoC series. @@ -159,6 +159,16 @@ config PHY_SAMSUNG_USB2 for particular PHYs will be enabled based on the SoC type in addition to this driver. +config PHY_S5PV210_USB2 + bool "Support for S5PV210" + depends on PHY_SAMSUNG_USB2 + depends on ARCH_S5PV210 + help + Enable USB PHY support for S5PV210. This option requires that Samsung + USB 2.0 PHY driver is enabled and means that support for this + particular SoC is compiled in the driver. In case of S5PV210 two phys + are available - device and host. + config PHY_EXYNOS4210_USB2 bool depends on PHY_SAMSUNG_USB2 @@ -187,13 +197,6 @@ config PHY_EXYNOS5_USBDRD This driver provides PHY interface for USB 3.0 DRD controller present on Exynos5 SoC series. -config PHY_XGENE - tristate "APM X-Gene 15Gbps PHY support" - depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST) - select GENERIC_PHY - help - This option enables support for APM X-Gene SoC multi-purpose PHY. - config PHY_QCOM_APQ8064_SATA tristate "Qualcomm APQ8064 SATA SerDes/PHY driver" depends on ARCH_QCOM @@ -208,4 +211,25 @@ config PHY_QCOM_IPQ806X_SATA depends on OF select GENERIC_PHY +config PHY_ST_SPEAR1310_MIPHY + tristate "ST SPEAR1310-MIPHY driver" + select GENERIC_PHY + depends on MACH_SPEAR1310 || COMPILE_TEST + help + Support for ST SPEAr1310 MIPHY which can be used for PCIe and SATA. + +config PHY_ST_SPEAR1340_MIPHY + tristate "ST SPEAR1340-MIPHY driver" + select GENERIC_PHY + depends on MACH_SPEAR1340 || COMPILE_TEST + help + Support for ST SPEAr1340 MIPHY which can be used for PCIe and SATA. + +config PHY_XGENE + tristate "APM X-Gene 15Gbps PHY support" + depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST) + select GENERIC_PHY + help + This option enables support for APM X-Gene SoC multi-purpose PHY. + endmenu diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 971ad0aac388..95c69ed5ed45 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -21,7 +21,10 @@ phy-exynos-usb2-y += phy-samsung-usb2.o phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o +phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o -obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o +obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o +obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o +obj-$(CONFIG_PHY_XGENE) += phy-xgene.o diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c index b05302b09c9f..392101c8d6b0 100644 --- a/drivers/phy/phy-exynos5-usbdrd.c +++ b/drivers/phy/phy-exynos5-usbdrd.c @@ -542,6 +542,7 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = { }, { }, }; +MODULE_DEVICE_TABLE(of, exynos5_usbdrd_phy_of_match); static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) { diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c index e111baf187ce..e0fb7a1e5a5a 100644 --- a/drivers/phy/phy-miphy365x.c +++ b/drivers/phy/phy-miphy365x.c @@ -163,6 +163,7 @@ enum miphy_sata_gen { }; static u8 rx_tx_spd[] = { + 0, /* GEN0 doesn't exist. 
*/ TX_SPDSEL_GEN1_VAL | RX_SPDSEL_GEN1_VAL, TX_SPDSEL_GEN2_VAL | RX_SPDSEL_GEN2_VAL, TX_SPDSEL_GEN3_VAL | RX_SPDSEL_GEN3_VAL diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c new file mode 100644 index 000000000000..004d320767e4 --- /dev/null +++ b/drivers/phy/phy-s5pv210-usb2.c @@ -0,0 +1,187 @@ +/* + * Samsung SoC USB 1.1/2.0 PHY driver - S5PV210 support + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * Authors: Kamil Debski <k.debski@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/phy/phy.h> +#include "phy-samsung-usb2.h" + +/* Exynos USB PHY registers */ + +/* PHY power control */ +#define S5PV210_UPHYPWR 0x0 + +#define S5PV210_UPHYPWR_PHY0_SUSPEND BIT(0) +#define S5PV210_UPHYPWR_PHY0_PWR BIT(3) +#define S5PV210_UPHYPWR_PHY0_OTG_PWR BIT(4) +#define S5PV210_UPHYPWR_PHY0 ( \ + S5PV210_UPHYPWR_PHY0_SUSPEND | \ + S5PV210_UPHYPWR_PHY0_PWR | \ + S5PV210_UPHYPWR_PHY0_OTG_PWR) + +#define S5PV210_UPHYPWR_PHY1_SUSPEND BIT(6) +#define S5PV210_UPHYPWR_PHY1_PWR BIT(7) +#define S5PV210_UPHYPWR_PHY1 ( \ + S5PV210_UPHYPWR_PHY1_SUSPEND | \ + S5PV210_UPHYPWR_PHY1_PWR) + +/* PHY clock control */ +#define S5PV210_UPHYCLK 0x4 + +#define S5PV210_UPHYCLK_PHYFSEL_MASK (0x3 << 0) +#define S5PV210_UPHYCLK_PHYFSEL_48MHZ (0x0 << 0) +#define S5PV210_UPHYCLK_PHYFSEL_24MHZ (0x3 << 0) +#define S5PV210_UPHYCLK_PHYFSEL_12MHZ (0x2 << 0) + +#define S5PV210_UPHYCLK_PHY0_ID_PULLUP BIT(2) +#define S5PV210_UPHYCLK_PHY0_COMMON_ON BIT(4) +#define S5PV210_UPHYCLK_PHY1_COMMON_ON BIT(7) + +/* PHY reset control */ +#define S5PV210_UPHYRST 0x8 + +#define S5PV210_URSTCON_PHY0 BIT(0) +#define S5PV210_URSTCON_OTG_HLINK BIT(1) +#define S5PV210_URSTCON_OTG_PHYLINK BIT(2) +#define S5PV210_URSTCON_PHY1_ALL BIT(3) +#define S5PV210_URSTCON_HOST_LINK_ALL BIT(4) + +/* Isolation, configured in the power management unit */ +#define S5PV210_USB_ISOL_OFFSET 0x680c +#define S5PV210_USB_ISOL_DEVICE BIT(0) +#define S5PV210_USB_ISOL_HOST BIT(1) + + +enum s5pv210_phy_id { + S5PV210_DEVICE, + S5PV210_HOST, + S5PV210_NUM_PHYS, +}; + +/* + * s5pv210_rate_to_clk() converts the supplied clock rate to the value that + * can be written to the phy register. + */ +static int s5pv210_rate_to_clk(unsigned long rate, u32 *reg) +{ + switch (rate) { + case 12 * MHZ: + *reg = S5PV210_UPHYCLK_PHYFSEL_12MHZ; + break; + case 24 * MHZ: + *reg = S5PV210_UPHYCLK_PHYFSEL_24MHZ; + break; + case 48 * MHZ: + *reg = S5PV210_UPHYCLK_PHYFSEL_48MHZ; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void s5pv210_isol(struct samsung_usb2_phy_instance *inst, bool on) +{ + struct samsung_usb2_phy_driver *drv = inst->drv; + u32 mask; + + switch (inst->cfg->id) { + case S5PV210_DEVICE: + mask = S5PV210_USB_ISOL_DEVICE; + break; + case S5PV210_HOST: + mask = S5PV210_USB_ISOL_HOST; + break; + default: + return; + }; + + regmap_update_bits(drv->reg_pmu, S5PV210_USB_ISOL_OFFSET, + mask, on ? 
0 : mask); +} + +static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) +{ + struct samsung_usb2_phy_driver *drv = inst->drv; + u32 rstbits = 0; + u32 phypwr = 0; + u32 rst; + u32 pwr; + + switch (inst->cfg->id) { + case S5PV210_DEVICE: + phypwr = S5PV210_UPHYPWR_PHY0; + rstbits = S5PV210_URSTCON_PHY0; + break; + case S5PV210_HOST: + phypwr = S5PV210_UPHYPWR_PHY1; + rstbits = S5PV210_URSTCON_PHY1_ALL | + S5PV210_URSTCON_HOST_LINK_ALL; + break; + }; + + if (on) { + writel(drv->ref_reg_val, drv->reg_phy + S5PV210_UPHYCLK); + + pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); + pwr &= ~phypwr; + writel(pwr, drv->reg_phy + S5PV210_UPHYPWR); + + rst = readl(drv->reg_phy + S5PV210_UPHYRST); + rst |= rstbits; + writel(rst, drv->reg_phy + S5PV210_UPHYRST); + udelay(10); + rst &= ~rstbits; + writel(rst, drv->reg_phy + S5PV210_UPHYRST); + } else { + pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); + pwr |= phypwr; + writel(pwr, drv->reg_phy + S5PV210_UPHYPWR); + } +} + +static int s5pv210_power_on(struct samsung_usb2_phy_instance *inst) +{ + s5pv210_isol(inst, 0); + s5pv210_phy_pwr(inst, 1); + + return 0; +} + +static int s5pv210_power_off(struct samsung_usb2_phy_instance *inst) +{ + s5pv210_phy_pwr(inst, 0); + s5pv210_isol(inst, 1); + + return 0; +} + +static const struct samsung_usb2_common_phy s5pv210_phys[S5PV210_NUM_PHYS] = { + [S5PV210_DEVICE] = { + .label = "device", + .id = S5PV210_DEVICE, + .power_on = s5pv210_power_on, + .power_off = s5pv210_power_off, + }, + [S5PV210_HOST] = { + .label = "host", + .id = S5PV210_HOST, + .power_on = s5pv210_power_on, + .power_off = s5pv210_power_off, + }, +}; + +const struct samsung_usb2_phy_config s5pv210_usb2_phy_config = { + .num_phys = ARRAY_SIZE(s5pv210_phys), + .phys = s5pv210_phys, + .rate_to_clk = s5pv210_rate_to_clk, +}; diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c index ae30640a411d..3732ca25e09f 100644 --- a/drivers/phy/phy-samsung-usb2.c +++ b/drivers/phy/phy-samsung-usb2.c @@ -111,6 +111,12 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = { .data = &exynos5250_usb2_phy_config, }, #endif +#ifdef CONFIG_PHY_S5PV210_USB2 + { + .compatible = "samsung,s5pv210-usb2-phy", + .data = &s5pv210_usb2_phy_config, + }, +#endif { }, }; MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match); diff --git a/drivers/phy/phy-samsung-usb2.h b/drivers/phy/phy-samsung-usb2.h index b03da0ef39ac..44bead9b8f34 100644 --- a/drivers/phy/phy-samsung-usb2.h +++ b/drivers/phy/phy-samsung-usb2.h @@ -67,4 +67,5 @@ extern const struct samsung_usb2_phy_config exynos3250_usb2_phy_config; extern const struct samsung_usb2_phy_config exynos4210_usb2_phy_config; extern const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config; extern const struct samsung_usb2_phy_config exynos5250_usb2_phy_config; +extern const struct samsung_usb2_phy_config s5pv210_usb2_phy_config; #endif diff --git a/drivers/phy/phy-spear1310-miphy.c b/drivers/phy/phy-spear1310-miphy.c new file mode 100644 index 000000000000..6dcbfcddb372 --- /dev/null +++ b/drivers/phy/phy-spear1310-miphy.c @@ -0,0 +1,274 @@ +/* + * ST SPEAr1310-miphy driver + * + * Copyright (C) 2014 ST Microelectronics + * Pratyush Anand <pratyush.anand@st.com> + * Mohit Kumar <mohit.kumar@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
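Editorial note: the S5PV210 additions above plug one more SoC into the common samsung-usb2 PHY framework, and consumers then obtain the "device" or "host" instance through the generic PHY API. A minimal consumer-side sketch under the assumption that the consumer's DT node carries a matching phys/phy-names pair; my_usb_probe and the "usb-host" name are illustrative, while devm_phy_get(), phy_init() and phy_power_on() are the standard generic PHY calls also used elsewhere in this diff.

#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int my_usb_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* "usb-host" is assumed to match a phy-names entry in the consumer node. */
	phy = devm_phy_get(&pdev->dev, "usb-host");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);
	if (ret)
		return ret;

	/* Ends up in the power_on callback registered for the instance above. */
	return phy_power_on(phy);
}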
+ * + */ + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> + +/* SPEAr1310 Registers */ +#define SPEAR1310_PCIE_SATA_CFG 0x3A4 + #define SPEAR1310_PCIE_SATA2_SEL_PCIE (0 << 31) + #define SPEAR1310_PCIE_SATA1_SEL_PCIE (0 << 30) + #define SPEAR1310_PCIE_SATA0_SEL_PCIE (0 << 29) + #define SPEAR1310_PCIE_SATA2_SEL_SATA BIT(31) + #define SPEAR1310_PCIE_SATA1_SEL_SATA BIT(30) + #define SPEAR1310_PCIE_SATA0_SEL_SATA BIT(29) + #define SPEAR1310_SATA2_CFG_TX_CLK_EN BIT(27) + #define SPEAR1310_SATA2_CFG_RX_CLK_EN BIT(26) + #define SPEAR1310_SATA2_CFG_POWERUP_RESET BIT(25) + #define SPEAR1310_SATA2_CFG_PM_CLK_EN BIT(24) + #define SPEAR1310_SATA1_CFG_TX_CLK_EN BIT(23) + #define SPEAR1310_SATA1_CFG_RX_CLK_EN BIT(22) + #define SPEAR1310_SATA1_CFG_POWERUP_RESET BIT(21) + #define SPEAR1310_SATA1_CFG_PM_CLK_EN BIT(20) + #define SPEAR1310_SATA0_CFG_TX_CLK_EN BIT(19) + #define SPEAR1310_SATA0_CFG_RX_CLK_EN BIT(18) + #define SPEAR1310_SATA0_CFG_POWERUP_RESET BIT(17) + #define SPEAR1310_SATA0_CFG_PM_CLK_EN BIT(16) + #define SPEAR1310_PCIE2_CFG_DEVICE_PRESENT BIT(11) + #define SPEAR1310_PCIE2_CFG_POWERUP_RESET BIT(10) + #define SPEAR1310_PCIE2_CFG_CORE_CLK_EN BIT(9) + #define SPEAR1310_PCIE2_CFG_AUX_CLK_EN BIT(8) + #define SPEAR1310_PCIE1_CFG_DEVICE_PRESENT BIT(7) + #define SPEAR1310_PCIE1_CFG_POWERUP_RESET BIT(6) + #define SPEAR1310_PCIE1_CFG_CORE_CLK_EN BIT(5) + #define SPEAR1310_PCIE1_CFG_AUX_CLK_EN BIT(4) + #define SPEAR1310_PCIE0_CFG_DEVICE_PRESENT BIT(3) + #define SPEAR1310_PCIE0_CFG_POWERUP_RESET BIT(2) + #define SPEAR1310_PCIE0_CFG_CORE_CLK_EN BIT(1) + #define SPEAR1310_PCIE0_CFG_AUX_CLK_EN BIT(0) + + #define SPEAR1310_PCIE_CFG_MASK(x) ((0xF << (x * 4)) | BIT((x + 29))) + #define SPEAR1310_SATA_CFG_MASK(x) ((0xF << (x * 4 + 16)) | \ + BIT((x + 29))) + #define SPEAR1310_PCIE_CFG_VAL(x) \ + (SPEAR1310_PCIE_SATA##x##_SEL_PCIE | \ + SPEAR1310_PCIE##x##_CFG_AUX_CLK_EN | \ + SPEAR1310_PCIE##x##_CFG_CORE_CLK_EN | \ + SPEAR1310_PCIE##x##_CFG_POWERUP_RESET | \ + SPEAR1310_PCIE##x##_CFG_DEVICE_PRESENT) + #define SPEAR1310_SATA_CFG_VAL(x) \ + (SPEAR1310_PCIE_SATA##x##_SEL_SATA | \ + SPEAR1310_SATA##x##_CFG_PM_CLK_EN | \ + SPEAR1310_SATA##x##_CFG_POWERUP_RESET | \ + SPEAR1310_SATA##x##_CFG_RX_CLK_EN | \ + SPEAR1310_SATA##x##_CFG_TX_CLK_EN) + +#define SPEAR1310_PCIE_MIPHY_CFG_1 0x3A8 + #define SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT BIT(31) + #define SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 BIT(28) + #define SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(x) (x << 16) + #define SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT BIT(15) + #define SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 BIT(12) + #define SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(x) (x << 0) + #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_MASK (0xFFFF) + #define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK (0xFFFF << 16) + #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA \ + (SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \ + SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 | \ + SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(60) | \ + SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \ + SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 | \ + SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(60)) + #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \ + (SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(120)) + #define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE \ + (SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \ + SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(25) | \ + SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \ 
+ SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(25)) + +#define SPEAR1310_PCIE_MIPHY_CFG_2 0x3AC + +enum spear1310_miphy_mode { + SATA, + PCIE, +}; + +struct spear1310_miphy_priv { + /* instance id of this phy */ + u32 id; + /* phy mode: 0 for SATA 1 for PCIe */ + enum spear1310_miphy_mode mode; + /* regmap for any soc specific misc registers */ + struct regmap *misc; + /* phy struct pointer */ + struct phy *phy; +}; + +static int spear1310_miphy_pcie_init(struct spear1310_miphy_priv *priv) +{ + u32 val; + + regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1, + SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK, + SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE); + + switch (priv->id) { + case 0: + val = SPEAR1310_PCIE_CFG_VAL(0); + break; + case 1: + val = SPEAR1310_PCIE_CFG_VAL(1); + break; + case 2: + val = SPEAR1310_PCIE_CFG_VAL(2); + break; + default: + return -EINVAL; + } + + regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG, + SPEAR1310_PCIE_CFG_MASK(priv->id), val); + + return 0; +} + +static int spear1310_miphy_pcie_exit(struct spear1310_miphy_priv *priv) +{ + regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG, + SPEAR1310_PCIE_CFG_MASK(priv->id), 0); + + regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1, + SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK, 0); + + return 0; +} + +static int spear1310_miphy_init(struct phy *phy) +{ + struct spear1310_miphy_priv *priv = phy_get_drvdata(phy); + int ret = 0; + + if (priv->mode == PCIE) + ret = spear1310_miphy_pcie_init(priv); + + return ret; +} + +static int spear1310_miphy_exit(struct phy *phy) +{ + struct spear1310_miphy_priv *priv = phy_get_drvdata(phy); + int ret = 0; + + if (priv->mode == PCIE) + ret = spear1310_miphy_pcie_exit(priv); + + return ret; +} + +static const struct of_device_id spear1310_miphy_of_match[] = { + { .compatible = "st,spear1310-miphy" }, + { }, +}; +MODULE_DEVICE_TABLE(of, spear1310_miphy_of_match); + +static struct phy_ops spear1310_miphy_ops = { + .init = spear1310_miphy_init, + .exit = spear1310_miphy_exit, + .owner = THIS_MODULE, +}; + +static struct phy *spear1310_miphy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct spear1310_miphy_priv *priv = dev_get_drvdata(dev); + + if (args->args_count < 1) { + dev_err(dev, "DT did not pass correct no of args\n"); + return NULL; + } + + priv->mode = args->args[0]; + + if (priv->mode != SATA && priv->mode != PCIE) { + dev_err(dev, "DT did not pass correct phy mode\n"); + return NULL; + } + + return priv->phy; +} + +static int spear1310_miphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spear1310_miphy_priv *priv; + struct phy_provider *phy_provider; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(dev, "can't alloc spear1310_miphy private date memory\n"); + return -ENOMEM; + } + + priv->misc = + syscon_regmap_lookup_by_phandle(dev->of_node, "misc"); + if (IS_ERR(priv->misc)) { + dev_err(dev, "failed to find misc regmap\n"); + return PTR_ERR(priv->misc); + } + + if (of_property_read_u32(dev->of_node, "phy-id", &priv->id)) { + dev_err(dev, "failed to find phy id\n"); + return -EINVAL; + } + + priv->phy = devm_phy_create(dev, NULL, &spear1310_miphy_ops, NULL); + if (IS_ERR(priv->phy)) { + dev_err(dev, "failed to create SATA PCIe PHY\n"); + return PTR_ERR(priv->phy); + } + + dev_set_drvdata(dev, priv); + phy_set_drvdata(priv->phy, priv); + + phy_provider = + devm_of_phy_provider_register(dev, spear1310_miphy_xlate); + if (IS_ERR(phy_provider)) { + dev_err(dev, "failed to register phy 
provider\n"); + return PTR_ERR(phy_provider); + } + + return 0; +} + +static struct platform_driver spear1310_miphy_driver = { + .probe = spear1310_miphy_probe, + .driver = { + .name = "spear1310-miphy", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(spear1310_miphy_of_match), + }, +}; + +static int __init spear1310_miphy_phy_init(void) +{ + return platform_driver_register(&spear1310_miphy_driver); +} +module_init(spear1310_miphy_phy_init); + +static void __exit spear1310_miphy_phy_exit(void) +{ + platform_driver_unregister(&spear1310_miphy_driver); +} +module_exit(spear1310_miphy_phy_exit); + +MODULE_DESCRIPTION("ST SPEAR1310-MIPHY driver"); +MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-spear1340-miphy.c b/drivers/phy/phy-spear1340-miphy.c new file mode 100644 index 000000000000..7135ba2603b6 --- /dev/null +++ b/drivers/phy/phy-spear1340-miphy.c @@ -0,0 +1,307 @@ +/* + * ST spear1340-miphy driver + * + * Copyright (C) 2014 ST Microelectronics + * Pratyush Anand <pratyush.anand@st.com> + * Mohit Kumar <mohit.kumar@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/regmap.h> + +/* SPEAr1340 Registers */ +/* Power Management Registers */ +#define SPEAR1340_PCM_CFG 0x100 + #define SPEAR1340_PCM_CFG_SATA_POWER_EN BIT(11) +#define SPEAR1340_PCM_WKUP_CFG 0x104 +#define SPEAR1340_SWITCH_CTR 0x108 + +#define SPEAR1340_PERIP1_SW_RST 0x318 + #define SPEAR1340_PERIP1_SW_RSATA BIT(12) +#define SPEAR1340_PERIP2_SW_RST 0x31C +#define SPEAR1340_PERIP3_SW_RST 0x320 + +/* PCIE - SATA configuration registers */ +#define SPEAR1340_PCIE_SATA_CFG 0x424 + /* PCIE CFG MASks */ + #define SPEAR1340_PCIE_CFG_DEVICE_PRESENT BIT(11) + #define SPEAR1340_PCIE_CFG_POWERUP_RESET BIT(10) + #define SPEAR1340_PCIE_CFG_CORE_CLK_EN BIT(9) + #define SPEAR1340_PCIE_CFG_AUX_CLK_EN BIT(8) + #define SPEAR1340_SATA_CFG_TX_CLK_EN BIT(4) + #define SPEAR1340_SATA_CFG_RX_CLK_EN BIT(3) + #define SPEAR1340_SATA_CFG_POWERUP_RESET BIT(2) + #define SPEAR1340_SATA_CFG_PM_CLK_EN BIT(1) + #define SPEAR1340_PCIE_SATA_SEL_PCIE (0) + #define SPEAR1340_PCIE_SATA_SEL_SATA (1) + #define SPEAR1340_PCIE_SATA_CFG_MASK 0xF1F + #define SPEAR1340_PCIE_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_PCIE | \ + SPEAR1340_PCIE_CFG_AUX_CLK_EN | \ + SPEAR1340_PCIE_CFG_CORE_CLK_EN | \ + SPEAR1340_PCIE_CFG_POWERUP_RESET | \ + SPEAR1340_PCIE_CFG_DEVICE_PRESENT) + #define SPEAR1340_SATA_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_SATA | \ + SPEAR1340_SATA_CFG_PM_CLK_EN | \ + SPEAR1340_SATA_CFG_POWERUP_RESET | \ + SPEAR1340_SATA_CFG_RX_CLK_EN | \ + SPEAR1340_SATA_CFG_TX_CLK_EN) + +#define SPEAR1340_PCIE_MIPHY_CFG 0x428 + #define SPEAR1340_MIPHY_OSC_BYPASS_EXT BIT(31) + #define SPEAR1340_MIPHY_CLK_REF_DIV2 BIT(27) + #define SPEAR1340_MIPHY_CLK_REF_DIV4 (2 << 27) + #define SPEAR1340_MIPHY_CLK_REF_DIV8 (3 << 27) + #define SPEAR1340_MIPHY_PLL_RATIO_TOP(x) (x << 0) + #define SPEAR1340_PCIE_MIPHY_CFG_MASK 0xF80000FF + #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \ + (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \ + SPEAR1340_MIPHY_CLK_REF_DIV2 | \ + SPEAR1340_MIPHY_PLL_RATIO_TOP(60)) + #define 
SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \ + (SPEAR1340_MIPHY_PLL_RATIO_TOP(120)) + #define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \ + (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \ + SPEAR1340_MIPHY_PLL_RATIO_TOP(25)) + +enum spear1340_miphy_mode { + SATA, + PCIE, +}; + +struct spear1340_miphy_priv { + /* phy mode: 0 for SATA 1 for PCIe */ + enum spear1340_miphy_mode mode; + /* regmap for any soc specific misc registers */ + struct regmap *misc; + /* phy struct pointer */ + struct phy *phy; +}; + +static int spear1340_miphy_sata_init(struct spear1340_miphy_priv *priv) +{ + regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG, + SPEAR1340_PCIE_SATA_CFG_MASK, + SPEAR1340_SATA_CFG_VAL); + regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG, + SPEAR1340_PCIE_MIPHY_CFG_MASK, + SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK); + /* Switch on sata power domain */ + regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG, + SPEAR1340_PCM_CFG_SATA_POWER_EN, + SPEAR1340_PCM_CFG_SATA_POWER_EN); + /* Wait for SATA power domain on */ + msleep(20); + + /* Disable PCIE SATA Controller reset */ + regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST, + SPEAR1340_PERIP1_SW_RSATA, 0); + /* Wait for SATA reset de-assert completion */ + msleep(20); + + return 0; +} + +static int spear1340_miphy_sata_exit(struct spear1340_miphy_priv *priv) +{ + regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG, + SPEAR1340_PCIE_SATA_CFG_MASK, 0); + regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG, + SPEAR1340_PCIE_MIPHY_CFG_MASK, 0); + + /* Enable PCIE SATA Controller reset */ + regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST, + SPEAR1340_PERIP1_SW_RSATA, + SPEAR1340_PERIP1_SW_RSATA); + /* Wait for SATA power domain off */ + msleep(20); + /* Switch off sata power domain */ + regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG, + SPEAR1340_PCM_CFG_SATA_POWER_EN, 0); + /* Wait for SATA reset assert completion */ + msleep(20); + + return 0; +} + +static int spear1340_miphy_pcie_init(struct spear1340_miphy_priv *priv) +{ + regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG, + SPEAR1340_PCIE_MIPHY_CFG_MASK, + SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE); + regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG, + SPEAR1340_PCIE_SATA_CFG_MASK, + SPEAR1340_PCIE_CFG_VAL); + + return 0; +} + +static int spear1340_miphy_pcie_exit(struct spear1340_miphy_priv *priv) +{ + regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG, + SPEAR1340_PCIE_MIPHY_CFG_MASK, 0); + regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG, + SPEAR1340_PCIE_SATA_CFG_MASK, 0); + + return 0; +} + +static int spear1340_miphy_init(struct phy *phy) +{ + struct spear1340_miphy_priv *priv = phy_get_drvdata(phy); + int ret = 0; + + if (priv->mode == SATA) + ret = spear1340_miphy_sata_init(priv); + else if (priv->mode == PCIE) + ret = spear1340_miphy_pcie_init(priv); + + return ret; +} + +static int spear1340_miphy_exit(struct phy *phy) +{ + struct spear1340_miphy_priv *priv = phy_get_drvdata(phy); + int ret = 0; + + if (priv->mode == SATA) + ret = spear1340_miphy_sata_exit(priv); + else if (priv->mode == PCIE) + ret = spear1340_miphy_pcie_exit(priv); + + return ret; +} + +static const struct of_device_id spear1340_miphy_of_match[] = { + { .compatible = "st,spear1340-miphy" }, + { }, +}; +MODULE_DEVICE_TABLE(of, spear1340_miphy_of_match); + +static struct phy_ops spear1340_miphy_ops = { + .init = spear1340_miphy_init, + .exit = spear1340_miphy_exit, + .owner = THIS_MODULE, +}; + +#ifdef CONFIG_PM_SLEEP +static int spear1340_miphy_suspend(struct device *dev) 
+{ + struct spear1340_miphy_priv *priv = dev_get_drvdata(dev); + int ret = 0; + + if (priv->mode == SATA) + ret = spear1340_miphy_sata_exit(priv); + + return ret; +} + +static int spear1340_miphy_resume(struct device *dev) +{ + struct spear1340_miphy_priv *priv = dev_get_drvdata(dev); + int ret = 0; + + if (priv->mode == SATA) + ret = spear1340_miphy_sata_init(priv); + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(spear1340_miphy_pm_ops, spear1340_miphy_suspend, + spear1340_miphy_resume); + +static struct phy *spear1340_miphy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct spear1340_miphy_priv *priv = dev_get_drvdata(dev); + + if (args->args_count < 1) { + dev_err(dev, "DT did not pass correct no of args\n"); + return NULL; + } + + priv->mode = args->args[0]; + + if (priv->mode != SATA && priv->mode != PCIE) { + dev_err(dev, "DT did not pass correct phy mode\n"); + return NULL; + } + + return priv->phy; +} + +static int spear1340_miphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spear1340_miphy_priv *priv; + struct phy_provider *phy_provider; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(dev, "can't alloc spear1340_miphy private date memory\n"); + return -ENOMEM; + } + + priv->misc = + syscon_regmap_lookup_by_phandle(dev->of_node, "misc"); + if (IS_ERR(priv->misc)) { + dev_err(dev, "failed to find misc regmap\n"); + return PTR_ERR(priv->misc); + } + + priv->phy = devm_phy_create(dev, NULL, &spear1340_miphy_ops, NULL); + if (IS_ERR(priv->phy)) { + dev_err(dev, "failed to create SATA PCIe PHY\n"); + return PTR_ERR(priv->phy); + } + + dev_set_drvdata(dev, priv); + phy_set_drvdata(priv->phy, priv); + + phy_provider = + devm_of_phy_provider_register(dev, spear1340_miphy_xlate); + if (IS_ERR(phy_provider)) { + dev_err(dev, "failed to register phy provider\n"); + return PTR_ERR(phy_provider); + } + + return 0; +} + +static struct platform_driver spear1340_miphy_driver = { + .probe = spear1340_miphy_probe, + .driver = { + .name = "spear1340-miphy", + .owner = THIS_MODULE, + .pm = &spear1340_miphy_pm_ops, + .of_match_table = of_match_ptr(spear1340_miphy_of_match), + }, +}; + +static int __init spear1340_miphy_phy_init(void) +{ + return platform_driver_register(&spear1340_miphy_driver); +} +module_init(spear1340_miphy_phy_init); + +static void __exit spear1340_miphy_phy_exit(void) +{ + platform_driver_unregister(&spear1340_miphy_driver); +} +module_exit(spear1340_miphy_phy_exit); + +MODULE_DESCRIPTION("ST SPEAR1340-MIPHY driver"); +MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index e1a6623d4696..9cd33a4bcfb1 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c @@ -34,6 +34,7 @@ #include <linux/delay.h> #include <linux/usb/otg.h> #include <linux/phy/phy.h> +#include <linux/pm_runtime.h> #include <linux/usb/musb-omap.h> #include <linux/usb/ulpi.h> #include <linux/i2c/twl.h> @@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on) } } -static int twl4030_phy_power_off(struct phy *phy) +static int twl4030_usb_runtime_suspend(struct device *dev) { - struct twl4030_usb *twl = phy_get_drvdata(phy); + struct twl4030_usb *twl = dev_get_drvdata(dev); + dev_dbg(twl->dev, "%s\n", __func__); if (twl->asleep) return 0; twl4030_phy_power(twl, 0); twl->asleep = 1; - dev_dbg(twl->dev, "%s\n", __func__); + return 0; } -static void 
__twl4030_phy_power_on(struct twl4030_usb *twl) +static int twl4030_usb_runtime_resume(struct device *dev) { + struct twl4030_usb *twl = dev_get_drvdata(dev); + + dev_dbg(twl->dev, "%s\n", __func__); + if (!twl->asleep) + return 0; + twl4030_phy_power(twl, 1); - twl4030_i2c_access(twl, 1); - twl4030_usb_set_mode(twl, twl->usb_mode); - if (twl->usb_mode == T2_USB_MODE_ULPI) - twl4030_i2c_access(twl, 0); + twl->asleep = 0; + + return 0; +} + +static int twl4030_phy_power_off(struct phy *phy) +{ + struct twl4030_usb *twl = phy_get_drvdata(phy); + + dev_dbg(twl->dev, "%s\n", __func__); + pm_runtime_mark_last_busy(twl->dev); + pm_runtime_put_autosuspend(twl->dev); + + return 0; } static int twl4030_phy_power_on(struct phy *phy) { struct twl4030_usb *twl = phy_get_drvdata(phy); - if (!twl->asleep) - return 0; - __twl4030_phy_power_on(twl); - twl->asleep = 0; dev_dbg(twl->dev, "%s\n", __func__); + pm_runtime_get_sync(twl->dev); + twl4030_i2c_access(twl, 1); + twl4030_usb_set_mode(twl, twl->usb_mode); + if (twl->usb_mode == T2_USB_MODE_ULPI) + twl4030_i2c_access(twl, 0); /* * XXX When VBUS gets driven after musb goes to A mode, @@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) * USB_LINK_VBUS state. musb_hdrc won't care until it * starts to handle softconnect right. */ + if ((status == OMAP_MUSB_VBUS_VALID) || + (status == OMAP_MUSB_ID_GROUND)) { + if (twl->asleep) + pm_runtime_get_sync(twl->dev); + } else { + if (!twl->asleep) { + pm_runtime_mark_last_busy(twl->dev); + pm_runtime_put_autosuspend(twl->dev); + } + } omap_musb_mailbox(status); } - sysfs_notify(&twl->dev->kobj, NULL, "vbus"); + + /* don't schedule during sleep - irq works right then */ + if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) { + cancel_delayed_work(&twl->id_workaround_work); + schedule_delayed_work(&twl->id_workaround_work, HZ); + } + + if (irq) + sysfs_notify(&twl->dev->kobj, NULL, "vbus"); return IRQ_HANDLED; } @@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work) { struct twl4030_usb *twl = container_of(work, struct twl4030_usb, id_workaround_work.work); - enum omap_musb_vbus_id_status status; - bool status_changed = false; - - status = twl4030_usb_linkstat(twl); - - spin_lock_irq(&twl->lock); - if (status >= 0 && status != twl->linkstat) { - twl->linkstat = status; - status_changed = true; - } - spin_unlock_irq(&twl->lock); - - if (status_changed) { - dev_dbg(twl->dev, "handle missing status change to %d\n", - status); - omap_musb_mailbox(status); - } - /* don't schedule during sleep - irq works right then */ - if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) { - cancel_delayed_work(&twl->id_workaround_work); - schedule_delayed_work(&twl->id_workaround_work, HZ); - } + twl4030_usb_irq(0, twl); } static int twl4030_phy_init(struct phy *phy) @@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy) struct twl4030_usb *twl = phy_get_drvdata(phy); enum omap_musb_vbus_id_status status; - /* - * Start in sleep state, we'll get called through set_suspend() - * callback when musb is runtime resumed and it's time to start. 
- */ - __twl4030_phy_power(twl, 0); - twl->asleep = 1; - + pm_runtime_get_sync(twl->dev); status = twl4030_usb_linkstat(twl); twl->linkstat = status; - if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) { + if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) omap_musb_mailbox(twl->linkstat); - twl4030_phy_power_on(phy); - } sysfs_notify(&twl->dev->kobj, NULL, "vbus"); + pm_runtime_mark_last_busy(twl->dev); + pm_runtime_put_autosuspend(twl->dev); + return 0; } @@ -650,6 +661,11 @@ static const struct phy_ops ops = { .owner = THIS_MODULE, }; +static const struct dev_pm_ops twl4030_usb_pm_ops = { + SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend, + twl4030_usb_runtime_resume, NULL) +}; + static int twl4030_usb_probe(struct platform_device *pdev) { struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev); @@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev) ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + /* Our job is to use irqs and status from the power module * to keep the transceiver disabled when nothing's connected. * @@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev) return status; } + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(twl->dev); + dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); return 0; } @@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev) struct twl4030_usb *twl = platform_get_drvdata(pdev); int val; + pm_runtime_get_sync(twl->dev); cancel_delayed_work(&twl->id_workaround_work); device_remove_file(twl->dev, &dev_attr_vbus); @@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev) /* disable complete OTG block */ twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); - - if (!twl->asleep) - twl4030_phy_power(twl, 0); + pm_runtime_mark_last_busy(twl->dev); + pm_runtime_put(twl->dev); return 0; } @@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = { .remove = twl4030_usb_remove, .driver = { .name = "twl4030_usb", + .pm = &twl4030_usb_pm_ops, .owner = THIS_MODULE, .of_match_table = of_match_ptr(twl4030_usb_id_table), }, diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 0042ccb46b9a..64d06b52f98a 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -11,10 +11,10 @@ menu "Pin controllers" depends on PINCTRL config PINMUX - bool "Support pin multiplexing controllers" + bool "Support pin multiplexing controllers" if COMPILE_TEST config PINCONF - bool "Support pin configuration controllers" + bool "Support pin configuration controllers" if COMPILE_TEST config GENERIC_PINCONF bool @@ -26,29 +26,6 @@ config DEBUG_PINCTRL help Say Y here to add some extra checks and diagnostics to PINCTRL calls. 
-config PINCTRL_ABX500 - bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions" - depends on AB8500_CORE - select GENERIC_PINCONF - help - Select this to enable the ABx500 family IC GPIO driver - -config PINCTRL_AB8500 - bool "AB8500 pin controller driver" - depends on PINCTRL_ABX500 && ARCH_U8500 - -config PINCTRL_AB8540 - bool "AB8540 pin controller driver" - depends on PINCTRL_ABX500 && ARCH_U8500 - -config PINCTRL_AB9540 - bool "AB9540 pin controller driver" - depends on PINCTRL_ABX500 && ARCH_U8500 - -config PINCTRL_AB8505 - bool "AB8505 pin controller driver" - depends on PINCTRL_ABX500 && ARCH_U8500 - config PINCTRL_ADI2 bool "ADI pin controller driver" depends on BLACKFIN @@ -93,7 +70,7 @@ config PINCTRL_AT91 config PINCTRL_BAYTRAIL bool "Intel Baytrail GPIO pin control" depends on GPIOLIB && ACPI && X86 - select IRQ_DOMAIN + select GPIOLIB_IRQCHIP help driver for memory mapped GPIO functionality on Intel Baytrail platforms. Supports 3 banks with 102, 28 and 44 gpios. @@ -120,88 +97,6 @@ config PINCTRL_BCM281XX BCM28145, and BCM28155 SoCs. This driver requires the pinctrl framework. GPIO is provided by a separate GPIO driver. -config PINCTRL_IMX - bool - select PINMUX - select PINCONF - -config PINCTRL_IMX1_CORE - bool - select PINMUX - select PINCONF - -config PINCTRL_IMX27 - bool "IMX27 pinctrl driver" - depends on SOC_IMX27 - select PINCTRL_IMX1_CORE - help - Say Y here to enable the imx27 pinctrl driver - - -config PINCTRL_IMX25 - bool "IMX25 pinctrl driver" - depends on OF - depends on SOC_IMX25 - select PINCTRL_IMX - help - Say Y here to enable the imx25 pinctrl driver - -config PINCTRL_IMX35 - bool "IMX35 pinctrl driver" - depends on SOC_IMX35 - select PINCTRL_IMX - help - Say Y here to enable the imx35 pinctrl driver - -config PINCTRL_IMX50 - bool "IMX50 pinctrl driver" - depends on SOC_IMX50 - select PINCTRL_IMX - help - Say Y here to enable the imx50 pinctrl driver - -config PINCTRL_IMX51 - bool "IMX51 pinctrl driver" - depends on SOC_IMX51 - select PINCTRL_IMX - help - Say Y here to enable the imx51 pinctrl driver - -config PINCTRL_IMX53 - bool "IMX53 pinctrl driver" - depends on SOC_IMX53 - select PINCTRL_IMX - help - Say Y here to enable the imx53 pinctrl driver - -config PINCTRL_IMX6Q - bool "IMX6Q/DL pinctrl driver" - depends on SOC_IMX6Q - select PINCTRL_IMX - help - Say Y here to enable the imx6q/dl pinctrl driver - -config PINCTRL_IMX6SL - bool "IMX6SL pinctrl driver" - depends on SOC_IMX6SL - select PINCTRL_IMX - help - Say Y here to enable the imx6sl pinctrl driver - -config PINCTRL_IMX6SX - bool "IMX6SX pinctrl driver" - depends on SOC_IMX6SX - select PINCTRL_IMX - help - Say Y here to enable the imx6sx pinctrl driver - -config PINCTRL_VF610 - bool "Freescale Vybrid VF610 pinctrl driver" - depends on SOC_VF610 - select PINCTRL_IMX - help - Say Y here to enable the Freescale Vybrid VF610 pinctrl driver - config PINCTRL_LANTIQ bool depends on LANTIQ @@ -213,71 +108,6 @@ config PINCTRL_FALCON depends on SOC_FALCON depends on PINCTRL_LANTIQ -config PINCTRL_MXS - bool - select PINMUX - select PINCONF - -config PINCTRL_IMX23 - bool - select PINCTRL_MXS - -config PINCTRL_IMX28 - bool - select PINCTRL_MXS - -config PINCTRL_MSM - bool - select PINMUX - select PINCONF - select GENERIC_PINCONF - select GPIOLIB_IRQCHIP - -config PINCTRL_APQ8064 - tristate "Qualcomm APQ8064 pin controller driver" - depends on GPIOLIB && OF - select PINCTRL_MSM - help - This is the pinctrl, pinmux, pinconf and gpiolib driver for the - Qualcomm TLMM block found in the Qualcomm 
APQ8064 platform. - -config PINCTRL_IPQ8064 - tristate "Qualcomm IPQ8064 pin controller driver" - depends on GPIOLIB && OF - select PINCTRL_MSM - help - This is the pinctrl, pinmux, pinconf and gpiolib driver for the - Qualcomm TLMM block found in the Qualcomm IPQ8064 platform. - -config PINCTRL_MSM8X74 - tristate "Qualcomm 8x74 pin controller driver" - depends on GPIOLIB && OF && (ARCH_QCOM || COMPILE_TEST) - select PINCTRL_MSM - help - This is the pinctrl, pinmux, pinconf and gpiolib driver for the - Qualcomm TLMM block found in the Qualcomm 8974 platform. - -config PINCTRL_NOMADIK - bool "Nomadik pin controller driver" - depends on ARCH_U8500 || ARCH_NOMADIK - select PINMUX - select PINCONF - select GPIOLIB - select OF_GPIO - select GPIOLIB_IRQCHIP - -config PINCTRL_STN8815 - bool "STN8815 pin controller driver" - depends on PINCTRL_NOMADIK && ARCH_NOMADIK - -config PINCTRL_DB8500 - bool "DB8500 pin controller driver" - depends on PINCTRL_NOMADIK && ARCH_U8500 - -config PINCTRL_DB8540 - bool "DB8540 pin controller driver" - depends on PINCTRL_NOMADIK && ARCH_U8500 - config PINCTRL_ROCKCHIP bool select PINMUX @@ -328,6 +158,12 @@ config PINCTRL_TEGRA124 bool select PINCTRL_TEGRA +config PINCTRL_TEGRA_XUSB + def_bool y if ARCH_TEGRA + select GENERIC_PHY + select PINCONF + select PINMUX + config PINCTRL_TZ1090 bool "Toumaz Xenif TZ1090 pin control driver" depends on SOC_TZ1090 @@ -356,22 +192,6 @@ config PINCTRL_COH901 COH 901 335 and COH 901 571/3. They contain 3, 5 or 7 ports of 8 GPIO pins each. -config PINCTRL_SAMSUNG - bool - select PINMUX - select PINCONF - -config PINCTRL_EXYNOS - bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440" - depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210) - select PINCTRL_SAMSUNG - -config PINCTRL_EXYNOS5440 - bool "Samsung EXYNOS5440 SoC pinctrl driver" - depends on SOC_EXYNOS5440 - select PINMUX - select PINCONF - config PINCTRL_PALMAS bool "Pinctrl driver for the PALMAS Series MFD devices" depends on OF && MFD_PALMAS @@ -383,18 +203,12 @@ config PINCTRL_PALMAS open drain configuration for the Palmas series devices like TPS65913, TPS80036 etc. 
-config PINCTRL_S3C24XX - bool "Samsung S3C24XX SoC pinctrl driver" - depends on ARCH_S3C24XX - select PINCTRL_SAMSUNG - -config PINCTRL_S3C64XX - bool "Samsung S3C64XX SoC pinctrl driver" - depends on ARCH_S3C64XX - select PINCTRL_SAMSUNG - source "drivers/pinctrl/berlin/Kconfig" +source "drivers/pinctrl/freescale/Kconfig" source "drivers/pinctrl/mvebu/Kconfig" +source "drivers/pinctrl/nomadik/Kconfig" +source "drivers/pinctrl/qcom/Kconfig" +source "drivers/pinctrl/samsung/Kconfig" source "drivers/pinctrl/sh-pfc/Kconfig" source "drivers/pinctrl/spear/Kconfig" source "drivers/pinctrl/sunxi/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index c4b5d405b8f5..51f52d32859e 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -9,11 +9,6 @@ ifeq ($(CONFIG_OF),y) obj-$(CONFIG_PINCTRL) += devicetree.o endif obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o -obj-$(CONFIG_PINCTRL_ABX500) += pinctrl-abx500.o -obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o -obj-$(CONFIG_PINCTRL_AB8540) += pinctrl-ab8540.o -obj-$(CONFIG_PINCTRL_AB9540) += pinctrl-ab9540.o -obj-$(CONFIG_PINCTRL_AB8505) += pinctrl-ab8505.o obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o @@ -22,30 +17,7 @@ obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o -obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o -obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o -obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o -obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o -obj-$(CONFIG_PINCTRL_IMX50) += pinctrl-imx50.o -obj-$(CONFIG_PINCTRL_IMX51) += pinctrl-imx51.o -obj-$(CONFIG_PINCTRL_IMX53) += pinctrl-imx53.o -obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o -obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6dl.o -obj-$(CONFIG_PINCTRL_IMX6SL) += pinctrl-imx6sl.o -obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o -obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o -obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o -obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o -obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o -obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o -obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o -obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o -obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o -obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o -obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o -obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o -obj-$(CONFIG_PINCTRL_DB8540) += pinctrl-nomadik-db8540.o obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o @@ -55,25 +27,23 @@ obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o +obj-$(CONFIG_PINCTRL_TEGRA_XUSB) += pinctrl-tegra-xusb.o obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o -obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o -obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o -obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o -obj-$(CONFIG_PINCTRL_S3C24XX) 
+= pinctrl-s3c24xx.o -obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o -obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o obj-$(CONFIG_ARCH_BERLIN) += berlin/ +obj-y += freescale/ obj-$(CONFIG_PLAT_ORION) += mvebu/ -obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/ -obj-$(CONFIG_SUPERH) += sh-pfc/ +obj-y += nomadik/ +obj-$(CONFIG_ARCH_QCOM) += qcom/ +obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/ +obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/ obj-$(CONFIG_PLAT_SPEAR) += spear/ -obj-$(CONFIG_ARCH_VT8500) += vt8500/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ +obj-$(CONFIG_ARCH_VT8500) += vt8500/ diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c index 86db2235ab00..7f0b0f93242b 100644 --- a/drivers/pinctrl/berlin/berlin.c +++ b/drivers/pinctrl/berlin/berlin.c @@ -99,30 +99,11 @@ static int berlin_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrl_dev, return 0; } -static void berlin_pinctrl_dt_free_map(struct pinctrl_dev *pctrl_dev, - struct pinctrl_map *map, - unsigned nmaps) -{ - int i; - - for (i = 0; i < nmaps; i++) { - if (map[i].type == PIN_MAP_TYPE_MUX_GROUP) { - kfree(map[i].data.mux.group); - - /* a function can be applied to multiple groups */ - if (i == 0) - kfree(map[i].data.mux.function); - } - } - - kfree(map); -} - static const struct pinctrl_ops berlin_pinctrl_ops = { .get_groups_count = &berlin_pinctrl_get_group_count, .get_group_name = &berlin_pinctrl_get_group_name, .dt_node_to_map = &berlin_pinctrl_dt_node_to_map, - .dt_free_map = &berlin_pinctrl_dt_free_map, + .dt_free_map = &pinctrl_utils_dt_free_map, }; static int berlin_pinmux_get_functions_count(struct pinctrl_dev *pctrl_dev) @@ -170,9 +151,9 @@ berlin_pinctrl_find_function_by_name(struct berlin_pinctrl *pctrl, return NULL; } -static int berlin_pinmux_enable(struct pinctrl_dev *pctrl_dev, - unsigned function, - unsigned group) +static int berlin_pinmux_set(struct pinctrl_dev *pctrl_dev, + unsigned function, + unsigned group) { struct berlin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctrl_dev); const struct berlin_desc_group *group_desc = pctrl->desc->groups + group; @@ -197,7 +178,7 @@ static const struct pinmux_ops berlin_pinmux_ops = { .get_functions_count = &berlin_pinmux_get_functions_count, .get_function_name = &berlin_pinmux_get_function_name, .get_function_groups = &berlin_pinmux_get_function_groups, - .enable = &berlin_pinmux_enable, + .set_mux = &berlin_pinmux_set, }; static int berlin_pinctrl_add_function(struct berlin_pinctrl *pctrl, diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index e09474ecde23..e4f65510c87e 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -992,29 +992,15 @@ int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) if (p->state) { /* - * The set of groups with a mux configuration in the old state - * may not be identical to the set of groups with a mux setting - * in the new state. While this might be unusual, it's entirely - * possible for the "user"-supplied mapping table to be written - * that way. For each group that was configured in the old state - * but not in the new state, this code puts that group into a - * safe/disabled state. + * For each pinmux setting in the old state, forget SW's record + * of mux owner for that pingroup. 
Any pingroups which are + * still owned by the new state will be re-acquired by the call + * to pinmux_enable_setting() in the loop below. */ list_for_each_entry(setting, &p->state->settings, node) { - bool found = false; if (setting->type != PIN_MAP_TYPE_MUX_GROUP) continue; - list_for_each_entry(setting2, &state->settings, node) { - if (setting2->type != PIN_MAP_TYPE_MUX_GROUP) - continue; - if (setting2->data.mux.group == - setting->data.mux.group) { - found = true; - break; - } - } - if (!found) - pinmux_disable_setting(setting); + pinmux_disable_setting(setting); } } diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig new file mode 100644 index 000000000000..16aac38793fe --- /dev/null +++ b/drivers/pinctrl/freescale/Kconfig @@ -0,0 +1,108 @@ +config PINCTRL_IMX + bool + select PINMUX + select PINCONF + +config PINCTRL_IMX1_CORE + bool + select PINMUX + select PINCONF + +config PINCTRL_IMX1 + bool "IMX1 pinctrl driver" + depends on SOC_IMX1 + select PINCTRL_IMX1_CORE + help + Say Y here to enable the imx1 pinctrl driver + +config PINCTRL_IMX21 + bool "i.MX21 pinctrl driver" + depends on SOC_IMX21 + select PINCTRL_IMX1_CORE + help + Say Y here to enable the i.MX21 pinctrl driver + +config PINCTRL_IMX27 + bool "IMX27 pinctrl driver" + depends on SOC_IMX27 + select PINCTRL_IMX1_CORE + help + Say Y here to enable the imx27 pinctrl driver + + +config PINCTRL_IMX25 + bool "IMX25 pinctrl driver" + depends on OF + depends on SOC_IMX25 + select PINCTRL_IMX + help + Say Y here to enable the imx25 pinctrl driver + +config PINCTRL_IMX35 + bool "IMX35 pinctrl driver" + depends on SOC_IMX35 + select PINCTRL_IMX + help + Say Y here to enable the imx35 pinctrl driver + +config PINCTRL_IMX50 + bool "IMX50 pinctrl driver" + depends on SOC_IMX50 + select PINCTRL_IMX + help + Say Y here to enable the imx50 pinctrl driver + +config PINCTRL_IMX51 + bool "IMX51 pinctrl driver" + depends on SOC_IMX51 + select PINCTRL_IMX + help + Say Y here to enable the imx51 pinctrl driver + +config PINCTRL_IMX53 + bool "IMX53 pinctrl driver" + depends on SOC_IMX53 + select PINCTRL_IMX + help + Say Y here to enable the imx53 pinctrl driver + +config PINCTRL_IMX6Q + bool "IMX6Q/DL pinctrl driver" + depends on SOC_IMX6Q + select PINCTRL_IMX + help + Say Y here to enable the imx6q/dl pinctrl driver + +config PINCTRL_IMX6SL + bool "IMX6SL pinctrl driver" + depends on SOC_IMX6SL + select PINCTRL_IMX + help + Say Y here to enable the imx6sl pinctrl driver + +config PINCTRL_IMX6SX + bool "IMX6SX pinctrl driver" + depends on SOC_IMX6SX + select PINCTRL_IMX + help + Say Y here to enable the imx6sx pinctrl driver + +config PINCTRL_VF610 + bool "Freescale Vybrid VF610 pinctrl driver" + depends on SOC_VF610 + select PINCTRL_IMX + help + Say Y here to enable the Freescale Vybrid VF610 pinctrl driver + +config PINCTRL_MXS + bool + select PINMUX + select PINCONF + +config PINCTRL_IMX23 + bool + select PINCTRL_MXS + +config PINCTRL_IMX28 + bool + select PINCTRL_MXS diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile new file mode 100644 index 000000000000..bba73c22f043 --- /dev/null +++ b/drivers/pinctrl/freescale/Makefile @@ -0,0 +1,19 @@ +# Freescale pin control drivers +obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o +obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o +obj-$(CONFIG_PINCTRL_IMX1) += pinctrl-imx1.o +obj-$(CONFIG_PINCTRL_IMX21) += pinctrl-imx21.o +obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o +obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o 
+obj-$(CONFIG_PINCTRL_IMX50) += pinctrl-imx50.o +obj-$(CONFIG_PINCTRL_IMX51) += pinctrl-imx51.o +obj-$(CONFIG_PINCTRL_IMX53) += pinctrl-imx53.o +obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6q.o +obj-$(CONFIG_PINCTRL_IMX6Q) += pinctrl-imx6dl.o +obj-$(CONFIG_PINCTRL_IMX6SL) += pinctrl-imx6sl.o +obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o +obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o +obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o +obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o +obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o +obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index a24448e5d399..f2446769247f 100644 --- a/drivers/pinctrl/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -24,7 +24,7 @@ #include <linux/pinctrl/pinmux.h> #include <linux/slab.h> -#include "core.h" +#include "../core.h" #include "pinctrl-imx.h" /* The bits in CONFIG cell defined in binding doc*/ @@ -179,8 +179,8 @@ static const struct pinctrl_ops imx_pctrl_ops = { }; -static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) { struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx_pinctrl_soc_info *info = ipctl->info; @@ -204,7 +204,7 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, pin_id = pin->pin; pin_reg = &info->pin_regs[pin_id]; - if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->mux_reg) { + if (pin_reg->mux_reg == -1) { dev_err(ipctl->dev, "Pin(%s) does not support mux function\n", info->pins[pin_id].name); return -EINVAL; @@ -298,7 +298,7 @@ static const struct pinmux_ops imx_pmx_ops = { .get_functions_count = imx_pmx_get_funcs_count, .get_function_name = imx_pmx_get_func_name, .get_function_groups = imx_pmx_get_groups, - .enable = imx_pmx_enable, + .set_mux = imx_pmx_set, }; static int imx_pinconf_get(struct pinctrl_dev *pctldev, @@ -308,7 +308,7 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev, const struct imx_pinctrl_soc_info *info = ipctl->info; const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id]; - if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->conf_reg) { + if (pin_reg->conf_reg == -1) { dev_err(info->dev, "Pin(%s) does not support config function\n", info->pins[pin_id].name); return -EINVAL; @@ -331,7 +331,7 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev, const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id]; int i; - if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->conf_reg) { + if (pin_reg->conf_reg == -1) { dev_err(info->dev, "Pin(%s) does not support config function\n", info->pins[pin_id].name); return -EINVAL; @@ -515,7 +515,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np, /* Initialise function */ func->name = np->name; func->num_groups = of_get_child_count(np); - if (func->num_groups <= 0) { + if (func->num_groups == 0) { dev_err(info->dev, "no groups defined in %s\n", np->full_name); return -EINVAL; } @@ -586,10 +586,11 @@ int imx_pinctrl_probe(struct platform_device *pdev, if (!ipctl) return -ENOMEM; - info->pin_regs = devm_kzalloc(&pdev->dev, sizeof(*info->pin_regs) * + info->pin_regs = devm_kmalloc(&pdev->dev, sizeof(*info->pin_regs) * info->npins, GFP_KERNEL); if (!info->pin_regs) return -ENOMEM; + memset(info->pin_regs, 0xff, sizeof(*info->pin_regs) * info->npins); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ipctl->base = 
devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/pinctrl/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h index db408b057000..49e55d39f7c8 100644 --- a/drivers/pinctrl/pinctrl-imx.h +++ b/drivers/pinctrl/freescale/pinctrl-imx.h @@ -67,8 +67,8 @@ struct imx_pmx_func { * @conf_reg: config register offset */ struct imx_pin_reg { - u16 mux_reg; - u16 conf_reg; + s16 mux_reg; + s16 conf_reg; }; struct imx_pinctrl_soc_info { @@ -83,8 +83,7 @@ struct imx_pinctrl_soc_info { unsigned int flags; }; -#define ZERO_OFFSET_VALID 0x1 -#define SHARE_MUX_CONF_REG 0x2 +#define SHARE_MUX_CONF_REG 0x1 #define NO_MUX 0x0 #define NO_PAD 0x0 diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 815384b377b5..5ac59fbb2440 100644 --- a/drivers/pinctrl/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -28,7 +28,7 @@ #include <linux/pinctrl/pinmux.h> #include <linux/slab.h> -#include "core.h" +#include "../core.h" #include "pinctrl-imx1.h" struct imx1_pinctrl { @@ -298,8 +298,8 @@ static const struct pinctrl_ops imx1_pctrl_ops = { }; -static int imx1_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int imx1_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); const struct imx1_pinctrl_soc_info *info = ipctl->info; @@ -385,7 +385,7 @@ static const struct pinmux_ops imx1_pmx_ops = { .get_functions_count = imx1_pmx_get_funcs_count, .get_function_name = imx1_pmx_get_func_name, .get_function_groups = imx1_pmx_get_groups, - .enable = imx1_pmx_enable, + .set_mux = imx1_pmx_set, }; static int imx1_pinconf_get(struct pinctrl_dev *pctldev, @@ -526,7 +526,7 @@ static int imx1_pinctrl_parse_functions(struct device_node *np, /* Initialise function */ func->name = np->name; func->num_groups = of_get_child_count(np); - if (func->num_groups <= 0) + if (func->num_groups == 0) return -EINVAL; func->groups = devm_kzalloc(info->dev, diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c new file mode 100644 index 000000000000..533a6e519648 --- /dev/null +++ b/drivers/pinctrl/freescale/pinctrl-imx1.c @@ -0,0 +1,279 @@ +/* + * i.MX1 pinctrl driver based on imx pinmux core + * + * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-imx1.h" + +#define PAD_ID(port, pin) ((port) * 32 + (pin)) +#define PA 0 +#define PB 1 +#define PC 2 +#define PD 3 + +enum imx1_pads { + MX1_PAD_A24 = PAD_ID(PA, 0), + MX1_PAD_TIN = PAD_ID(PA, 1), + MX1_PAD_PWMO = PAD_ID(PA, 2), + MX1_PAD_CSI_MCLK = PAD_ID(PA, 3), + MX1_PAD_CSI_D0 = PAD_ID(PA, 4), + MX1_PAD_CSI_D1 = PAD_ID(PA, 5), + MX1_PAD_CSI_D2 = PAD_ID(PA, 6), + MX1_PAD_CSI_D3 = PAD_ID(PA, 7), + MX1_PAD_CSI_D4 = PAD_ID(PA, 8), + MX1_PAD_CSI_D5 = PAD_ID(PA, 9), + MX1_PAD_CSI_D6 = PAD_ID(PA, 10), + MX1_PAD_CSI_D7 = PAD_ID(PA, 11), + MX1_PAD_CSI_VSYNC = PAD_ID(PA, 12), + MX1_PAD_CSI_HSYNC = PAD_ID(PA, 13), + MX1_PAD_CSI_PIXCLK = PAD_ID(PA, 14), + MX1_PAD_I2C_SDA = PAD_ID(PA, 15), + MX1_PAD_I2C_SCL = PAD_ID(PA, 16), + MX1_PAD_DTACK = PAD_ID(PA, 17), + MX1_PAD_BCLK = PAD_ID(PA, 18), + MX1_PAD_LBA = PAD_ID(PA, 19), + MX1_PAD_ECB = PAD_ID(PA, 20), + MX1_PAD_A0 = PAD_ID(PA, 21), + MX1_PAD_CS4 = PAD_ID(PA, 22), + MX1_PAD_CS5 = PAD_ID(PA, 23), + MX1_PAD_A16 = PAD_ID(PA, 24), + MX1_PAD_A17 = PAD_ID(PA, 25), + MX1_PAD_A18 = PAD_ID(PA, 26), + MX1_PAD_A19 = PAD_ID(PA, 27), + MX1_PAD_A20 = PAD_ID(PA, 28), + MX1_PAD_A21 = PAD_ID(PA, 29), + MX1_PAD_A22 = PAD_ID(PA, 30), + MX1_PAD_A23 = PAD_ID(PA, 31), + MX1_PAD_SD_DAT0 = PAD_ID(PB, 8), + MX1_PAD_SD_DAT1 = PAD_ID(PB, 9), + MX1_PAD_SD_DAT2 = PAD_ID(PB, 10), + MX1_PAD_SD_DAT3 = PAD_ID(PB, 11), + MX1_PAD_SD_SCLK = PAD_ID(PB, 12), + MX1_PAD_SD_CMD = PAD_ID(PB, 13), + MX1_PAD_SIM_SVEN = PAD_ID(PB, 14), + MX1_PAD_SIM_PD = PAD_ID(PB, 15), + MX1_PAD_SIM_TX = PAD_ID(PB, 16), + MX1_PAD_SIM_RX = PAD_ID(PB, 17), + MX1_PAD_SIM_RST = PAD_ID(PB, 18), + MX1_PAD_SIM_CLK = PAD_ID(PB, 19), + MX1_PAD_USBD_AFE = PAD_ID(PB, 20), + MX1_PAD_USBD_OE = PAD_ID(PB, 21), + MX1_PAD_USBD_RCV = PAD_ID(PB, 22), + MX1_PAD_USBD_SUSPND = PAD_ID(PB, 23), + MX1_PAD_USBD_VP = PAD_ID(PB, 24), + MX1_PAD_USBD_VM = PAD_ID(PB, 25), + MX1_PAD_USBD_VPO = PAD_ID(PB, 26), + MX1_PAD_USBD_VMO = PAD_ID(PB, 27), + MX1_PAD_UART2_CTS = PAD_ID(PB, 28), + MX1_PAD_UART2_RTS = PAD_ID(PB, 29), + MX1_PAD_UART2_TXD = PAD_ID(PB, 30), + MX1_PAD_UART2_RXD = PAD_ID(PB, 31), + MX1_PAD_SSI_RXFS = PAD_ID(PC, 3), + MX1_PAD_SSI_RXCLK = PAD_ID(PC, 4), + MX1_PAD_SSI_RXDAT = PAD_ID(PC, 5), + MX1_PAD_SSI_TXDAT = PAD_ID(PC, 6), + MX1_PAD_SSI_TXFS = PAD_ID(PC, 7), + MX1_PAD_SSI_TXCLK = PAD_ID(PC, 8), + MX1_PAD_UART1_CTS = PAD_ID(PC, 9), + MX1_PAD_UART1_RTS = PAD_ID(PC, 10), + MX1_PAD_UART1_TXD = PAD_ID(PC, 11), + MX1_PAD_UART1_RXD = PAD_ID(PC, 12), + MX1_PAD_SPI1_RDY = PAD_ID(PC, 13), + MX1_PAD_SPI1_SCLK = PAD_ID(PC, 14), + MX1_PAD_SPI1_SS = PAD_ID(PC, 15), + MX1_PAD_SPI1_MISO = PAD_ID(PC, 16), + MX1_PAD_SPI1_MOSI = PAD_ID(PC, 17), + MX1_PAD_BT13 = PAD_ID(PC, 19), + MX1_PAD_BT12 = PAD_ID(PC, 20), + MX1_PAD_BT11 = PAD_ID(PC, 21), + MX1_PAD_BT10 = PAD_ID(PC, 22), + MX1_PAD_BT9 = PAD_ID(PC, 23), + MX1_PAD_BT8 = PAD_ID(PC, 24), + MX1_PAD_BT7 = PAD_ID(PC, 25), + MX1_PAD_BT6 = PAD_ID(PC, 26), + MX1_PAD_BT5 = PAD_ID(PC, 27), + MX1_PAD_BT4 = PAD_ID(PC, 28), + MX1_PAD_BT3 = PAD_ID(PC, 29), + MX1_PAD_BT2 = PAD_ID(PC, 30), + MX1_PAD_BT1 = PAD_ID(PC, 31), + MX1_PAD_LSCLK = PAD_ID(PD, 6), + MX1_PAD_REV = PAD_ID(PD, 7), + MX1_PAD_CLS = PAD_ID(PD, 8), + MX1_PAD_PS = PAD_ID(PD, 9), + MX1_PAD_SPL_SPR = PAD_ID(PD, 10), + MX1_PAD_CONTRAST = PAD_ID(PD, 11), + MX1_PAD_ACD_OE = PAD_ID(PD, 12), + MX1_PAD_LP_HSYNC = PAD_ID(PD, 13), + MX1_PAD_FLM_VSYNC = PAD_ID(PD, 14), + MX1_PAD_LD0 = PAD_ID(PD, 15), + 
MX1_PAD_LD1 = PAD_ID(PD, 16), + MX1_PAD_LD2 = PAD_ID(PD, 17), + MX1_PAD_LD3 = PAD_ID(PD, 18), + MX1_PAD_LD4 = PAD_ID(PD, 19), + MX1_PAD_LD5 = PAD_ID(PD, 20), + MX1_PAD_LD6 = PAD_ID(PD, 21), + MX1_PAD_LD7 = PAD_ID(PD, 22), + MX1_PAD_LD8 = PAD_ID(PD, 23), + MX1_PAD_LD9 = PAD_ID(PD, 24), + MX1_PAD_LD10 = PAD_ID(PD, 25), + MX1_PAD_LD11 = PAD_ID(PD, 26), + MX1_PAD_LD12 = PAD_ID(PD, 27), + MX1_PAD_LD13 = PAD_ID(PD, 28), + MX1_PAD_LD14 = PAD_ID(PD, 29), + MX1_PAD_LD15 = PAD_ID(PD, 30), + MX1_PAD_TMR2OUT = PAD_ID(PD, 31), +}; + +/* Pad names for the pinmux subsystem */ +static const struct pinctrl_pin_desc imx1_pinctrl_pads[] = { + IMX_PINCTRL_PIN(MX1_PAD_A24), + IMX_PINCTRL_PIN(MX1_PAD_TIN), + IMX_PINCTRL_PIN(MX1_PAD_PWMO), + IMX_PINCTRL_PIN(MX1_PAD_CSI_MCLK), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D0), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D1), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D2), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D3), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D4), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D5), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D6), + IMX_PINCTRL_PIN(MX1_PAD_CSI_D7), + IMX_PINCTRL_PIN(MX1_PAD_CSI_VSYNC), + IMX_PINCTRL_PIN(MX1_PAD_CSI_HSYNC), + IMX_PINCTRL_PIN(MX1_PAD_CSI_PIXCLK), + IMX_PINCTRL_PIN(MX1_PAD_I2C_SDA), + IMX_PINCTRL_PIN(MX1_PAD_I2C_SCL), + IMX_PINCTRL_PIN(MX1_PAD_DTACK), + IMX_PINCTRL_PIN(MX1_PAD_BCLK), + IMX_PINCTRL_PIN(MX1_PAD_LBA), + IMX_PINCTRL_PIN(MX1_PAD_ECB), + IMX_PINCTRL_PIN(MX1_PAD_A0), + IMX_PINCTRL_PIN(MX1_PAD_CS4), + IMX_PINCTRL_PIN(MX1_PAD_CS5), + IMX_PINCTRL_PIN(MX1_PAD_A16), + IMX_PINCTRL_PIN(MX1_PAD_A17), + IMX_PINCTRL_PIN(MX1_PAD_A18), + IMX_PINCTRL_PIN(MX1_PAD_A19), + IMX_PINCTRL_PIN(MX1_PAD_A20), + IMX_PINCTRL_PIN(MX1_PAD_A21), + IMX_PINCTRL_PIN(MX1_PAD_A22), + IMX_PINCTRL_PIN(MX1_PAD_A23), + IMX_PINCTRL_PIN(MX1_PAD_SD_DAT0), + IMX_PINCTRL_PIN(MX1_PAD_SD_DAT1), + IMX_PINCTRL_PIN(MX1_PAD_SD_DAT2), + IMX_PINCTRL_PIN(MX1_PAD_SD_DAT3), + IMX_PINCTRL_PIN(MX1_PAD_SD_SCLK), + IMX_PINCTRL_PIN(MX1_PAD_SD_CMD), + IMX_PINCTRL_PIN(MX1_PAD_SIM_SVEN), + IMX_PINCTRL_PIN(MX1_PAD_SIM_PD), + IMX_PINCTRL_PIN(MX1_PAD_SIM_TX), + IMX_PINCTRL_PIN(MX1_PAD_SIM_RX), + IMX_PINCTRL_PIN(MX1_PAD_SIM_CLK), + IMX_PINCTRL_PIN(MX1_PAD_USBD_AFE), + IMX_PINCTRL_PIN(MX1_PAD_USBD_OE), + IMX_PINCTRL_PIN(MX1_PAD_USBD_RCV), + IMX_PINCTRL_PIN(MX1_PAD_USBD_SUSPND), + IMX_PINCTRL_PIN(MX1_PAD_USBD_VP), + IMX_PINCTRL_PIN(MX1_PAD_USBD_VM), + IMX_PINCTRL_PIN(MX1_PAD_USBD_VPO), + IMX_PINCTRL_PIN(MX1_PAD_USBD_VMO), + IMX_PINCTRL_PIN(MX1_PAD_UART2_CTS), + IMX_PINCTRL_PIN(MX1_PAD_UART2_RTS), + IMX_PINCTRL_PIN(MX1_PAD_UART2_TXD), + IMX_PINCTRL_PIN(MX1_PAD_UART2_RXD), + IMX_PINCTRL_PIN(MX1_PAD_SSI_RXFS), + IMX_PINCTRL_PIN(MX1_PAD_SSI_RXCLK), + IMX_PINCTRL_PIN(MX1_PAD_SSI_RXDAT), + IMX_PINCTRL_PIN(MX1_PAD_SSI_TXDAT), + IMX_PINCTRL_PIN(MX1_PAD_SSI_TXFS), + IMX_PINCTRL_PIN(MX1_PAD_SSI_TXCLK), + IMX_PINCTRL_PIN(MX1_PAD_UART1_CTS), + IMX_PINCTRL_PIN(MX1_PAD_UART1_RTS), + IMX_PINCTRL_PIN(MX1_PAD_UART1_TXD), + IMX_PINCTRL_PIN(MX1_PAD_UART1_RXD), + IMX_PINCTRL_PIN(MX1_PAD_SPI1_RDY), + IMX_PINCTRL_PIN(MX1_PAD_SPI1_SCLK), + IMX_PINCTRL_PIN(MX1_PAD_SPI1_SS), + IMX_PINCTRL_PIN(MX1_PAD_SPI1_MISO), + IMX_PINCTRL_PIN(MX1_PAD_SPI1_MOSI), + IMX_PINCTRL_PIN(MX1_PAD_BT13), + IMX_PINCTRL_PIN(MX1_PAD_BT12), + IMX_PINCTRL_PIN(MX1_PAD_BT11), + IMX_PINCTRL_PIN(MX1_PAD_BT10), + IMX_PINCTRL_PIN(MX1_PAD_BT9), + IMX_PINCTRL_PIN(MX1_PAD_BT8), + IMX_PINCTRL_PIN(MX1_PAD_BT7), + IMX_PINCTRL_PIN(MX1_PAD_BT6), + IMX_PINCTRL_PIN(MX1_PAD_BT5), + IMX_PINCTRL_PIN(MX1_PAD_BT4), + IMX_PINCTRL_PIN(MX1_PAD_BT3), + IMX_PINCTRL_PIN(MX1_PAD_BT2), + IMX_PINCTRL_PIN(MX1_PAD_BT1), + 
IMX_PINCTRL_PIN(MX1_PAD_LSCLK), + IMX_PINCTRL_PIN(MX1_PAD_REV), + IMX_PINCTRL_PIN(MX1_PAD_CLS), + IMX_PINCTRL_PIN(MX1_PAD_PS), + IMX_PINCTRL_PIN(MX1_PAD_SPL_SPR), + IMX_PINCTRL_PIN(MX1_PAD_CONTRAST), + IMX_PINCTRL_PIN(MX1_PAD_ACD_OE), + IMX_PINCTRL_PIN(MX1_PAD_LP_HSYNC), + IMX_PINCTRL_PIN(MX1_PAD_FLM_VSYNC), + IMX_PINCTRL_PIN(MX1_PAD_LD0), + IMX_PINCTRL_PIN(MX1_PAD_LD1), + IMX_PINCTRL_PIN(MX1_PAD_LD2), + IMX_PINCTRL_PIN(MX1_PAD_LD3), + IMX_PINCTRL_PIN(MX1_PAD_LD4), + IMX_PINCTRL_PIN(MX1_PAD_LD5), + IMX_PINCTRL_PIN(MX1_PAD_LD6), + IMX_PINCTRL_PIN(MX1_PAD_LD7), + IMX_PINCTRL_PIN(MX1_PAD_LD8), + IMX_PINCTRL_PIN(MX1_PAD_LD9), + IMX_PINCTRL_PIN(MX1_PAD_LD10), + IMX_PINCTRL_PIN(MX1_PAD_LD11), + IMX_PINCTRL_PIN(MX1_PAD_LD12), + IMX_PINCTRL_PIN(MX1_PAD_LD13), + IMX_PINCTRL_PIN(MX1_PAD_LD14), + IMX_PINCTRL_PIN(MX1_PAD_LD15), + IMX_PINCTRL_PIN(MX1_PAD_TMR2OUT), +}; + +static struct imx1_pinctrl_soc_info imx1_pinctrl_info = { + .pins = imx1_pinctrl_pads, + .npins = ARRAY_SIZE(imx1_pinctrl_pads), +}; + +static int __init imx1_pinctrl_probe(struct platform_device *pdev) +{ + return imx1_pinctrl_core_probe(pdev, &imx1_pinctrl_info); +} + +static const struct of_device_id imx1_pinctrl_of_match[] = { + { .compatible = "fsl,imx1-iomuxc", }, + { } +}; +MODULE_DEVICE_TABLE(of, imx1_pinctrl_of_match); + +static struct platform_driver imx1_pinctrl_driver = { + .driver = { + .name = "imx1-pinctrl", + .owner = THIS_MODULE, + .of_match_table = imx1_pinctrl_of_match, + }, + .remove = imx1_pinctrl_core_remove, +}; +module_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe); + +MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); +MODULE_DESCRIPTION("Freescale i.MX1 pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/pinctrl-imx1.h b/drivers/pinctrl/freescale/pinctrl-imx1.h index 692a54c15cda..692a54c15cda 100644 --- a/drivers/pinctrl/pinctrl-imx1.h +++ b/drivers/pinctrl/freescale/pinctrl-imx1.h diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c new file mode 100644 index 000000000000..1b3b2311b033 --- /dev/null +++ b/drivers/pinctrl/freescale/pinctrl-imx21.c @@ -0,0 +1,342 @@ +/* + * i.MX21 pinctrl driver based on imx pinmux core + * + * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-imx1.h" + +#define PAD_ID(port, pin) ((port) * 32 + (pin)) +#define PA 0 +#define PB 1 +#define PC 2 +#define PD 3 +#define PE 4 +#define PF 5 + +enum imx21_pads { + MX21_PAD_LSCLK = PAD_ID(PA, 5), + MX21_PAD_LD0 = PAD_ID(PA, 6), + MX21_PAD_LD1 = PAD_ID(PA, 7), + MX21_PAD_LD2 = PAD_ID(PA, 8), + MX21_PAD_LD3 = PAD_ID(PA, 9), + MX21_PAD_LD4 = PAD_ID(PA, 10), + MX21_PAD_LD5 = PAD_ID(PA, 11), + MX21_PAD_LD6 = PAD_ID(PA, 12), + MX21_PAD_LD7 = PAD_ID(PA, 13), + MX21_PAD_LD8 = PAD_ID(PA, 14), + MX21_PAD_LD9 = PAD_ID(PA, 15), + MX21_PAD_LD10 = PAD_ID(PA, 16), + MX21_PAD_LD11 = PAD_ID(PA, 17), + MX21_PAD_LD12 = PAD_ID(PA, 18), + MX21_PAD_LD13 = PAD_ID(PA, 19), + MX21_PAD_LD14 = PAD_ID(PA, 20), + MX21_PAD_LD15 = PAD_ID(PA, 21), + MX21_PAD_LD16 = PAD_ID(PA, 22), + MX21_PAD_LD17 = PAD_ID(PA, 23), + MX21_PAD_REV = PAD_ID(PA, 24), + MX21_PAD_CLS = PAD_ID(PA, 25), + MX21_PAD_PS = PAD_ID(PA, 26), + MX21_PAD_SPL_SPR = PAD_ID(PA, 27), + MX21_PAD_HSYNC = PAD_ID(PA, 28), + MX21_PAD_VSYNC = PAD_ID(PA, 29), + MX21_PAD_CONTRAST = PAD_ID(PA, 30), + MX21_PAD_OE_ACD = PAD_ID(PA, 31), + MX21_PAD_SD2_D0 = PAD_ID(PB, 4), + MX21_PAD_SD2_D1 = PAD_ID(PB, 5), + MX21_PAD_SD2_D2 = PAD_ID(PB, 6), + MX21_PAD_SD2_D3 = PAD_ID(PB, 7), + MX21_PAD_SD2_CMD = PAD_ID(PB, 8), + MX21_PAD_SD2_CLK = PAD_ID(PB, 9), + MX21_PAD_CSI_D0 = PAD_ID(PB, 10), + MX21_PAD_CSI_D1 = PAD_ID(PB, 11), + MX21_PAD_CSI_D2 = PAD_ID(PB, 12), + MX21_PAD_CSI_D3 = PAD_ID(PB, 13), + MX21_PAD_CSI_D4 = PAD_ID(PB, 14), + MX21_PAD_CSI_MCLK = PAD_ID(PB, 15), + MX21_PAD_CSI_PIXCLK = PAD_ID(PB, 16), + MX21_PAD_CSI_D5 = PAD_ID(PB, 17), + MX21_PAD_CSI_D6 = PAD_ID(PB, 18), + MX21_PAD_CSI_D7 = PAD_ID(PB, 19), + MX21_PAD_CSI_VSYNC = PAD_ID(PB, 20), + MX21_PAD_CSI_HSYNC = PAD_ID(PB, 21), + MX21_PAD_USB_BYP = PAD_ID(PB, 22), + MX21_PAD_USB_PWR = PAD_ID(PB, 23), + MX21_PAD_USB_OC = PAD_ID(PB, 24), + MX21_PAD_USBH_ON = PAD_ID(PB, 25), + MX21_PAD_USBH1_FS = PAD_ID(PB, 26), + MX21_PAD_USBH1_OE = PAD_ID(PB, 27), + MX21_PAD_USBH1_TXDM = PAD_ID(PB, 28), + MX21_PAD_USBH1_TXDP = PAD_ID(PB, 29), + MX21_PAD_USBH1_RXDM = PAD_ID(PB, 30), + MX21_PAD_USBH1_RXDP = PAD_ID(PB, 31), + MX21_PAD_USBG_SDA = PAD_ID(PC, 5), + MX21_PAD_USBG_SCL = PAD_ID(PC, 6), + MX21_PAD_USBG_ON = PAD_ID(PC, 7), + MX21_PAD_USBG_FS = PAD_ID(PC, 8), + MX21_PAD_USBG_OE = PAD_ID(PC, 9), + MX21_PAD_USBG_TXDM = PAD_ID(PC, 10), + MX21_PAD_USBG_TXDP = PAD_ID(PC, 11), + MX21_PAD_USBG_RXDM = PAD_ID(PC, 12), + MX21_PAD_USBG_RXDP = PAD_ID(PC, 13), + MX21_PAD_TOUT = PAD_ID(PC, 14), + MX21_PAD_TIN = PAD_ID(PC, 15), + MX21_PAD_SAP_FS = PAD_ID(PC, 16), + MX21_PAD_SAP_RXD = PAD_ID(PC, 17), + MX21_PAD_SAP_TXD = PAD_ID(PC, 18), + MX21_PAD_SAP_CLK = PAD_ID(PC, 19), + MX21_PAD_SSI1_FS = PAD_ID(PC, 20), + MX21_PAD_SSI1_RXD = PAD_ID(PC, 21), + MX21_PAD_SSI1_TXD = PAD_ID(PC, 22), + MX21_PAD_SSI1_CLK = PAD_ID(PC, 23), + MX21_PAD_SSI2_FS = PAD_ID(PC, 24), + MX21_PAD_SSI2_RXD = PAD_ID(PC, 25), + MX21_PAD_SSI2_TXD = PAD_ID(PC, 26), + MX21_PAD_SSI2_CLK = PAD_ID(PC, 27), + MX21_PAD_SSI3_FS = PAD_ID(PC, 28), + MX21_PAD_SSI3_RXD = PAD_ID(PC, 29), + MX21_PAD_SSI3_TXD = PAD_ID(PC, 30), + MX21_PAD_SSI3_CLK = PAD_ID(PC, 31), + MX21_PAD_I2C_DATA = PAD_ID(PD, 17), + MX21_PAD_I2C_CLK = PAD_ID(PD, 18), + MX21_PAD_CSPI2_SS2 = PAD_ID(PD, 19), + MX21_PAD_CSPI2_SS1 = PAD_ID(PD, 20), + MX21_PAD_CSPI2_SS0 = PAD_ID(PD, 21), + MX21_PAD_CSPI2_SCLK = PAD_ID(PD, 22), + MX21_PAD_CSPI2_MISO = PAD_ID(PD, 23), + 
MX21_PAD_CSPI2_MOSI = PAD_ID(PD, 24), + MX21_PAD_CSPI1_RDY = PAD_ID(PD, 25), + MX21_PAD_CSPI1_SS2 = PAD_ID(PD, 26), + MX21_PAD_CSPI1_SS1 = PAD_ID(PD, 27), + MX21_PAD_CSPI1_SS0 = PAD_ID(PD, 28), + MX21_PAD_CSPI1_SCLK = PAD_ID(PD, 29), + MX21_PAD_CSPI1_MISO = PAD_ID(PD, 30), + MX21_PAD_CSPI1_MOSI = PAD_ID(PD, 31), + MX21_PAD_TEST_WB2 = PAD_ID(PE, 0), + MX21_PAD_TEST_WB1 = PAD_ID(PE, 1), + MX21_PAD_TEST_WB0 = PAD_ID(PE, 2), + MX21_PAD_UART2_CTS = PAD_ID(PE, 3), + MX21_PAD_UART2_RTS = PAD_ID(PE, 4), + MX21_PAD_PWMO = PAD_ID(PE, 5), + MX21_PAD_UART2_TXD = PAD_ID(PE, 6), + MX21_PAD_UART2_RXD = PAD_ID(PE, 7), + MX21_PAD_UART3_TXD = PAD_ID(PE, 8), + MX21_PAD_UART3_RXD = PAD_ID(PE, 9), + MX21_PAD_UART3_CTS = PAD_ID(PE, 10), + MX21_PAD_UART3_RTS = PAD_ID(PE, 11), + MX21_PAD_UART1_TXD = PAD_ID(PE, 12), + MX21_PAD_UART1_RXD = PAD_ID(PE, 13), + MX21_PAD_UART1_CTS = PAD_ID(PE, 14), + MX21_PAD_UART1_RTS = PAD_ID(PE, 15), + MX21_PAD_RTCK = PAD_ID(PE, 16), + MX21_PAD_RESET_OUT = PAD_ID(PE, 17), + MX21_PAD_SD1_D0 = PAD_ID(PE, 18), + MX21_PAD_SD1_D1 = PAD_ID(PE, 19), + MX21_PAD_SD1_D2 = PAD_ID(PE, 20), + MX21_PAD_SD1_D3 = PAD_ID(PE, 21), + MX21_PAD_SD1_CMD = PAD_ID(PE, 22), + MX21_PAD_SD1_CLK = PAD_ID(PE, 23), + MX21_PAD_NFRB = PAD_ID(PF, 0), + MX21_PAD_NFCE = PAD_ID(PF, 1), + MX21_PAD_NFWP = PAD_ID(PF, 2), + MX21_PAD_NFCLE = PAD_ID(PF, 3), + MX21_PAD_NFALE = PAD_ID(PF, 4), + MX21_PAD_NFRE = PAD_ID(PF, 5), + MX21_PAD_NFWE = PAD_ID(PF, 6), + MX21_PAD_NFIO0 = PAD_ID(PF, 7), + MX21_PAD_NFIO1 = PAD_ID(PF, 8), + MX21_PAD_NFIO2 = PAD_ID(PF, 9), + MX21_PAD_NFIO3 = PAD_ID(PF, 10), + MX21_PAD_NFIO4 = PAD_ID(PF, 11), + MX21_PAD_NFIO5 = PAD_ID(PF, 12), + MX21_PAD_NFIO6 = PAD_ID(PF, 13), + MX21_PAD_NFIO7 = PAD_ID(PF, 14), + MX21_PAD_CLKO = PAD_ID(PF, 15), + MX21_PAD_RESERVED = PAD_ID(PF, 16), + MX21_PAD_CS4 = PAD_ID(PF, 21), + MX21_PAD_CS5 = PAD_ID(PF, 22), +}; + +/* Pad names for the pinmux subsystem */ +static const struct pinctrl_pin_desc imx21_pinctrl_pads[] = { + IMX_PINCTRL_PIN(MX21_PAD_LSCLK), + IMX_PINCTRL_PIN(MX21_PAD_LD0), + IMX_PINCTRL_PIN(MX21_PAD_LD1), + IMX_PINCTRL_PIN(MX21_PAD_LD2), + IMX_PINCTRL_PIN(MX21_PAD_LD3), + IMX_PINCTRL_PIN(MX21_PAD_LD4), + IMX_PINCTRL_PIN(MX21_PAD_LD5), + IMX_PINCTRL_PIN(MX21_PAD_LD6), + IMX_PINCTRL_PIN(MX21_PAD_LD7), + IMX_PINCTRL_PIN(MX21_PAD_LD8), + IMX_PINCTRL_PIN(MX21_PAD_LD9), + IMX_PINCTRL_PIN(MX21_PAD_LD10), + IMX_PINCTRL_PIN(MX21_PAD_LD11), + IMX_PINCTRL_PIN(MX21_PAD_LD12), + IMX_PINCTRL_PIN(MX21_PAD_LD13), + IMX_PINCTRL_PIN(MX21_PAD_LD14), + IMX_PINCTRL_PIN(MX21_PAD_LD15), + IMX_PINCTRL_PIN(MX21_PAD_LD16), + IMX_PINCTRL_PIN(MX21_PAD_LD17), + IMX_PINCTRL_PIN(MX21_PAD_REV), + IMX_PINCTRL_PIN(MX21_PAD_CLS), + IMX_PINCTRL_PIN(MX21_PAD_PS), + IMX_PINCTRL_PIN(MX21_PAD_SPL_SPR), + IMX_PINCTRL_PIN(MX21_PAD_HSYNC), + IMX_PINCTRL_PIN(MX21_PAD_VSYNC), + IMX_PINCTRL_PIN(MX21_PAD_CONTRAST), + IMX_PINCTRL_PIN(MX21_PAD_OE_ACD), + IMX_PINCTRL_PIN(MX21_PAD_SD2_D0), + IMX_PINCTRL_PIN(MX21_PAD_SD2_D1), + IMX_PINCTRL_PIN(MX21_PAD_SD2_D2), + IMX_PINCTRL_PIN(MX21_PAD_SD2_D3), + IMX_PINCTRL_PIN(MX21_PAD_SD2_CMD), + IMX_PINCTRL_PIN(MX21_PAD_SD2_CLK), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D0), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D1), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D2), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D3), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D4), + IMX_PINCTRL_PIN(MX21_PAD_CSI_MCLK), + IMX_PINCTRL_PIN(MX21_PAD_CSI_PIXCLK), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D5), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D6), + IMX_PINCTRL_PIN(MX21_PAD_CSI_D7), + IMX_PINCTRL_PIN(MX21_PAD_CSI_VSYNC), + IMX_PINCTRL_PIN(MX21_PAD_CSI_HSYNC), + 
IMX_PINCTRL_PIN(MX21_PAD_USB_BYP), + IMX_PINCTRL_PIN(MX21_PAD_USB_PWR), + IMX_PINCTRL_PIN(MX21_PAD_USB_OC), + IMX_PINCTRL_PIN(MX21_PAD_USBH_ON), + IMX_PINCTRL_PIN(MX21_PAD_USBH1_FS), + IMX_PINCTRL_PIN(MX21_PAD_USBH1_OE), + IMX_PINCTRL_PIN(MX21_PAD_USBH1_TXDM), + IMX_PINCTRL_PIN(MX21_PAD_USBH1_TXDP), + IMX_PINCTRL_PIN(MX21_PAD_USBH1_RXDM), + IMX_PINCTRL_PIN(MX21_PAD_USBH1_RXDP), + IMX_PINCTRL_PIN(MX21_PAD_USBG_SDA), + IMX_PINCTRL_PIN(MX21_PAD_USBG_SCL), + IMX_PINCTRL_PIN(MX21_PAD_USBG_ON), + IMX_PINCTRL_PIN(MX21_PAD_USBG_FS), + IMX_PINCTRL_PIN(MX21_PAD_USBG_OE), + IMX_PINCTRL_PIN(MX21_PAD_USBG_TXDM), + IMX_PINCTRL_PIN(MX21_PAD_USBG_TXDP), + IMX_PINCTRL_PIN(MX21_PAD_USBG_RXDM), + IMX_PINCTRL_PIN(MX21_PAD_USBG_RXDP), + IMX_PINCTRL_PIN(MX21_PAD_TOUT), + IMX_PINCTRL_PIN(MX21_PAD_TIN), + IMX_PINCTRL_PIN(MX21_PAD_SAP_FS), + IMX_PINCTRL_PIN(MX21_PAD_SAP_RXD), + IMX_PINCTRL_PIN(MX21_PAD_SAP_TXD), + IMX_PINCTRL_PIN(MX21_PAD_SAP_CLK), + IMX_PINCTRL_PIN(MX21_PAD_SSI1_FS), + IMX_PINCTRL_PIN(MX21_PAD_SSI1_RXD), + IMX_PINCTRL_PIN(MX21_PAD_SSI1_TXD), + IMX_PINCTRL_PIN(MX21_PAD_SSI1_CLK), + IMX_PINCTRL_PIN(MX21_PAD_SSI2_FS), + IMX_PINCTRL_PIN(MX21_PAD_SSI2_RXD), + IMX_PINCTRL_PIN(MX21_PAD_SSI2_TXD), + IMX_PINCTRL_PIN(MX21_PAD_SSI2_CLK), + IMX_PINCTRL_PIN(MX21_PAD_SSI3_FS), + IMX_PINCTRL_PIN(MX21_PAD_SSI3_RXD), + IMX_PINCTRL_PIN(MX21_PAD_SSI3_TXD), + IMX_PINCTRL_PIN(MX21_PAD_SSI3_CLK), + IMX_PINCTRL_PIN(MX21_PAD_I2C_DATA), + IMX_PINCTRL_PIN(MX21_PAD_I2C_CLK), + IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS2), + IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS1), + IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SS0), + IMX_PINCTRL_PIN(MX21_PAD_CSPI2_SCLK), + IMX_PINCTRL_PIN(MX21_PAD_CSPI2_MISO), + IMX_PINCTRL_PIN(MX21_PAD_CSPI2_MOSI), + IMX_PINCTRL_PIN(MX21_PAD_CSPI1_RDY), + IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS2), + IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS1), + IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SS0), + IMX_PINCTRL_PIN(MX21_PAD_CSPI1_SCLK), + IMX_PINCTRL_PIN(MX21_PAD_CSPI1_MISO), + IMX_PINCTRL_PIN(MX21_PAD_CSPI1_MOSI), + IMX_PINCTRL_PIN(MX21_PAD_TEST_WB2), + IMX_PINCTRL_PIN(MX21_PAD_TEST_WB1), + IMX_PINCTRL_PIN(MX21_PAD_TEST_WB0), + IMX_PINCTRL_PIN(MX21_PAD_UART2_CTS), + IMX_PINCTRL_PIN(MX21_PAD_UART2_RTS), + IMX_PINCTRL_PIN(MX21_PAD_PWMO), + IMX_PINCTRL_PIN(MX21_PAD_UART2_TXD), + IMX_PINCTRL_PIN(MX21_PAD_UART2_RXD), + IMX_PINCTRL_PIN(MX21_PAD_UART3_TXD), + IMX_PINCTRL_PIN(MX21_PAD_UART3_RXD), + IMX_PINCTRL_PIN(MX21_PAD_UART3_CTS), + IMX_PINCTRL_PIN(MX21_PAD_UART3_RTS), + IMX_PINCTRL_PIN(MX21_PAD_UART1_TXD), + IMX_PINCTRL_PIN(MX21_PAD_UART1_RXD), + IMX_PINCTRL_PIN(MX21_PAD_UART1_CTS), + IMX_PINCTRL_PIN(MX21_PAD_UART1_RTS), + IMX_PINCTRL_PIN(MX21_PAD_RTCK), + IMX_PINCTRL_PIN(MX21_PAD_RESET_OUT), + IMX_PINCTRL_PIN(MX21_PAD_SD1_D0), + IMX_PINCTRL_PIN(MX21_PAD_SD1_D1), + IMX_PINCTRL_PIN(MX21_PAD_SD1_D2), + IMX_PINCTRL_PIN(MX21_PAD_SD1_D3), + IMX_PINCTRL_PIN(MX21_PAD_SD1_CMD), + IMX_PINCTRL_PIN(MX21_PAD_SD1_CLK), + IMX_PINCTRL_PIN(MX21_PAD_NFRB), + IMX_PINCTRL_PIN(MX21_PAD_NFCE), + IMX_PINCTRL_PIN(MX21_PAD_NFWP), + IMX_PINCTRL_PIN(MX21_PAD_NFCLE), + IMX_PINCTRL_PIN(MX21_PAD_NFALE), + IMX_PINCTRL_PIN(MX21_PAD_NFRE), + IMX_PINCTRL_PIN(MX21_PAD_NFWE), + IMX_PINCTRL_PIN(MX21_PAD_NFIO0), + IMX_PINCTRL_PIN(MX21_PAD_NFIO1), + IMX_PINCTRL_PIN(MX21_PAD_NFIO2), + IMX_PINCTRL_PIN(MX21_PAD_NFIO3), + IMX_PINCTRL_PIN(MX21_PAD_NFIO4), + IMX_PINCTRL_PIN(MX21_PAD_NFIO5), + IMX_PINCTRL_PIN(MX21_PAD_NFIO6), + IMX_PINCTRL_PIN(MX21_PAD_NFIO7), + IMX_PINCTRL_PIN(MX21_PAD_CLKO), + IMX_PINCTRL_PIN(MX21_PAD_RESERVED), + IMX_PINCTRL_PIN(MX21_PAD_CS4), + IMX_PINCTRL_PIN(MX21_PAD_CS5), +}; + 
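For orientation, here is a minimal standalone sketch (not part of the patch; names and the 32-pins-per-port assumption are borrowed from the imx21 hunk above) of how the PAD_ID() macro flattens a (port, pin) pair into the pin number that imx21_pinctrl_pads[] registers with the pinctrl core:

/*
 * Illustrative only -- mirrors the PAD_ID() encoding used by the
 * i.MX1/i.MX21/i.MX27 pad tables in this series.
 */
#include <stdio.h>

#define PAD_ID(port, pin)	((port) * 32 + (pin))
#define PE			4

int main(void)
{
	/* MX21_PAD_UART1_TXD = PAD_ID(PE, 12) -> 4 * 32 + 12 = 140 */
	printf("MX21_PAD_UART1_TXD = %d\n", PAD_ID(PE, 12));
	return 0;
}

Because each pad number is derived from its port and bit position rather than from its index in the table, placeholder entries can be dropped (as the imx27 hunk below does with the MX27_PAD_UNUSED* pads) without renumbering the remaining pins.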
+static struct imx1_pinctrl_soc_info imx21_pinctrl_info = { + .pins = imx21_pinctrl_pads, + .npins = ARRAY_SIZE(imx21_pinctrl_pads), +}; + +static int __init imx21_pinctrl_probe(struct platform_device *pdev) +{ + return imx1_pinctrl_core_probe(pdev, &imx21_pinctrl_info); +} + +static const struct of_device_id imx21_pinctrl_of_match[] = { + { .compatible = "fsl,imx21-iomuxc", }, + { } +}; +MODULE_DEVICE_TABLE(of, imx21_pinctrl_of_match); + +static struct platform_driver imx21_pinctrl_driver = { + .driver = { + .name = "imx21-pinctrl", + .owner = THIS_MODULE, + .of_match_table = imx21_pinctrl_of_match, + }, + .remove = imx1_pinctrl_core_remove, +}; +module_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe); + +MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); +MODULE_DESCRIPTION("Freescale i.MX21 pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/freescale/pinctrl-imx23.c index e76d75c9d1ba..df79096becb0 100644 --- a/drivers/pinctrl/pinctrl-imx23.c +++ b/drivers/pinctrl/freescale/pinctrl-imx23.c @@ -272,7 +272,7 @@ static int imx23_pinctrl_probe(struct platform_device *pdev) return mxs_pinctrl_probe(pdev, &imx23_pinctrl_data); } -static struct of_device_id imx23_pinctrl_of_match[] = { +static const struct of_device_id imx23_pinctrl_of_match[] = { { .compatible = "fsl,imx23-pinctrl", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c index 1aae1b61c4dc..550e6d77ac2b 100644 --- a/drivers/pinctrl/pinctrl-imx25.c +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c @@ -315,7 +315,7 @@ static struct imx_pinctrl_soc_info imx25_pinctrl_info = { .npins = ARRAY_SIZE(imx25_pinctrl_pads), }; -static struct of_device_id imx25_pinctrl_of_match[] = { +static const struct of_device_id imx25_pinctrl_of_match[] = { { .compatible = "fsl,imx25-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c index 417c99205bc2..945eccadea74 100644 --- a/drivers/pinctrl/pinctrl-imx27.c +++ b/drivers/pinctrl/freescale/pinctrl-imx27.c @@ -63,10 +63,6 @@ enum imx27_pads { MX27_PAD_CONTRAST = PAD_ID(PA, 30), MX27_PAD_OE_ACD = PAD_ID(PA, 31), - MX27_PAD_UNUSED0 = PAD_ID(PB, 0), - MX27_PAD_UNUSED1 = PAD_ID(PB, 1), - MX27_PAD_UNUSED2 = PAD_ID(PB, 2), - MX27_PAD_UNUSED3 = PAD_ID(PB, 3), MX27_PAD_SD2_D0 = PAD_ID(PB, 4), MX27_PAD_SD2_D1 = PAD_ID(PB, 5), MX27_PAD_SD2_D2 = PAD_ID(PB, 6), @@ -96,11 +92,6 @@ enum imx27_pads { MX27_PAD_USBH1_RXDM = PAD_ID(PB, 30), MX27_PAD_USBH1_RXDP = PAD_ID(PB, 31), - MX27_PAD_UNUSED4 = PAD_ID(PC, 0), - MX27_PAD_UNUSED5 = PAD_ID(PC, 1), - MX27_PAD_UNUSED6 = PAD_ID(PC, 2), - MX27_PAD_UNUSED7 = PAD_ID(PC, 3), - MX27_PAD_UNUSED8 = PAD_ID(PC, 4), MX27_PAD_I2C2_SDA = PAD_ID(PC, 5), MX27_PAD_I2C2_SCL = PAD_ID(PC, 6), MX27_PAD_USBOTG_DATA5 = PAD_ID(PC, 7), @@ -188,12 +179,6 @@ enum imx27_pads { MX27_PAD_SD1_CLK = PAD_ID(PE, 23), MX27_PAD_USBOTG_CLK = PAD_ID(PE, 24), MX27_PAD_USBOTG_DATA7 = PAD_ID(PE, 25), - MX27_PAD_UNUSED9 = PAD_ID(PE, 26), - MX27_PAD_UNUSED10 = PAD_ID(PE, 27), - MX27_PAD_UNUSED11 = PAD_ID(PE, 28), - MX27_PAD_UNUSED12 = PAD_ID(PE, 29), - MX27_PAD_UNUSED13 = PAD_ID(PE, 30), - MX27_PAD_UNUSED14 = PAD_ID(PE, 31), MX27_PAD_NFRB = PAD_ID(PF, 0), MX27_PAD_NFCLE = PAD_ID(PF, 1), @@ -219,14 +204,6 @@ enum imx27_pads { MX27_PAD_CS4_B = PAD_ID(PF, 21), MX27_PAD_CS5_B = PAD_ID(PF, 22), MX27_PAD_ATA_DATA15 = PAD_ID(PF, 23), - MX27_PAD_UNUSED15 = PAD_ID(PF, 24), - MX27_PAD_UNUSED16 = PAD_ID(PF, 25), - 
MX27_PAD_UNUSED17 = PAD_ID(PF, 26), - MX27_PAD_UNUSED18 = PAD_ID(PF, 27), - MX27_PAD_UNUSED19 = PAD_ID(PF, 28), - MX27_PAD_UNUSED20 = PAD_ID(PF, 29), - MX27_PAD_UNUSED21 = PAD_ID(PF, 30), - MX27_PAD_UNUSED22 = PAD_ID(PF, 31), }; /* Pad names for the pinmux subsystem */ @@ -264,10 +241,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX27_PAD_CONTRAST), IMX_PINCTRL_PIN(MX27_PAD_OE_ACD), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED0), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED1), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED2), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED3), IMX_PINCTRL_PIN(MX27_PAD_SD2_D0), IMX_PINCTRL_PIN(MX27_PAD_SD2_D1), IMX_PINCTRL_PIN(MX27_PAD_SD2_D2), @@ -297,11 +270,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX27_PAD_USBH1_RXDM), IMX_PINCTRL_PIN(MX27_PAD_USBH1_RXDP), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED4), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED5), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED6), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED7), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED8), IMX_PINCTRL_PIN(MX27_PAD_I2C2_SDA), IMX_PINCTRL_PIN(MX27_PAD_I2C2_SCL), IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA5), @@ -389,12 +357,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX27_PAD_SD1_CLK), IMX_PINCTRL_PIN(MX27_PAD_USBOTG_CLK), IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA7), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED9), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED10), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED11), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED12), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED13), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED14), IMX_PINCTRL_PIN(MX27_PAD_NFRB), IMX_PINCTRL_PIN(MX27_PAD_NFCLE), @@ -420,14 +382,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX27_PAD_CS4_B), IMX_PINCTRL_PIN(MX27_PAD_CS5_B), IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA15), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED15), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED16), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED17), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED18), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED19), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED20), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED21), - IMX_PINCTRL_PIN(MX27_PAD_UNUSED22), }; static struct imx1_pinctrl_soc_info imx27_pinctrl_info = { @@ -435,17 +389,11 @@ static struct imx1_pinctrl_soc_info imx27_pinctrl_info = { .npins = ARRAY_SIZE(imx27_pinctrl_pads), }; -static struct of_device_id imx27_pinctrl_of_match[] = { +static const struct of_device_id imx27_pinctrl_of_match[] = { { .compatible = "fsl,imx27-iomuxc", }, { /* sentinel */ } }; -struct imx27_pinctrl_private { - int num_gpio_childs; - struct platform_device **gpio_dev; - struct mxc_gpio_platform_data *gpio_pdata; -}; - static int imx27_pinctrl_probe(struct platform_device *pdev) { return imx1_pinctrl_core_probe(pdev, &imx27_pinctrl_info); diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/freescale/pinctrl-imx28.c index 79c9c8d296af..3bd45da21229 100644 --- a/drivers/pinctrl/pinctrl-imx28.c +++ b/drivers/pinctrl/freescale/pinctrl-imx28.c @@ -388,7 +388,7 @@ static int imx28_pinctrl_probe(struct platform_device *pdev) return mxs_pinctrl_probe(pdev, &imx28_pinctrl_data); } -static struct of_device_id imx28_pinctrl_of_match[] = { +static const struct of_device_id imx28_pinctrl_of_match[] = { { .compatible = "fsl,imx28-pinctrl", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c index 278a04ae8940..6bfbcd0112c1 100644 --- a/drivers/pinctrl/pinctrl-imx35.c +++ b/drivers/pinctrl/freescale/pinctrl-imx35.c @@ -1005,7 +1005,7 @@ static struct imx_pinctrl_soc_info 
imx35_pinctrl_info = { .npins = ARRAY_SIZE(imx35_pinctrl_pads), }; -static struct of_device_id imx35_pinctrl_of_match[] = { +static const struct of_device_id imx35_pinctrl_of_match[] = { { .compatible = "fsl,imx35-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c index b06feed1b038..e8bd604ab147 100644 --- a/drivers/pinctrl/pinctrl-imx50.c +++ b/drivers/pinctrl/freescale/pinctrl-imx50.c @@ -391,7 +391,7 @@ static struct imx_pinctrl_soc_info imx50_pinctrl_info = { .npins = ARRAY_SIZE(imx50_pinctrl_pads), }; -static struct of_device_id imx50_pinctrl_of_match[] = { +static const struct of_device_id imx50_pinctrl_of_match[] = { { .compatible = "fsl,imx50-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c index 19ab182bef61..b818051db7c9 100644 --- a/drivers/pinctrl/pinctrl-imx51.c +++ b/drivers/pinctrl/freescale/pinctrl-imx51.c @@ -768,7 +768,7 @@ static struct imx_pinctrl_soc_info imx51_pinctrl_info = { .npins = ARRAY_SIZE(imx51_pinctrl_pads), }; -static struct of_device_id imx51_pinctrl_of_match[] = { +static const struct of_device_id imx51_pinctrl_of_match[] = { { .compatible = "fsl,imx51-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c index f8d45c4cfde7..1884d53cf750 100644 --- a/drivers/pinctrl/pinctrl-imx53.c +++ b/drivers/pinctrl/freescale/pinctrl-imx53.c @@ -454,7 +454,7 @@ static struct imx_pinctrl_soc_info imx53_pinctrl_info = { .npins = ARRAY_SIZE(imx53_pinctrl_pads), }; -static struct of_device_id imx53_pinctrl_of_match[] = { +static const struct of_device_id imx53_pinctrl_of_match[] = { { .compatible = "fsl,imx53-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c index db2a1489bd99..656c4b08cc2e 100644 --- a/drivers/pinctrl/pinctrl-imx6dl.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c @@ -460,7 +460,7 @@ static struct imx_pinctrl_soc_info imx6dl_pinctrl_info = { .npins = ARRAY_SIZE(imx6dl_pinctrl_pads), }; -static struct of_device_id imx6dl_pinctrl_of_match[] = { +static const struct of_device_id imx6dl_pinctrl_of_match[] = { { .compatible = "fsl,imx6dl-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c index 8eb5ac1bd5f6..59bb5b4ec0f6 100644 --- a/drivers/pinctrl/pinctrl-imx6q.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c @@ -466,7 +466,7 @@ static struct imx_pinctrl_soc_info imx6q_pinctrl_info = { .npins = ARRAY_SIZE(imx6q_pinctrl_pads), }; -static struct of_device_id imx6q_pinctrl_of_match[] = { +static const struct of_device_id imx6q_pinctrl_of_match[] = { { .compatible = "fsl,imx6q-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c index f21b7389df3c..e0924bd7b98c 100644 --- a/drivers/pinctrl/pinctrl-imx6sl.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c @@ -366,10 +366,11 @@ static struct imx_pinctrl_soc_info imx6sl_pinctrl_info = { .npins = ARRAY_SIZE(imx6sl_pinctrl_pads), }; -static struct of_device_id imx6sl_pinctrl_of_match[] = { +static const struct of_device_id imx6sl_pinctrl_of_match[] = { { .compatible = "fsl,imx6sl-iomuxc", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx6sl_pinctrl_of_match); static int imx6sl_pinctrl_probe(struct platform_device *pdev) { diff --git a/drivers/pinctrl/pinctrl-imx6sx.c 
b/drivers/pinctrl/freescale/pinctrl-imx6sx.c index 09758a56b9df..840344c8580d 100644 --- a/drivers/pinctrl/pinctrl-imx6sx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c @@ -370,7 +370,7 @@ static struct imx_pinctrl_soc_info imx6sx_pinctrl_info = { .npins = ARRAY_SIZE(imx6sx_pinctrl_pads), }; -static struct of_device_id imx6sx_pinctrl_of_match[] = { +static const struct of_device_id imx6sx_pinctrl_of_match[] = { { .compatible = "fsl,imx6sx-iomuxc", }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c index 40c76f26998c..f98c6bb0f769 100644 --- a/drivers/pinctrl/pinctrl-mxs.c +++ b/drivers/pinctrl/freescale/pinctrl-mxs.c @@ -21,7 +21,7 @@ #include <linux/pinctrl/pinmux.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include "core.h" +#include "../core.h" #include "pinctrl-mxs.h" #define SUFFIX_LEN 4 @@ -195,8 +195,8 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, return 0; } -static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) { struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev); struct mxs_group *g = &d->soc->groups[group]; @@ -223,7 +223,7 @@ static const struct pinmux_ops mxs_pinmux_ops = { .get_functions_count = mxs_pinctrl_get_funcs_count, .get_function_name = mxs_pinctrl_get_func_name, .get_function_groups = mxs_pinctrl_get_func_groups, - .enable = mxs_pinctrl_enable, + .set_mux = mxs_pinctrl_set_mux, }; static int mxs_pinconf_get(struct pinctrl_dev *pctldev, diff --git a/drivers/pinctrl/pinctrl-mxs.h b/drivers/pinctrl/freescale/pinctrl-mxs.h index fdd88d0bae22..fdd88d0bae22 100644 --- a/drivers/pinctrl/pinctrl-mxs.h +++ b/drivers/pinctrl/freescale/pinctrl-mxs.h diff --git a/drivers/pinctrl/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c index bddd913d28ba..b788e1578954 100644 --- a/drivers/pinctrl/pinctrl-vf610.c +++ b/drivers/pinctrl/freescale/pinctrl-vf610.c @@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = { static struct imx_pinctrl_soc_info vf610_pinctrl_info = { .pins = vf610_pinctrl_pads, .npins = ARRAY_SIZE(vf610_pinctrl_pads), - .flags = ZERO_OFFSET_VALID | SHARE_MUX_CONF_REG, + .flags = SHARE_MUX_CONF_REG, }; static struct of_device_id vf610_pinctrl_of_match[] = { diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c index 9908374f8f92..f3b426cdaf8f 100644 --- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c +++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c @@ -259,8 +259,8 @@ static int mvebu_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned fid, return 0; } -static int mvebu_pinmux_enable(struct pinctrl_dev *pctldev, unsigned fid, - unsigned gid) +static int mvebu_pinmux_set(struct pinctrl_dev *pctldev, unsigned fid, + unsigned gid) { struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct mvebu_pinctrl_function *func = &pctl->functions[fid]; @@ -344,7 +344,7 @@ static const struct pinmux_ops mvebu_pinmux_ops = { .get_function_groups = mvebu_pinmux_get_groups, .gpio_request_enable = mvebu_pinmux_gpio_request_enable, .gpio_set_direction = mvebu_pinmux_gpio_set_direction, - .enable = mvebu_pinmux_enable, + .set_mux = mvebu_pinmux_set, }; static int mvebu_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig new file mode 100644 index 000000000000..d48a5aa24a29 --- 
/dev/null +++ b/drivers/pinctrl/nomadik/Kconfig @@ -0,0 +1,51 @@ +if ARCH_U8500 + +config PINCTRL_ABX500 + bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions" + depends on AB8500_CORE + select GENERIC_PINCONF + help + Select this to enable the ABx500 family IC GPIO driver + +config PINCTRL_AB8500 + bool "AB8500 pin controller driver" + depends on PINCTRL_ABX500 && ARCH_U8500 + +config PINCTRL_AB8540 + bool "AB8540 pin controller driver" + depends on PINCTRL_ABX500 && ARCH_U8500 + +config PINCTRL_AB9540 + bool "AB9540 pin controller driver" + depends on PINCTRL_ABX500 && ARCH_U8500 + +config PINCTRL_AB8505 + bool "AB8505 pin controller driver" + depends on PINCTRL_ABX500 && ARCH_U8500 + +endif + +if (ARCH_U8500 || ARCH_NOMADIK) + +config PINCTRL_NOMADIK + bool "Nomadik pin controller driver" + depends on ARCH_U8500 || ARCH_NOMADIK + select PINMUX + select PINCONF + select GPIOLIB + select OF_GPIO + select GPIOLIB_IRQCHIP + +config PINCTRL_STN8815 + bool "STN8815 pin controller driver" + depends on PINCTRL_NOMADIK && ARCH_NOMADIK + +config PINCTRL_DB8500 + bool "DB8500 pin controller driver" + depends on PINCTRL_NOMADIK && ARCH_U8500 + +config PINCTRL_DB8540 + bool "DB8540 pin controller driver" + depends on PINCTRL_NOMADIK && ARCH_U8500 + +endif diff --git a/drivers/pinctrl/nomadik/Makefile b/drivers/pinctrl/nomadik/Makefile new file mode 100644 index 000000000000..30b27f18cd52 --- /dev/null +++ b/drivers/pinctrl/nomadik/Makefile @@ -0,0 +1,10 @@ +# Nomadik family pin control drivers +obj-$(CONFIG_PINCTRL_ABX500) += pinctrl-abx500.o +obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o +obj-$(CONFIG_PINCTRL_AB8540) += pinctrl-ab8540.o +obj-$(CONFIG_PINCTRL_AB9540) += pinctrl-ab9540.o +obj-$(CONFIG_PINCTRL_AB8505) += pinctrl-ab8505.o +obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o +obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o +obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o +obj-$(CONFIG_PINCTRL_DB8540) += pinctrl-nomadik-db8540.o diff --git a/drivers/pinctrl/pinctrl-ab8500.c b/drivers/pinctrl/nomadik/pinctrl-ab8500.c index 2ac2d0ad3025..2ac2d0ad3025 100644 --- a/drivers/pinctrl/pinctrl-ab8500.c +++ b/drivers/pinctrl/nomadik/pinctrl-ab8500.c diff --git a/drivers/pinctrl/pinctrl-ab8505.c b/drivers/pinctrl/nomadik/pinctrl-ab8505.c index bf0ef4ac376f..bf0ef4ac376f 100644 --- a/drivers/pinctrl/pinctrl-ab8505.c +++ b/drivers/pinctrl/nomadik/pinctrl-ab8505.c diff --git a/drivers/pinctrl/pinctrl-ab8540.c b/drivers/pinctrl/nomadik/pinctrl-ab8540.c index 9867535d49c1..9867535d49c1 100644 --- a/drivers/pinctrl/pinctrl-ab8540.c +++ b/drivers/pinctrl/nomadik/pinctrl-ab8540.c diff --git a/drivers/pinctrl/pinctrl-ab9540.c b/drivers/pinctrl/nomadik/pinctrl-ab9540.c index 1a281ca95dac..1a281ca95dac 100644 --- a/drivers/pinctrl/pinctrl-ab9540.c +++ b/drivers/pinctrl/nomadik/pinctrl-ab9540.c diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 163da9c3ea0e..47f493149863 100644 --- a/drivers/pinctrl/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -32,8 +32,9 @@ #include <linux/pinctrl/machine.h> #include "pinctrl-abx500.h" -#include "core.h" -#include "pinconf.h" +#include "../core.h" +#include "../pinconf.h" +#include "../pinctrl-utils.h" /* * The AB9540 and AB8540 GPIO support are extended versions @@ -620,8 +621,7 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s, } else seq_printf(s, " %-9s", chip->get(chip, offset) ? 
"hi" : "lo"); - if (pctldev) - mode = abx500_get_mode(pctldev, chip, offset); + mode = abx500_get_mode(pctldev, chip, offset); seq_printf(s, " %s", (mode < 0) ? "unknown" : modes[mode]); @@ -709,8 +709,8 @@ static int abx500_pmx_get_func_groups(struct pinctrl_dev *pctldev, return 0; } -static int abx500_pmx_enable(struct pinctrl_dev *pctldev, unsigned function, - unsigned group) +static int abx500_pmx_set(struct pinctrl_dev *pctldev, unsigned function, + unsigned group) { struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev); struct gpio_chip *chip = &pct->chip; @@ -737,20 +737,6 @@ static int abx500_pmx_enable(struct pinctrl_dev *pctldev, unsigned function, return ret; } -static void abx500_pmx_disable(struct pinctrl_dev *pctldev, - unsigned function, unsigned group) -{ - struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev); - const struct abx500_pingroup *g; - - g = &pct->soc->groups[group]; - if (g->altsetting < 0) - return; - - /* FIXME: poke out the mux, set the pin to some default state? */ - dev_dbg(pct->dev, "disable group %s, %u pins\n", g->name, g->npins); -} - static int abx500_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) @@ -798,8 +784,7 @@ static const struct pinmux_ops abx500_pinmux_ops = { .get_functions_count = abx500_pmx_get_funcs_cnt, .get_function_name = abx500_pmx_get_func_name, .get_function_groups = abx500_pmx_get_func_groups, - .enable = abx500_pmx_enable, - .disable = abx500_pmx_disable, + .set_mux = abx500_pmx_set, .gpio_request_enable = abx500_gpio_request_enable, .gpio_disable_free = abx500_gpio_disable_free, }; @@ -842,41 +827,6 @@ static void abx500_pin_dbg_show(struct pinctrl_dev *pctldev, chip->base + offset - 1); } -static void abx500_dt_free_map(struct pinctrl_dev *pctldev, - struct pinctrl_map *map, unsigned num_maps) -{ - int i; - - for (i = 0; i < num_maps; i++) - if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) - kfree(map[i].data.configs.configs); - kfree(map); -} - -static int abx500_dt_reserve_map(struct pinctrl_map **map, - unsigned *reserved_maps, - unsigned *num_maps, - unsigned reserve) -{ - unsigned old_num = *reserved_maps; - unsigned new_num = *num_maps + reserve; - struct pinctrl_map *new_map; - - if (old_num >= new_num) - return 0; - - new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL); - if (!new_map) - return -ENOMEM; - - memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map)); - - *map = new_map; - *reserved_maps = new_num; - - return 0; -} - static int abx500_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, @@ -942,19 +892,32 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev, unsigned long *configs; unsigned int nconfigs = 0; bool has_config = 0; - unsigned reserve = 0; struct property *prop; const char *group, *gpio_name; struct device_node *np_config; ret = of_property_read_string(np, "ste,function", &function); - if (ret >= 0) - reserve = 1; + if (ret >= 0) { + ret = of_property_count_strings(np, "ste,pins"); + if (ret < 0) + goto exit; + + ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, + num_maps, ret); + if (ret < 0) + goto exit; + + of_property_for_each_string(np, "ste,pins", prop, group) { + ret = abx500_dt_add_map_mux(map, reserved_maps, + num_maps, group, function); + if (ret < 0) + goto exit; + } + } ret = pinconf_generic_parse_dt_config(np, &configs, &nconfigs); if (nconfigs) has_config = 1; - np_config = of_parse_phandle(np, "ste,config", 0); if 
(np_config) { ret = pinconf_generic_parse_dt_config(np_config, &configs, @@ -963,28 +926,18 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev, goto exit; has_config |= nconfigs; } + if (has_config) { + ret = of_property_count_strings(np, "ste,pins"); + if (ret < 0) + goto exit; - ret = of_property_count_strings(np, "ste,pins"); - if (ret < 0) - goto exit; - - if (has_config) - reserve++; - - reserve *= ret; - - ret = abx500_dt_reserve_map(map, reserved_maps, num_maps, reserve); - if (ret < 0) - goto exit; + ret = pinctrl_utils_reserve_map(pctldev, map, + reserved_maps, + num_maps, ret); + if (ret < 0) + goto exit; - of_property_for_each_string(np, "ste,pins", prop, group) { - if (function) { - ret = abx500_dt_add_map_mux(map, reserved_maps, - num_maps, group, function); - if (ret < 0) - goto exit; - } - if (has_config) { + of_property_for_each_string(np, "ste,pins", prop, group) { gpio_name = abx500_find_pin_name(pctldev, group); ret = abx500_dt_add_map_configs(map, reserved_maps, @@ -992,8 +945,8 @@ static int abx500_dt_subnode_to_map(struct pinctrl_dev *pctldev, if (ret < 0) goto exit; } - } + exit: return ret; } @@ -1014,7 +967,7 @@ static int abx500_dt_node_to_map(struct pinctrl_dev *pctldev, ret = abx500_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); if (ret < 0) { - abx500_dt_free_map(pctldev, *map, *num_maps); + pinctrl_utils_dt_free_map(pctldev, *map, *num_maps); return ret; } } @@ -1028,7 +981,7 @@ static const struct pinctrl_ops abx500_pinctrl_ops = { .get_group_pins = abx500_get_group_pins, .pin_dbg_show = abx500_pin_dbg_show, .dt_node_to_map = abx500_dt_node_to_map, - .dt_free_map = abx500_dt_free_map, + .dt_free_map = pinctrl_utils_dt_free_map, }; static int abx500_pin_config_get(struct pinctrl_dev *pctldev, diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/nomadik/pinctrl-abx500.h index 2beef3bfe9ca..2beef3bfe9ca 100644 --- a/drivers/pinctrl/pinctrl-abx500.h +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.h diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c index c74840729648..c74840729648 100644 --- a/drivers/pinctrl/pinctrl-nomadik-db8500.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c diff --git a/drivers/pinctrl/pinctrl-nomadik-db8540.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c index d7ba5443bae0..d7ba5443bae0 100644 --- a/drivers/pinctrl/pinctrl-nomadik-db8540.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c diff --git a/drivers/pinctrl/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c index ed39dcafd4f8..ed39dcafd4f8 100644 --- a/drivers/pinctrl/pinctrl-nomadik-stn8815.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 8f6f16ef73f3..3c29d9187146 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -31,7 +31,8 @@ /* Since we request GPIOs from ourself */ #include <linux/pinctrl/consumer.h> #include "pinctrl-nomadik.h" -#include "core.h" +#include "../core.h" +#include "../pinctrl-utils.h" /* * The GPIO module in the Nomadik family of Systems-on-Chip is an @@ -985,6 +986,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s, container_of(chip, struct nmk_gpio_chip, chip); int mode; bool is_out; + bool data_out; bool pull; u32 bit = 1 << offset; const char *modes[] = { @@ -997,28 +999,41 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s, [NMK_GPIO_ALT_C+3] = "altC3", 
[NMK_GPIO_ALT_C+4] = "altC4", }; + const char *pulls[] = { + "none ", + "pull down", + "pull up ", + }; clk_enable(nmk_chip->clk); is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & bit); pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit); + data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & bit); mode = nmk_gpio_get_mode(gpio); if ((mode == NMK_GPIO_ALT_C) && pctldev) mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio); - seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s", - gpio, label ?: "(none)", - is_out ? "out" : "in ", - chip->get - ? (chip->get(chip, offset) ? "hi" : "lo") - : "? ", - (mode < 0) ? "unknown" : modes[mode], - pull ? "pull" : "none"); - - if (!is_out) { + if (is_out) { + seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s", + gpio, + label ?: "(none)", + data_out ? "hi" : "lo", + (mode < 0) ? "unknown" : modes[mode]); + } else { int irq = gpio_to_irq(gpio); struct irq_desc *desc = irq_to_desc(irq); + int pullidx = 0; + + if (pull) + pullidx = data_out ? 1 : 2; - /* This races with request_irq(), set_irq_type(), + seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s", + gpio, + label ?: "(none)", + pulls[pullidx], + (mode < 0) ? "unknown" : modes[mode]); + /* + * This races with request_irq(), set_irq_type(), * and set_irq_wake() ... but those are "rare". */ if (irq > 0 && desc && desc->action) { @@ -1338,39 +1353,6 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset); } -static void nmk_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, - struct pinctrl_map *map, unsigned num_maps) -{ - int i; - - for (i = 0; i < num_maps; i++) - if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) - kfree(map[i].data.configs.configs); - kfree(map); -} - -static int nmk_dt_reserve_map(struct pinctrl_map **map, unsigned *reserved_maps, - unsigned *num_maps, unsigned reserve) -{ - unsigned old_num = *reserved_maps; - unsigned new_num = *num_maps + reserve; - struct pinctrl_map *new_map; - - if (old_num >= new_num) - return 0; - - new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL); - if (!new_map) - return -ENOMEM; - - memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map)); - - *map = new_map; - *reserved_maps = new_num; - - return 0; -} - static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, const char *function) @@ -1537,51 +1519,55 @@ static int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, const char *function = NULL; unsigned long configs = 0; bool has_config = 0; - unsigned reserve = 0; struct property *prop; const char *group, *gpio_name; struct device_node *np_config; ret = of_property_read_string(np, "ste,function", &function); - if (ret >= 0) - reserve = 1; - - has_config = nmk_pinctrl_dt_get_config(np, &configs); - - np_config = of_parse_phandle(np, "ste,config", 0); - if (np_config) - has_config |= nmk_pinctrl_dt_get_config(np_config, &configs); - - ret = of_property_count_strings(np, "ste,pins"); - if (ret < 0) - goto exit; - - if (has_config) - reserve++; - - reserve *= ret; - - ret = nmk_dt_reserve_map(map, reserved_maps, num_maps, reserve); - if (ret < 0) - goto exit; - - of_property_for_each_string(np, "ste,pins", prop, group) { - if (function) { + if (ret >= 0) { + ret = of_property_count_strings(np, "ste,pins"); + if (ret < 0) + goto exit; + + ret = pinctrl_utils_reserve_map(pctldev, map, + reserved_maps, + num_maps, ret); + if (ret < 0) + goto exit; + + of_property_for_each_string(np, 
"ste,pins", prop, group) { ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps, group, function); if (ret < 0) goto exit; } - if (has_config) { + } + + has_config = nmk_pinctrl_dt_get_config(np, &configs); + np_config = of_parse_phandle(np, "ste,config", 0); + if (np_config) + has_config |= nmk_pinctrl_dt_get_config(np_config, &configs); + if (has_config) { + ret = of_property_count_strings(np, "ste,pins"); + if (ret < 0) + goto exit; + ret = pinctrl_utils_reserve_map(pctldev, map, + reserved_maps, + num_maps, ret); + if (ret < 0) + goto exit; + + of_property_for_each_string(np, "ste,pins", prop, group) { gpio_name = nmk_find_pin_name(pctldev, group); - ret = nmk_dt_add_map_configs(map, reserved_maps, num_maps, - gpio_name, &configs, 1); + ret = nmk_dt_add_map_configs(map, reserved_maps, + num_maps, + gpio_name, &configs, 1); if (ret < 0) goto exit; } - } + exit: return ret; } @@ -1602,7 +1588,7 @@ static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map, &reserved_maps, num_maps); if (ret < 0) { - nmk_pinctrl_dt_free_map(pctldev, *map, *num_maps); + pinctrl_utils_dt_free_map(pctldev, *map, *num_maps); return ret; } } @@ -1616,7 +1602,7 @@ static const struct pinctrl_ops nmk_pinctrl_ops = { .get_group_pins = nmk_get_group_pins, .pin_dbg_show = nmk_pin_dbg_show, .dt_node_to_map = nmk_pinctrl_dt_node_to_map, - .dt_free_map = nmk_pinctrl_dt_free_map, + .dt_free_map = pinctrl_utils_dt_free_map, }; static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev) @@ -1647,8 +1633,8 @@ static int nmk_pmx_get_func_groups(struct pinctrl_dev *pctldev, return 0; } -static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function, - unsigned group) +static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function, + unsigned group) { struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); const struct nmk_pingroup *g; @@ -1765,21 +1751,6 @@ out_glitch: return ret; } -static void nmk_pmx_disable(struct pinctrl_dev *pctldev, - unsigned function, unsigned group) -{ - struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev); - const struct nmk_pingroup *g; - - g = &npct->soc->groups[group]; - - if (g->altsetting < 0) - return; - - /* Poke out the mux, set the pin to some default state? 
*/ - dev_dbg(npct->dev, "disable group %s, %u pins\n", g->name, g->npins); -} - static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) @@ -1825,8 +1796,7 @@ static const struct pinmux_ops nmk_pinmux_ops = { .get_functions_count = nmk_pmx_get_funcs_cnt, .get_function_name = nmk_pmx_get_func_name, .get_function_groups = nmk_pmx_get_func_groups, - .enable = nmk_pmx_enable, - .disable = nmk_pmx_disable, + .set_mux = nmk_pmx_set, .gpio_request_enable = nmk_gpio_request_enable, .gpio_disable_free = nmk_gpio_disable_free, }; diff --git a/drivers/pinctrl/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h index d8215f1e70c7..d8215f1e70c7 100644 --- a/drivers/pinctrl/pinctrl-nomadik.h +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c index 5c44feb54ebb..8434439c5017 100644 --- a/drivers/pinctrl/pinctrl-adi2.c +++ b/drivers/pinctrl/pinctrl-adi2.c @@ -401,7 +401,7 @@ static int adi_gpio_irq_type(struct irq_data *d, unsigned int type) if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { - snprintf(buf, 16, "gpio-irq%d", irq); + snprintf(buf, 16, "gpio-irq%u", irq); port_setup(port, d->hwirq, true); } else goto out; @@ -619,8 +619,8 @@ static struct pinctrl_ops adi_pctrl_ops = { .get_group_pins = adi_get_group_pins, }; -static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned func_id, - unsigned group_id) +static int adi_pinmux_set(struct pinctrl_dev *pctldev, unsigned func_id, + unsigned group_id) { struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev); struct gpio_port *port; @@ -652,35 +652,6 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned func_id, return 0; } -static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned func_id, - unsigned group_id) -{ - struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev); - struct gpio_port *port; - struct pinctrl_gpio_range *range; - unsigned long flags; - unsigned short *mux, pin; - - mux = (unsigned short *)pinctrl->soc->groups[group_id].mux; - - while (*mux) { - pin = P_IDENT(*mux); - - range = pinctrl_find_gpio_range_from_pin(pctldev, pin); - if (range == NULL) /* should not happen */ - return; - - port = container_of(range->gc, struct gpio_port, chip); - - spin_lock_irqsave(&port->lock, flags); - - port_setup(port, pin_to_offset(range, pin), true); - mux++; - - spin_unlock_irqrestore(&port->lock, flags); - } -} - static int adi_pinmux_get_funcs_count(struct pinctrl_dev *pctldev) { struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev); @@ -727,8 +698,7 @@ static int adi_pinmux_request_gpio(struct pinctrl_dev *pctldev, } static struct pinmux_ops adi_pinmux_ops = { - .enable = adi_pinmux_enable, - .disable = adi_pinmux_disable, + .set_mux = adi_pinmux_set, .get_functions_count = adi_pinmux_get_funcs_count, .get_function_name = adi_pinmux_get_func_name, .get_function_groups = adi_pinmux_get_groups, @@ -979,7 +949,7 @@ static int adi_gpio_probe(struct platform_device *pdev) struct gpio_port *port; char pinctrl_devname[DEVNAME_SIZE]; static int gpio; - int ret = 0, ret1; + int ret = 0; pdata = dev->platform_data; if (!pdata) @@ -1057,7 +1027,7 @@ static int adi_gpio_probe(struct platform_device *pdev) return 0; out_remove_gpiochip: - ret1 = gpiochip_remove(&port->chip); + gpiochip_remove(&port->chip); out_remove_domain: if (port->pint) irq_domain_remove(port->domain); @@ -1068,12 +1038,10 @@ 
out_remove_domain: static int adi_gpio_remove(struct platform_device *pdev) { struct gpio_port *port = platform_get_drvdata(pdev); - int ret; u8 offset; list_del(&port->node); - gpiochip_remove_pin_ranges(&port->chip); - ret = gpiochip_remove(&port->chip); + gpiochip_remove(&port->chip); if (port->pint) { for (offset = 0; offset < port->width; offset++) irq_dispose_mapping(irq_find_mapping(port->domain, @@ -1081,7 +1049,7 @@ static int adi_gpio_remove(struct platform_device *pdev) irq_domain_remove(port->domain); } - return ret; + return 0; } static int adi_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c index c862f9c0e9ce..1f790a4b83fe 100644 --- a/drivers/pinctrl/pinctrl-as3722.c +++ b/drivers/pinctrl/pinctrl-as3722.c @@ -230,7 +230,7 @@ static int as3722_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, return 0; } -static int as3722_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function, +static int as3722_pinctrl_set(struct pinctrl_dev *pctldev, unsigned function, unsigned group) { struct as3722_pctrl_info *as_pci = pinctrl_dev_get_drvdata(pctldev); @@ -327,7 +327,7 @@ static const struct pinmux_ops as3722_pinmux_ops = { .get_functions_count = as3722_pinctrl_get_funcs_count, .get_function_name = as3722_pinctrl_get_func_name, .get_function_groups = as3722_pinctrl_get_func_groups, - .enable = as3722_pinctrl_enable, + .set_mux = as3722_pinctrl_set, .gpio_request_enable = as3722_pinctrl_gpio_request_enable, .gpio_set_direction = as3722_pinctrl_gpio_set_direction, }; @@ -565,7 +565,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev) { struct as3722_pctrl_info *as_pci; int ret; - int tret; as_pci = devm_kzalloc(&pdev->dev, sizeof(*as_pci), GFP_KERNEL); if (!as_pci) @@ -611,10 +610,7 @@ static int as3722_pinctrl_probe(struct platform_device *pdev) return 0; fail_range_add: - tret = gpiochip_remove(&as_pci->gpio_chip); - if (tret < 0) - dev_warn(&pdev->dev, "Couldn't remove gpio chip, %d\n", tret); - + gpiochip_remove(&as_pci->gpio_chip); fail_chip_add: pinctrl_unregister(as_pci->pctl); return ret; @@ -623,11 +619,8 @@ fail_chip_add: static int as3722_pinctrl_remove(struct platform_device *pdev) { struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev); - int ret; - ret = gpiochip_remove(&as_pci->gpio_chip); - if (ret < 0) - return ret; + gpiochip_remove(&as_pci->gpio_chip); pinctrl_unregister(as_pci->pctl); return 0; } diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 421493cb490c..354a81d40925 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -58,11 +58,28 @@ static int gpio_banks; #define DEGLITCH (1 << 2) #define PULL_DOWN (1 << 3) #define DIS_SCHMIT (1 << 4) +#define DRIVE_STRENGTH_SHIFT 5 +#define DRIVE_STRENGTH_MASK 0x3 +#define DRIVE_STRENGTH (DRIVE_STRENGTH_MASK << DRIVE_STRENGTH_SHIFT) #define DEBOUNCE (1 << 16) #define DEBOUNCE_VAL_SHIFT 17 #define DEBOUNCE_VAL (0x3fff << DEBOUNCE_VAL_SHIFT) /** + * These defines will translated the dt binding settings to our internal + * settings. They are not necessarily the same value as the register setting. + * The actual drive strength current of low, medium and high must be looked up + * from the corresponding device datasheet. This value is different for pins + * that are even in the same banks. It is also dependent on VCC. + * DRIVE_STRENGTH_DEFAULT is just a placeholder to avoid changing the drive + * strength when there is no dt config for it. 
+ */ +#define DRIVE_STRENGTH_DEFAULT (0 << DRIVE_STRENGTH_SHIFT) +#define DRIVE_STRENGTH_LOW (1 << DRIVE_STRENGTH_SHIFT) +#define DRIVE_STRENGTH_MED (2 << DRIVE_STRENGTH_SHIFT) +#define DRIVE_STRENGTH_HI (3 << DRIVE_STRENGTH_SHIFT) + +/** * struct at91_pmx_func - describes AT91 pinmux functions * @name: the name of this specific function * @groups: corresponding pin groups @@ -148,6 +165,9 @@ struct at91_pinctrl_mux_ops { void (*set_pulldown)(void __iomem *pio, unsigned mask, bool is_on); bool (*get_schmitt_trig)(void __iomem *pio, unsigned pin); void (*disable_schmitt_trig)(void __iomem *pio, unsigned mask); + unsigned (*get_drivestrength)(void __iomem *pio, unsigned pin); + void (*set_drivestrength)(void __iomem *pio, unsigned pin, + u32 strength); /* irq */ int (*irq_type)(struct irq_data *d, unsigned type); }; @@ -315,6 +335,30 @@ static unsigned pin_to_mask(unsigned int pin) return 1 << pin; } +static unsigned two_bit_pin_value_shift_amount(unsigned int pin) +{ + /* return the shift value for a pin for "two bit" per pin registers, + * i.e. drive strength */ + return 2*((pin >= MAX_NB_GPIO_PER_BANK/2) + ? pin - MAX_NB_GPIO_PER_BANK/2 : pin); +} + +static unsigned sama5d3_get_drive_register(unsigned int pin) +{ + /* drive strength is split between two registers + * with two bits per pin */ + return (pin >= MAX_NB_GPIO_PER_BANK/2) + ? SAMA5D3_PIO_DRIVER2 : SAMA5D3_PIO_DRIVER1; +} + +static unsigned at91sam9x5_get_drive_register(unsigned int pin) +{ + /* drive strength is split between two registers + * with two bits per pin */ + return (pin >= MAX_NB_GPIO_PER_BANK/2) + ? AT91SAM9X5_PIO_DRIVER2 : AT91SAM9X5_PIO_DRIVER1; +} + static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask) { writel_relaxed(mask, pio + PIO_IDR); @@ -327,6 +371,9 @@ static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin) static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on) { + if (on) + writel_relaxed(mask, pio + PIO_PPDDR); + writel_relaxed(mask, pio + (on ? PIO_PUER : PIO_PUDR)); } @@ -455,6 +502,9 @@ static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin) static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on) { + if (is_on) + __raw_writel(mask, pio + PIO_PUDR); + __raw_writel(mask, pio + (is_on ? 
PIO_PPDER : PIO_PPDDR)); } @@ -468,6 +518,79 @@ static bool at91_mux_pio3_get_schmitt_trig(void __iomem *pio, unsigned pin) return (__raw_readl(pio + PIO_SCHMITT) >> pin) & 0x1; } +static inline u32 read_drive_strength(void __iomem *reg, unsigned pin) +{ + unsigned tmp = __raw_readl(reg); + + tmp = tmp >> two_bit_pin_value_shift_amount(pin); + + return tmp & DRIVE_STRENGTH_MASK; +} + +static unsigned at91_mux_sama5d3_get_drivestrength(void __iomem *pio, + unsigned pin) +{ + unsigned tmp = read_drive_strength(pio + + sama5d3_get_drive_register(pin), pin); + + /* SAMA5 strength is 1:1 with our defines, + * except 0 is equivalent to low per datasheet */ + if (!tmp) + tmp = DRIVE_STRENGTH_LOW; + + return tmp; +} + +static unsigned at91_mux_sam9x5_get_drivestrength(void __iomem *pio, + unsigned pin) +{ + unsigned tmp = read_drive_strength(pio + + at91sam9x5_get_drive_register(pin), pin); + + /* strength is inverse in SAM9x5s hardware with the pinctrl defines + * hardware: 0 = hi, 1 = med, 2 = low, 3 = rsvd */ + tmp = DRIVE_STRENGTH_HI - tmp; + + return tmp; +} + +static void set_drive_strength(void __iomem *reg, unsigned pin, u32 strength) +{ + unsigned tmp = __raw_readl(reg); + unsigned shift = two_bit_pin_value_shift_amount(pin); + + tmp &= ~(DRIVE_STRENGTH_MASK << shift); + tmp |= strength << shift; + + __raw_writel(tmp, reg); +} + +static void at91_mux_sama5d3_set_drivestrength(void __iomem *pio, unsigned pin, + u32 setting) +{ + /* do nothing if setting is zero */ + if (!setting) + return; + + /* strength is 1 to 1 with setting for SAMA5 */ + set_drive_strength(pio + sama5d3_get_drive_register(pin), pin, setting); +} + +static void at91_mux_sam9x5_set_drivestrength(void __iomem *pio, unsigned pin, + u32 setting) +{ + /* do nothing if setting is zero */ + if (!setting) + return; + + /* strength is inverse on SAM9x5s with our defines + * 0 = hi, 1 = med, 2 = low, 3 = rsvd */ + setting = DRIVE_STRENGTH_HI - setting; + + set_drive_strength(pio + at91sam9x5_get_drive_register(pin), pin, + setting); +} + static struct at91_pinctrl_mux_ops at91rm9200_ops = { .get_periph = at91_mux_get_periph, .mux_A_periph = at91_mux_set_A_periph, @@ -491,16 +614,37 @@ static struct at91_pinctrl_mux_ops at91sam9x5_ops = { .set_pulldown = at91_mux_pio3_set_pulldown, .get_schmitt_trig = at91_mux_pio3_get_schmitt_trig, .disable_schmitt_trig = at91_mux_pio3_disable_schmitt_trig, + .get_drivestrength = at91_mux_sam9x5_get_drivestrength, + .set_drivestrength = at91_mux_sam9x5_set_drivestrength, + .irq_type = alt_gpio_irq_type, +}; + +static struct at91_pinctrl_mux_ops sama5d3_ops = { + .get_periph = at91_mux_pio3_get_periph, + .mux_A_periph = at91_mux_pio3_set_A_periph, + .mux_B_periph = at91_mux_pio3_set_B_periph, + .mux_C_periph = at91_mux_pio3_set_C_periph, + .mux_D_periph = at91_mux_pio3_set_D_periph, + .get_deglitch = at91_mux_pio3_get_deglitch, + .set_deglitch = at91_mux_pio3_set_deglitch, + .get_debounce = at91_mux_pio3_get_debounce, + .set_debounce = at91_mux_pio3_set_debounce, + .get_pulldown = at91_mux_pio3_get_pulldown, + .set_pulldown = at91_mux_pio3_set_pulldown, + .get_schmitt_trig = at91_mux_pio3_get_schmitt_trig, + .disable_schmitt_trig = at91_mux_pio3_disable_schmitt_trig, + .get_drivestrength = at91_mux_sama5d3_get_drivestrength, + .set_drivestrength = at91_mux_sama5d3_set_drivestrength, .irq_type = alt_gpio_irq_type, }; static void at91_pin_dbg(const struct device *dev, const struct at91_pmx_pin *pin) { if (pin->mux) { - dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lu\n", + 
dev_dbg(dev, "pio%c%d configured as periph%c with conf = 0x%lx\n", pin->bank + 'A', pin->pin, pin->mux - 1 + 'A', pin->conf); } else { - dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lu\n", + dev_dbg(dev, "pio%c%d configured as gpio with conf = 0x%lx\n", pin->bank + 'A', pin->pin, pin->conf); } } @@ -554,8 +698,8 @@ static void at91_mux_gpio_enable(void __iomem *pio, unsigned mask, bool input) writel_relaxed(mask, pio + (input ? PIO_ODR : PIO_OER)); } -static int at91_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) { struct at91_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); const struct at91_pmx_pin *pins_conf = info->groups[group].pins_conf; @@ -611,26 +755,6 @@ static int at91_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, return 0; } -static void at91_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) -{ - struct at91_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); - const struct at91_pmx_pin *pins_conf = info->groups[group].pins_conf; - const struct at91_pmx_pin *pin; - uint32_t npins = info->groups[group].npins; - int i; - unsigned mask; - void __iomem *pio; - - for (i = 0; i < npins; i++) { - pin = &pins_conf[i]; - at91_pin_dbg(info->dev, pin); - pio = pin_to_controller(info, pin->bank); - mask = pin_to_mask(pin->pin); - at91_mux_gpio_enable(pio, mask, 1); - } -} - static int at91_pmx_get_funcs_count(struct pinctrl_dev *pctldev) { struct at91_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); @@ -704,8 +828,7 @@ static const struct pinmux_ops at91_pmx_ops = { .get_functions_count = at91_pmx_get_funcs_count, .get_function_name = at91_pmx_get_func_name, .get_function_groups = at91_pmx_get_groups, - .enable = at91_pmx_enable, - .disable = at91_pmx_disable, + .set_mux = at91_pmx_set, .gpio_request_enable = at91_gpio_request_enable, .gpio_disable_free = at91_gpio_disable_free, }; @@ -737,6 +860,9 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev, *config |= PULL_DOWN; if (info->ops->get_schmitt_trig && info->ops->get_schmitt_trig(pio, pin)) *config |= DIS_SCHMIT; + if (info->ops->get_drivestrength) + *config |= (info->ops->get_drivestrength(pio, pin) + << DRIVE_STRENGTH_SHIFT); return 0; } @@ -750,6 +876,7 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, void __iomem *pio; int i; unsigned long config; + unsigned pin; for (i = 0; i < num_configs; i++) { config = configs[i]; @@ -758,7 +885,8 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, "%s:%d, pin_id=%d, config=0x%lx", __func__, __LINE__, pin_id, config); pio = pin_to_controller(info, pin_to_bank(pin_id)); - mask = pin_to_mask(pin_id % MAX_NB_GPIO_PER_BANK); + pin = pin_id % MAX_NB_GPIO_PER_BANK; + mask = pin_to_mask(pin); if (config & PULL_UP && config & PULL_DOWN) return -EINVAL; @@ -774,6 +902,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, info->ops->set_pulldown(pio, mask, config & PULL_DOWN); if (info->ops->disable_schmitt_trig && config & DIS_SCHMIT) info->ops->disable_schmitt_trig(pio, mask); + if (info->ops->set_drivestrength) + info->ops->set_drivestrength(pio, pin, + (config & DRIVE_STRENGTH) + >> DRIVE_STRENGTH_SHIFT); } /* for each config */ @@ -789,19 +921,31 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, } \ } while (0) +#define DBG_SHOW_FLAG_MASKED(mask,flag) do { \ + if ((config & mask) == flag) { \ + if (num_conf) \ + seq_puts(s, "|"); \ + seq_puts(s, #flag); \ + num_conf++; \ + } \ +} 
while (0) + static void at91_pinconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned pin_id) { unsigned long config; - int ret, val, num_conf = 0; + int val, num_conf = 0; - ret = at91_pinconf_get(pctldev, pin_id, &config); + at91_pinconf_get(pctldev, pin_id, &config); DBG_SHOW_FLAG(MULTI_DRIVE); DBG_SHOW_FLAG(PULL_UP); DBG_SHOW_FLAG(PULL_DOWN); DBG_SHOW_FLAG(DIS_SCHMIT); DBG_SHOW_FLAG(DEGLITCH); + DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_LOW); + DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_MED); + DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_HI); DBG_SHOW_FLAG(DEBOUNCE); if (config & DEBOUNCE) { val = config >> DEBOUNCE_VAL_SHIFT; @@ -945,7 +1089,7 @@ static int at91_pinctrl_parse_functions(struct device_node *np, /* Initialise function */ func->name = np->name; func->ngroups = of_get_child_count(np); - if (func->ngroups <= 0) { + if (func->ngroups == 0) { dev_err(info->dev, "no groups defined\n"); return -EINVAL; } @@ -966,6 +1110,7 @@ static int at91_pinctrl_parse_functions(struct device_node *np, } static struct of_device_id at91_pinctrl_of_match[] = { + { .compatible = "atmel,sama5d3-pinctrl", .data = &sama5d3_ops }, { .compatible = "atmel,at91sam9x5-pinctrl", .data = &at91sam9x5_ops }, { .compatible = "atmel,at91rm9200-pinctrl", .data = &at91rm9200_ops }, { /* sentinel */ } @@ -1466,7 +1611,7 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) /* now it may re-trigger */ } -static int at91_gpio_of_irq_setup(struct device_node *node, +static int at91_gpio_of_irq_setup(struct platform_device *pdev, struct at91_gpio_chip *at91_gpio) { struct at91_gpio_chip *prev = NULL; @@ -1491,9 +1636,11 @@ static int at91_gpio_of_irq_setup(struct device_node *node, 0, handle_edge_irq, IRQ_TYPE_EDGE_BOTH); - if (ret) - panic("at91_gpio.%d: couldn't allocate irq domain (DT).\n", + if (ret) { + dev_err(&pdev->dev, "at91_gpio.%d: Couldn't add irqchip to gpiochip.\n", at91_gpio->pioc_idx); + return ret; + } /* Setup chained handler */ if (at91_gpio->pioc_idx) @@ -1596,19 +1743,22 @@ static int at91_gpio_probe(struct platform_device *pdev) at91_chip->pioc_virq = irq; at91_chip->pioc_idx = alias_idx; - at91_chip->clock = clk_get(&pdev->dev, NULL); + at91_chip->clock = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(at91_chip->clock)) { dev_err(&pdev->dev, "failed to get clock, ignoring.\n"); + ret = PTR_ERR(at91_chip->clock); goto err; } - if (clk_prepare(at91_chip->clock)) - goto clk_prep_err; + ret = clk_prepare(at91_chip->clock); + if (ret) + goto clk_prepare_err; /* enable PIO controller's clock */ - if (clk_enable(at91_chip->clock)) { + ret = clk_enable(at91_chip->clock); + if (ret) { dev_err(&pdev->dev, "failed to enable clock, ignoring.\n"); - goto clk_err; + goto clk_enable_err; } at91_chip->chip = at91_gpio_template; @@ -1633,7 +1783,7 @@ static int at91_gpio_probe(struct platform_device *pdev) if (!names) { ret = -ENOMEM; - goto clk_err; + goto clk_enable_err; } for (i = 0; i < chip->ngpio; i++) @@ -1651,23 +1801,28 @@ static int at91_gpio_probe(struct platform_device *pdev) ret = gpiochip_add(chip); if (ret) - goto clk_err; + goto gpiochip_add_err; gpio_chips[alias_idx] = at91_chip; gpio_banks = max(gpio_banks, alias_idx + 1); at91_gpio_probe_fixup(); - at91_gpio_of_irq_setup(np, at91_chip); + ret = at91_gpio_of_irq_setup(pdev, at91_chip); + if (ret) + goto irq_setup_err; dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase); return 0; -clk_err: +irq_setup_err: + gpiochip_remove(chip); +gpiochip_add_err: + 
clk_disable(at91_chip->clock); +clk_enable_err: clk_unprepare(at91_chip->clock); -clk_prep_err: - clk_put(at91_chip->clock); +clk_prepare_err: err: dev_err(&pdev->dev, "Failure %i for GPIO %i\n", ret, alias_idx); diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c index 975572e2f260..e12e5b07f6d7 100644 --- a/drivers/pinctrl/pinctrl-baytrail.c +++ b/drivers/pinctrl/pinctrl-baytrail.c @@ -25,9 +25,7 @@ #include <linux/types.h> #include <linux/bitops.h> #include <linux/interrupt.h> -#include <linux/irq.h> #include <linux/gpio.h> -#include <linux/irqdomain.h> #include <linux/acpi.h> #include <linux/platform_device.h> #include <linux/seq_file.h> @@ -44,6 +42,7 @@ /* BYT_CONF0_REG register bits */ #define BYT_IODEN BIT(31) +#define BYT_DIRECT_IRQ_EN BIT(27) #define BYT_TRIG_NEG BIT(26) #define BYT_TRIG_POS BIT(25) #define BYT_TRIG_LVL BIT(24) @@ -137,7 +136,6 @@ static struct pinctrl_gpio_range byt_ranges[] = { struct byt_gpio { struct gpio_chip chip; - struct irq_domain *domain; struct platform_device *pdev; spinlock_t lock; void __iomem *reg_base; @@ -217,7 +215,7 @@ static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) static int byt_irq_type(struct irq_data *d, unsigned type) { - struct byt_gpio *vg = irq_data_get_irq_chip_data(d); + struct byt_gpio *vg = to_byt_gpio(irq_data_get_irq_chip_data(d)); u32 offset = irqd_to_hwirq(d); u32 value; unsigned long flags; @@ -303,12 +301,22 @@ static int byt_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct byt_gpio *vg = to_byt_gpio(chip); + void __iomem *conf_reg = byt_gpio_reg(chip, gpio, BYT_CONF0_REG); void __iomem *reg = byt_gpio_reg(chip, gpio, BYT_VAL_REG); unsigned long flags; u32 reg_val; spin_lock_irqsave(&vg->lock, flags); + /* + * Before making any direction modifications, do a check if gpio + * is set for direct IRQ. On baytrail, setting GPIO to output does + * not make sense, so let's at least warn the caller before they shoot + * themselves in the foot. 
+ */ + WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN, + "Potential Error: Setting GPIO with direct_irq_en to output"); + reg_val = readl(reg) | BYT_DIR_MASK; reg_val &= ~BYT_OUTPUT_EN; @@ -393,16 +401,10 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) spin_unlock_irqrestore(&vg->lock, flags); } -static int byt_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct byt_gpio *vg = to_byt_gpio(chip); - return irq_create_mapping(vg->domain, offset); -} - static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); - struct byt_gpio *vg = irq_data_get_irq_handler_data(data); + struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); struct irq_chip *chip = irq_data_get_irq_chip(data); u32 base, pin, mask; void __iomem *reg; @@ -421,7 +423,7 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) /* Clear before handling so we can't lose an edge */ writel(mask, reg); - virq = irq_find_mapping(vg->domain, base + pin); + virq = irq_find_mapping(vg->chip.irqdomain, base + pin); generic_handle_irq(virq); /* In case bios or user sets triggering incorretly a pin @@ -454,33 +456,12 @@ static void byt_irq_mask(struct irq_data *d) { } -static int byt_irq_reqres(struct irq_data *d) -{ - struct byt_gpio *vg = irq_data_get_irq_chip_data(d); - - if (gpio_lock_as_irq(&vg->chip, irqd_to_hwirq(d))) { - dev_err(vg->chip.dev, - "unable to lock HW IRQ %lu for IRQ\n", - irqd_to_hwirq(d)); - return -EINVAL; - } - return 0; -} - -static void byt_irq_relres(struct irq_data *d) -{ - struct byt_gpio *vg = irq_data_get_irq_chip_data(d); - - gpio_unlock_as_irq(&vg->chip, irqd_to_hwirq(d)); -} - static struct irq_chip byt_irqchip = { .name = "BYT-GPIO", .irq_mask = byt_irq_mask, .irq_unmask = byt_irq_unmask, .irq_set_type = byt_irq_type, - .irq_request_resources = byt_irq_reqres, - .irq_release_resources = byt_irq_relres, + .flags = IRQCHIP_SKIP_SET_WAKE, }; static void byt_gpio_irq_init_hw(struct byt_gpio *vg) @@ -501,23 +482,6 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) } } -static int byt_gpio_irq_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw) -{ - struct byt_gpio *vg = d->host_data; - - irq_set_chip_and_handler_name(virq, &byt_irqchip, handle_simple_irq, - "demux"); - irq_set_chip_data(virq, vg); - irq_set_irq_type(virq, IRQ_TYPE_NONE); - - return 0; -} - -static const struct irq_domain_ops byt_gpio_irq_ops = { - .map = byt_gpio_irq_map, -}; - static int byt_gpio_probe(struct platform_device *pdev) { struct byt_gpio *vg; @@ -527,7 +491,6 @@ static int byt_gpio_probe(struct platform_device *pdev) struct acpi_device *acpi_dev; struct pinctrl_gpio_range *range; acpi_handle handle = ACPI_HANDLE(dev); - unsigned hwirq; int ret; if (acpi_bus_get_device(handle, &acpi_dev)) @@ -574,27 +537,27 @@ static int byt_gpio_probe(struct platform_device *pdev) gc->can_sleep = false; gc->dev = dev; + ret = gpiochip_add(gc); + if (ret) { + dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); + return ret; + } + /* set up interrupts */ irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (irq_rc && irq_rc->start) { - hwirq = irq_rc->start; - gc->to_irq = byt_gpio_to_irq; - - vg->domain = irq_domain_add_linear(NULL, gc->ngpio, - &byt_gpio_irq_ops, vg); - if (!vg->domain) - return -ENXIO; - byt_gpio_irq_init_hw(vg); + ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0, + handle_simple_irq, IRQ_TYPE_NONE); + if (ret) { + dev_err(dev, "failed to add irqchip\n"); + 
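/*
 * Editor's note -- condensed sketch of the registration order the Baytrail
 * conversion relies on (kernel context assumed, so this is not standalone
 * buildable and error handling is trimmed): the gpio_chip is registered
 * first, gpiochip_irqchip_add() then attaches the irqchip and its irq
 * domain, and only afterwards is the chained parent handler installed.
 * A failure after gpiochip_add() has to unwind with gpiochip_remove().
 */
#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int example_add_chip_with_irqs(struct gpio_chip *gc,
                                      struct irq_chip *ic,
                                      unsigned int parent_irq,
                                      irq_flow_handler_t parent_handler)
{
        int ret;

        ret = gpiochip_add(gc);
        if (ret)
                return ret;

        ret = gpiochip_irqchip_add(gc, ic, 0, handle_simple_irq,
                                   IRQ_TYPE_NONE);
        if (ret) {
                gpiochip_remove(gc);
                return ret;
        }

        gpiochip_set_chained_irqchip(gc, ic, parent_irq, parent_handler);
        return 0;
}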
gpiochip_remove(gc); + return ret; + } - irq_set_handler_data(hwirq, vg); - irq_set_chained_handler(hwirq, byt_gpio_irq_handler); - } - - ret = gpiochip_add(gc); - if (ret) { - dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); - return ret; + gpiochip_set_chained_irqchip(gc, &byt_irqchip, + (unsigned)irq_rc->start, + byt_gpio_irq_handler); } pm_runtime_enable(dev); @@ -627,12 +590,9 @@ MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); static int byt_gpio_remove(struct platform_device *pdev) { struct byt_gpio *vg = platform_get_drvdata(pdev); - int err; pm_runtime_disable(&pdev->dev); - err = gpiochip_remove(&vg->chip); - if (err) - dev_warn(&pdev->dev, "failed to remove gpio_chip.\n"); + gpiochip_remove(&vg->chip); return 0; } diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c index 3bed792b2c03..a26e0c2ba33e 100644 --- a/drivers/pinctrl/pinctrl-bcm281xx.c +++ b/drivers/pinctrl/pinctrl-bcm281xx.c @@ -1055,9 +1055,9 @@ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev, return 0; } -static int bcm281xx_pinmux_enable(struct pinctrl_dev *pctldev, - unsigned function, - unsigned group) +static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev, + unsigned function, + unsigned group) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); const struct bcm281xx_pin_function *f = &pdata->functions[function]; @@ -1084,7 +1084,7 @@ static struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = { .get_functions_count = bcm281xx_pinctrl_get_fcns_count, .get_function_name = bcm281xx_pinctrl_get_fcn_name, .get_function_groups = bcm281xx_pinctrl_get_fcn_groups, - .enable = bcm281xx_pinmux_enable, + .set_mux = bcm281xx_pinmux_set, }; static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev, @@ -1396,7 +1396,7 @@ static struct pinctrl_desc bcm281xx_pinctrl_desc = { .owner = THIS_MODULE, }; -int __init bcm281xx_pinctrl_probe(struct platform_device *pdev) +static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev) { struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl; struct resource *res; diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c index 3d907de9bc91..eabba02f71f9 100644 --- a/drivers/pinctrl/pinctrl-bcm2835.c +++ b/drivers/pinctrl/pinctrl-bcm2835.c @@ -830,7 +830,7 @@ static int bcm2835_pmx_get_function_groups(struct pinctrl_dev *pctldev, return 0; } -static int bcm2835_pmx_enable(struct pinctrl_dev *pctldev, +static int bcm2835_pmx_set(struct pinctrl_dev *pctldev, unsigned func_selector, unsigned group_selector) { @@ -841,16 +841,6 @@ static int bcm2835_pmx_enable(struct pinctrl_dev *pctldev, return 0; } -static void bcm2835_pmx_disable(struct pinctrl_dev *pctldev, - unsigned func_selector, - unsigned group_selector) -{ - struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev); - - /* disable by setting to GPIO_IN */ - bcm2835_pinctrl_fsel_set(pc, group_selector, BCM2835_FSEL_GPIO_IN); -} - static void bcm2835_pmx_gpio_disable_free(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) @@ -879,8 +869,7 @@ static const struct pinmux_ops bcm2835_pmx_ops = { .get_functions_count = bcm2835_pmx_get_functions_count, .get_function_name = bcm2835_pmx_get_function_name, .get_function_groups = bcm2835_pmx_get_function_groups, - .enable = bcm2835_pmx_enable, - .disable = bcm2835_pmx_disable, + .set_mux = bcm2835_pmx_set, .gpio_disable_free = bcm2835_pmx_gpio_disable_free, .gpio_set_direction = bcm2835_pmx_gpio_set_direction, }; diff --git 
a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c index d182fdd2e715..29cbbab8c3a6 100644 --- a/drivers/pinctrl/pinctrl-coh901.c +++ b/drivers/pinctrl/pinctrl-coh901.c @@ -756,8 +756,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev) err_no_range: err_no_irqchip: - if (gpiochip_remove(&gpio->chip)) - dev_err(&pdev->dev, "failed to remove gpio chip\n"); + gpiochip_remove(&gpio->chip); err_no_chip: clk_disable_unprepare(gpio->clk); dev_err(&pdev->dev, "module ERROR:%d\n", err); @@ -767,16 +766,11 @@ err_no_chip: static int __exit u300_gpio_remove(struct platform_device *pdev) { struct u300_gpio *gpio = platform_get_drvdata(pdev); - int err; /* Turn off the GPIO block */ writel(0x00000000U, gpio->base + U300_GPIO_CR); - err = gpiochip_remove(&gpio->chip); - if (err < 0) { - dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err); - return err; - } + gpiochip_remove(&gpio->chip); clk_disable_unprepare(gpio->clk); return 0; } diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c index d22ca252b80d..296e5b37f768 100644 --- a/drivers/pinctrl/pinctrl-lantiq.c +++ b/drivers/pinctrl/pinctrl-lantiq.c @@ -257,9 +257,9 @@ static int match_group_mux(const struct ltq_pin_group *grp, return ret; } -static int ltq_pmx_enable(struct pinctrl_dev *pctrldev, - unsigned func, - unsigned group) +static int ltq_pmx_set(struct pinctrl_dev *pctrldev, + unsigned func, + unsigned group) { struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev); const struct ltq_pin_group *pin_grp = &info->grps[group]; @@ -316,7 +316,7 @@ static const struct pinmux_ops ltq_pmx_ops = { .get_functions_count = ltq_pmx_func_count, .get_function_name = ltq_pmx_func_name, .get_function_groups = ltq_pmx_get_groups, - .enable = ltq_pmx_enable, + .set_mux = ltq_pmx_set, .gpio_request_enable = ltq_pmx_gpio_request_enable, }; diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c index f13d0e78a41c..e3079d3d19fe 100644 --- a/drivers/pinctrl/pinctrl-palmas.c +++ b/drivers/pinctrl/pinctrl-palmas.c @@ -685,7 +685,8 @@ static int palmas_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, return 0; } -static int palmas_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function, +static int palmas_pinctrl_set_mux(struct pinctrl_dev *pctldev, + unsigned function, unsigned group) { struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev); @@ -742,7 +743,7 @@ static const struct pinmux_ops palmas_pinmux_ops = { .get_functions_count = palmas_pinctrl_get_funcs_count, .get_function_name = palmas_pinctrl_get_func_name, .get_function_groups = palmas_pinctrl_get_func_groups, - .enable = palmas_pinctrl_enable, + .set_mux = palmas_pinctrl_set_mux, }; static int palmas_pinconf_get(struct pinctrl_dev *pctldev, diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index bb805d5e9ff0..016f4578e494 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -62,11 +62,26 @@ enum rockchip_pinctrl_type { RK2928, RK3066B, RK3188, + RK3288, }; -enum rockchip_pin_bank_type { - COMMON_BANK, - RK3188_BANK0, +/** + * Encode variants of iomux registers into a type variable + */ +#define IOMUX_GPIO_ONLY BIT(0) +#define IOMUX_WIDTH_4BIT BIT(1) +#define IOMUX_SOURCE_PMU BIT(2) +#define IOMUX_UNROUTED BIT(3) + +/** + * @type: iomux variant using IOMUX_* constants + * @offset: if initialized to -1 it will be autocalculated, by specifying + * an initial offset value the relevant source offset can be 
reset + * to a new value for autocalculating the following iomux registers. + */ +struct rockchip_iomux { + int type; + int offset; }; /** @@ -78,6 +93,7 @@ enum rockchip_pin_bank_type { * @nr_pins: number of pins in this bank * @name: name of the bank * @bank_num: number of the bank, to account for holes + * @iomux: array describing the 4 iomux sources of the bank * @valid: are all necessary informations present * @of_node: dt node of this bank * @drvdata: common pinctrl basedata @@ -95,7 +111,7 @@ struct rockchip_pin_bank { u8 nr_pins; char *name; u8 bank_num; - enum rockchip_pin_bank_type bank_type; + struct rockchip_iomux iomux[4]; bool valid; struct device_node *of_node; struct rockchip_pinctrl *drvdata; @@ -111,6 +127,25 @@ struct rockchip_pin_bank { .bank_num = id, \ .nr_pins = pins, \ .name = label, \ + .iomux = { \ + { .offset = -1 }, \ + { .offset = -1 }, \ + { .offset = -1 }, \ + { .offset = -1 }, \ + }, \ + } + +#define PIN_BANK_IOMUX_FLAGS(id, pins, label, iom0, iom1, iom2, iom3) \ + { \ + .bank_num = id, \ + .nr_pins = pins, \ + .name = label, \ + .iomux = { \ + { .type = iom0, .offset = -1 }, \ + { .type = iom1, .offset = -1 }, \ + { .type = iom2, .offset = -1 }, \ + { .type = iom3, .offset = -1 }, \ + }, \ } /** @@ -121,7 +156,8 @@ struct rockchip_pin_ctrl { u32 nr_pins; char *label; enum rockchip_pinctrl_type type; - int mux_offset; + int grf_mux_offset; + int pmu_mux_offset; void (*pull_calc_reg)(struct rockchip_pin_bank *bank, int pin_num, struct regmap **regmap, int *reg, u8 *bit); @@ -343,24 +379,42 @@ static const struct pinctrl_ops rockchip_pctrl_ops = { static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin) { struct rockchip_pinctrl *info = bank->drvdata; + int iomux_num = (pin / 8); + struct regmap *regmap; unsigned int val; - int reg, ret; + int reg, ret, mask; u8 bit; - if (bank->bank_type == RK3188_BANK0 && pin < 16) + if (iomux_num > 3) + return -EINVAL; + + if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) { + dev_err(info->dev, "pin %d is unrouted\n", pin); + return -EINVAL; + } + + if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY) return RK_FUNC_GPIO; + regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU) + ? info->regmap_pmu : info->regmap_base; + /* get basic quadrupel of mux registers and the correct reg inside */ - reg = info->ctrl->mux_offset; - reg += bank->bank_num * 0x10; - reg += (pin / 8) * 4; - bit = (pin % 8) * 2; + mask = (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) ? 0xf : 0x3; + reg = bank->iomux[iomux_num].offset; + if (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) { + if ((pin % 8) >= 4) + reg += 0x4; + bit = (pin % 4) * 4; + } else { + bit = (pin % 8) * 2; + } - ret = regmap_read(info->regmap_base, reg, &val); + ret = regmap_read(regmap, reg, &val); if (ret) return ret; - return ((val >> bit) & 3); + return ((val >> bit) & mask); } /* @@ -379,16 +433,22 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin) static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) { struct rockchip_pinctrl *info = bank->drvdata; - int reg, ret; + int iomux_num = (pin / 8); + struct regmap *regmap; + int reg, ret, mask; unsigned long flags; u8 bit; - u32 data; + u32 data, rmask; - /* - * The first 16 pins of rk3188_bank0 are always gpios and do not have - * a mux register at all. 
- */ - if (bank->bank_type == RK3188_BANK0 && pin < 16) { + if (iomux_num > 3) + return -EINVAL; + + if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) { + dev_err(info->dev, "pin %d is unrouted\n", pin); + return -EINVAL; + } + + if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY) { if (mux != RK_FUNC_GPIO) { dev_err(info->dev, "pin %d only supports a gpio mux\n", pin); @@ -401,17 +461,26 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n", bank->bank_num, pin, mux); + regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU) + ? info->regmap_pmu : info->regmap_base; + /* get basic quadrupel of mux registers and the correct reg inside */ - reg = info->ctrl->mux_offset; - reg += bank->bank_num * 0x10; - reg += (pin / 8) * 4; - bit = (pin % 8) * 2; + mask = (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) ? 0xf : 0x3; + reg = bank->iomux[iomux_num].offset; + if (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) { + if ((pin % 8) >= 4) + reg += 0x4; + bit = (pin % 4) * 4; + } else { + bit = (pin % 8) * 2; + } spin_lock_irqsave(&bank->slock, flags); - data = (3 << (bit + 16)); - data |= (mux & 3) << bit; - ret = regmap_write(info->regmap_base, reg, data); + data = (mask << (bit + 16)); + rmask = data | (data >> 16); + data |= (mux & mask) << bit; + ret = regmap_update_bits(regmap, reg, rmask, data); spin_unlock_irqrestore(&bank->slock, flags); @@ -449,7 +518,7 @@ static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, struct rockchip_pinctrl *info = bank->drvdata; /* The first 12 pins of the first bank are located elsewhere */ - if (bank->bank_type == RK3188_BANK0 && pin_num < 12) { + if (bank->bank_num == 0 && pin_num < 12) { *regmap = info->regmap_pmu ? info->regmap_pmu : bank->regmap_pull; *reg = info->regmap_pmu ? 
RK3188_PULL_PMU_OFFSET : 0; @@ -476,6 +545,128 @@ static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, } } +#define RK3288_PULL_OFFSET 0x140 +static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + /* The first 24 pins of the first bank are located in PMU */ + if (bank->bank_num == 0) { + *regmap = info->regmap_pmu; + *reg = RK3188_PULL_PMU_OFFSET; + + *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4); + *bit = pin_num % RK3188_PULL_PINS_PER_REG; + *bit *= RK3188_PULL_BITS_PER_PIN; + } else { + *regmap = info->regmap_base; + *reg = RK3288_PULL_OFFSET; + + /* correct the offset, as we're starting with the 2nd bank */ + *reg -= 0x10; + *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE; + *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4); + + *bit = (pin_num % RK3188_PULL_PINS_PER_REG); + *bit *= RK3188_PULL_BITS_PER_PIN; + } +} + +#define RK3288_DRV_PMU_OFFSET 0x70 +#define RK3288_DRV_GRF_OFFSET 0x1c0 +#define RK3288_DRV_BITS_PER_PIN 2 +#define RK3288_DRV_PINS_PER_REG 8 +#define RK3288_DRV_BANK_STRIDE 16 +static int rk3288_drv_list[] = { 2, 4, 8, 12 }; + +static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + + /* The first 24 pins of the first bank are located in PMU */ + if (bank->bank_num == 0) { + *regmap = info->regmap_pmu; + *reg = RK3288_DRV_PMU_OFFSET; + + *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4); + *bit = pin_num % RK3288_DRV_PINS_PER_REG; + *bit *= RK3288_DRV_BITS_PER_PIN; + } else { + *regmap = info->regmap_base; + *reg = RK3288_DRV_GRF_OFFSET; + + /* correct the offset, as we're starting with the 2nd bank */ + *reg -= 0x10; + *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE; + *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4); + + *bit = (pin_num % RK3288_DRV_PINS_PER_REG); + *bit *= RK3288_DRV_BITS_PER_PIN; + } +} + +static int rk3288_get_drive(struct rockchip_pin_bank *bank, int pin_num) +{ + struct regmap *regmap; + int reg, ret; + u32 data; + u8 bit; + + rk3288_calc_drv_reg_and_bit(bank, pin_num, ®map, ®, &bit); + + ret = regmap_read(regmap, reg, &data); + if (ret) + return ret; + + data >>= bit; + data &= (1 << RK3288_DRV_BITS_PER_PIN) - 1; + + return rk3288_drv_list[data]; +} + +static int rk3288_set_drive(struct rockchip_pin_bank *bank, int pin_num, + int strength) +{ + struct rockchip_pinctrl *info = bank->drvdata; + struct regmap *regmap; + unsigned long flags; + int reg, ret, i; + u32 data, rmask; + u8 bit; + + rk3288_calc_drv_reg_and_bit(bank, pin_num, ®map, ®, &bit); + + ret = -EINVAL; + for (i = 0; i < ARRAY_SIZE(rk3288_drv_list); i++) { + if (rk3288_drv_list[i] == strength) { + ret = i; + break; + } + } + + if (ret < 0) { + dev_err(info->dev, "unsupported driver strength %d\n", + strength); + return ret; + } + + spin_lock_irqsave(&bank->slock, flags); + + /* enable the write to the equivalent lower bits */ + data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16); + rmask = data | (data >> 16); + data |= (ret << bit); + + ret = regmap_update_bits(regmap, reg, rmask, data); + spin_unlock_irqrestore(&bank->slock, flags); + + return ret; +} + static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) { struct rockchip_pinctrl *info = bank->drvdata; @@ -501,6 +692,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) ? 
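/*
 * Editor's note -- standalone model of the "hiword mask" register layout
 * behind the regmap_update_bits() conversions above.  In the Rockchip
 * GRF/PMU blocks, bits 31:16 of the written word are per-bit write
 * enables for bits 15:0, so a field can be changed without a
 * read-modify-write on the bus; grf_write() mimics that hardware
 * behaviour, and grf_field() builds the word the driver writes.
 */
#include <assert.h>
#include <stdint.h>

/* hardware side: only the enabled low bits change */
static void grf_write(uint32_t *reg, uint32_t val)
{
        uint32_t wrmask = val >> 16;

        *reg = (*reg & ~wrmask) | (val & wrmask);
}

/* driver side: value 'val' for a field of mask 'mask' at bit 'bit' */
static uint32_t grf_field(unsigned int bit, uint32_t mask, uint32_t val)
{
        return (mask << (bit + 16)) | ((val & mask) << bit);
}

int main(void)
{
        uint32_t reg = 0x0000ffff;      /* pretend every field starts as 1s */

        /* set the 2-bit field at bits 7:6 to 0x2; nothing else may change */
        grf_write(&reg, grf_field(6, 0x3, 0x2));
        assert(((reg >> 6) & 0x3) == 0x2);
        assert((reg | (0x3u << 6)) == 0xffff);  /* other bits untouched */
        return 0;
}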
PIN_CONFIG_BIAS_PULL_PIN_DEFAULT : PIN_CONFIG_BIAS_DISABLE; case RK3188: + case RK3288: data >>= bit; data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1; @@ -532,7 +724,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, int reg, ret; unsigned long flags; u8 bit; - u32 data; + u32 data, rmask; dev_dbg(info->dev, "setting pull of GPIO%d-%d to %d\n", bank->bank_num, pin_num, pull); @@ -555,10 +747,12 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, spin_unlock_irqrestore(&bank->slock, flags); break; case RK3188: + case RK3288: spin_lock_irqsave(&bank->slock, flags); /* enable the write to the equivalent lower bits */ data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16); + rmask = data | (data >> 16); switch (pull) { case PIN_CONFIG_BIAS_DISABLE: @@ -579,7 +773,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, return -EINVAL; } - ret = regmap_write(regmap, reg, data); + ret = regmap_update_bits(regmap, reg, rmask, data); spin_unlock_irqrestore(&bank->slock, flags); break; @@ -622,8 +816,8 @@ static int rockchip_pmx_get_groups(struct pinctrl_dev *pctldev, return 0; } -static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) { struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); const unsigned int *pins = info->groups[group].pins; @@ -657,23 +851,6 @@ static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, return 0; } -static void rockchip_pmx_disable(struct pinctrl_dev *pctldev, - unsigned selector, unsigned group) -{ - struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); - const unsigned int *pins = info->groups[group].pins; - struct rockchip_pin_bank *bank; - int cnt; - - dev_dbg(info->dev, "disable function %s group %s\n", - info->functions[selector].name, info->groups[group].name); - - for (cnt = 0; cnt < info->groups[group].npins; cnt++) { - bank = pin_to_bank(info, pins[cnt]); - rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0); - } -} - /* * The calls to gpio_direction_output() and gpio_direction_input() * leads to this function call (via the pinctrl_gpio_direction_{input|output}() @@ -715,8 +892,7 @@ static const struct pinmux_ops rockchip_pmx_ops = { .get_functions_count = rockchip_pmx_get_funcs_count, .get_function_name = rockchip_pmx_get_func_name, .get_function_groups = rockchip_pmx_get_groups, - .enable = rockchip_pmx_enable, - .disable = rockchip_pmx_disable, + .set_mux = rockchip_pmx_set, .gpio_set_direction = rockchip_pmx_gpio_set_direction, }; @@ -734,6 +910,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl, case RK3066B: return pull ? false : true; case RK3188: + case RK3288: return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT); } @@ -788,6 +965,15 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, if (rc) return rc; break; + case PIN_CONFIG_DRIVE_STRENGTH: + /* rk3288 is the first with per-pin drive-strength */ + if (info->ctrl->type != RK3288) + return -ENOTSUPP; + + rc = rk3288_set_drive(bank, pin - bank->pin_base, arg); + if (rc < 0) + return rc; + break; default: return -ENOTSUPP; break; @@ -837,6 +1023,17 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, arg = rc ? 
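/*
 * Editor's note -- standalone sketch of the two directions of the rk3288
 * drive-strength translation added above.  The 2-bit hardware field is an
 * index into { 2, 4, 8, 12 } mA: rk3288_get_drive() maps index -> mA and
 * rk3288_set_drive() searches mA -> index, rejecting anything not in the
 * table (the driver returns -EINVAL there).
 */
#include <assert.h>

static const int drv_list_ma[] = { 2, 4, 8, 12 };

static int drv_index_to_ma(unsigned int index)
{
        return drv_list_ma[index & 0x3];
}

static int drv_ma_to_index(int strength_ma)
{
        unsigned int i;

        for (i = 0; i < sizeof(drv_list_ma) / sizeof(drv_list_ma[0]); i++)
                if (drv_list_ma[i] == strength_ma)
                        return i;
        return -1;
}

int main(void)
{
        assert(drv_index_to_ma(2) == 8);
        assert(drv_ma_to_index(12) == 3);
        assert(drv_ma_to_index(6) < 0);  /* 6 mA is not a supported setting */
        return 0;
}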
1 : 0; break; + case PIN_CONFIG_DRIVE_STRENGTH: + /* rk3288 is the first with per-pin drive-strength */ + if (info->ctrl->type != RK3288) + return -ENOTSUPP; + + rc = rk3288_get_drive(bank, pin - bank->pin_base); + if (rc < 0) + return rc; + + arg = rc; + break; default: return -ENOTSUPP; break; @@ -850,6 +1047,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, static const struct pinconf_ops rockchip_pinconf_ops = { .pin_config_get = rockchip_pinconf_get, .pin_config_set = rockchip_pinconf_set, + .is_generic = true, }; static const struct of_device_id rockchip_bank_match[] = { @@ -1414,10 +1612,7 @@ fail: for (--i, --bank; i >= 0; --i, --bank) { if (!bank->valid) continue; - - if (gpiochip_remove(&bank->gpio_chip)) - dev_err(&pdev->dev, "gpio chip %s remove failed\n", - bank->gpio_chip.label); + gpiochip_remove(&bank->gpio_chip); } return ret; } @@ -1427,20 +1622,15 @@ static int rockchip_gpiolib_unregister(struct platform_device *pdev, { struct rockchip_pin_ctrl *ctrl = info->ctrl; struct rockchip_pin_bank *bank = ctrl->pin_banks; - int ret = 0; int i; - for (i = 0; !ret && i < ctrl->nr_banks; ++i, ++bank) { + for (i = 0; i < ctrl->nr_banks; ++i, ++bank) { if (!bank->valid) continue; - - ret = gpiochip_remove(&bank->gpio_chip); + gpiochip_remove(&bank->gpio_chip); } - if (ret) - dev_err(&pdev->dev, "gpio chip remove failed\n"); - - return ret; + return 0; } static int rockchip_get_bank_data(struct rockchip_pin_bank *bank, @@ -1466,8 +1656,6 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank, "rockchip,rk3188-gpio-bank0")) { struct device_node *node; - bank->bank_type = RK3188_BANK0; - node = of_parse_phandle(bank->of_node->parent, "rockchip,pmu", 0); if (!node) { @@ -1487,9 +1675,6 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank, base, &rockchip_regmap_config); } - - } else { - bank->bank_type = COMMON_BANK; } bank->irq = irq_of_parse_and_map(bank->of_node, 0); @@ -1513,7 +1698,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data( struct device_node *np; struct rockchip_pin_ctrl *ctrl; struct rockchip_pin_bank *bank; - int i; + int grf_offs, pmu_offs, i, j; match = of_match_node(rockchip_pinctrl_dt_match, node); ctrl = (struct rockchip_pin_ctrl *)match->data; @@ -1535,12 +1720,51 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data( } } + grf_offs = ctrl->grf_mux_offset; + pmu_offs = ctrl->pmu_mux_offset; bank = ctrl->pin_banks; for (i = 0; i < ctrl->nr_banks; ++i, ++bank) { + int bank_pins = 0; + spin_lock_init(&bank->slock); bank->drvdata = d; bank->pin_base = ctrl->nr_pins; ctrl->nr_pins += bank->nr_pins; + + /* calculate iomux offsets */ + for (j = 0; j < 4; j++) { + struct rockchip_iomux *iom = &bank->iomux[j]; + int inc; + + if (bank_pins >= bank->nr_pins) + break; + + /* preset offset value, set new start value */ + if (iom->offset >= 0) { + if (iom->type & IOMUX_SOURCE_PMU) + pmu_offs = iom->offset; + else + grf_offs = iom->offset; + } else { /* set current offset */ + iom->offset = (iom->type & IOMUX_SOURCE_PMU) ? + pmu_offs : grf_offs; + } + + dev_dbg(d->dev, "bank %d, iomux %d has offset 0x%x\n", + i, j, iom->offset); + + /* + * Increase offset according to iomux width. + * 4bit iomux'es are spread over two registers. + */ + inc = (iom->type & IOMUX_WIDTH_4BIT) ? 
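/*
 * Editor's note -- standalone, simplified sketch of the iomux offset
 * auto-calculation the probe path above performs for each bank: offsets
 * are handed out from two running counters (GRF and PMU), a preset offset
 * resets the relevant counter, and a 4-bit iomux consumes two registers
 * (8 bytes) versus one (4 bytes) for a 2-bit iomux.  Flag values and
 * start offsets here are illustrative only.
 */
#include <assert.h>

#define EX_IOMUX_WIDTH_4BIT     0x1
#define EX_IOMUX_SOURCE_PMU     0x2

struct ex_iomux {
        int type;
        int offset;     /* -1: take the next value from the counter */
};

static void assign_offsets(struct ex_iomux *iomux, int n,
                           int *grf_offs, int *pmu_offs)
{
        int j;

        for (j = 0; j < n; j++) {
                int *offs = (iomux[j].type & EX_IOMUX_SOURCE_PMU) ?
                                                pmu_offs : grf_offs;
                int inc = (iomux[j].type & EX_IOMUX_WIDTH_4BIT) ? 8 : 4;

                if (iomux[j].offset >= 0)
                        *offs = iomux[j].offset;  /* preset resets counter */
                else
                        iomux[j].offset = *offs;

                *offs += inc;
        }
}

int main(void)
{
        struct ex_iomux bank[4] = {
                { 0, -1 },
                { EX_IOMUX_WIDTH_4BIT, -1 },
                { 0, -1 },
                { EX_IOMUX_SOURCE_PMU, -1 },
        };
        int grf = 0x0, pmu = 0x84;

        assign_offsets(bank, 4, &grf, &pmu);
        assert(bank[0].offset == 0x0);
        assert(bank[1].offset == 0x4);  /* the 2-bit iomux used one register */
        assert(bank[2].offset == 0xc);  /* the 4-bit iomux used two */
        assert(bank[3].offset == 0x84); /* the PMU counter is independent */
        return 0;
}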
8 : 4; + if (iom->type & IOMUX_SOURCE_PMU) + pmu_offs += inc; + else + grf_offs += inc; + + bank_pins += 8; + } } return ctrl; @@ -1644,7 +1868,7 @@ static struct rockchip_pin_ctrl rk2928_pin_ctrl = { .nr_banks = ARRAY_SIZE(rk2928_pin_banks), .label = "RK2928-GPIO", .type = RK2928, - .mux_offset = 0xa8, + .grf_mux_offset = 0xa8, .pull_calc_reg = rk2928_calc_pull_reg_and_bit, }; @@ -1662,7 +1886,7 @@ static struct rockchip_pin_ctrl rk3066a_pin_ctrl = { .nr_banks = ARRAY_SIZE(rk3066a_pin_banks), .label = "RK3066a-GPIO", .type = RK2928, - .mux_offset = 0xa8, + .grf_mux_offset = 0xa8, .pull_calc_reg = rk2928_calc_pull_reg_and_bit, }; @@ -1678,11 +1902,11 @@ static struct rockchip_pin_ctrl rk3066b_pin_ctrl = { .nr_banks = ARRAY_SIZE(rk3066b_pin_banks), .label = "RK3066b-GPIO", .type = RK3066B, - .mux_offset = 0x60, + .grf_mux_offset = 0x60, }; static struct rockchip_pin_bank rk3188_pin_banks[] = { - PIN_BANK(0, 32, "gpio0"), + PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_GPIO_ONLY, 0, 0, 0), PIN_BANK(1, 32, "gpio1"), PIN_BANK(2, 32, "gpio2"), PIN_BANK(3, 32, "gpio3"), @@ -1693,10 +1917,52 @@ static struct rockchip_pin_ctrl rk3188_pin_ctrl = { .nr_banks = ARRAY_SIZE(rk3188_pin_banks), .label = "RK3188-GPIO", .type = RK3188, - .mux_offset = 0x60, + .grf_mux_offset = 0x60, .pull_calc_reg = rk3188_calc_pull_reg_and_bit, }; +static struct rockchip_pin_bank rk3288_pin_banks[] = { + PIN_BANK_IOMUX_FLAGS(0, 24, "gpio0", IOMUX_SOURCE_PMU, + IOMUX_SOURCE_PMU, + IOMUX_SOURCE_PMU, + IOMUX_UNROUTED + ), + PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_UNROUTED, + IOMUX_UNROUTED, + IOMUX_UNROUTED, + 0 + ), + PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, 0, 0, IOMUX_UNROUTED), + PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", 0, 0, 0, IOMUX_WIDTH_4BIT), + PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_4BIT, + IOMUX_WIDTH_4BIT, + 0, + 0 + ), + PIN_BANK_IOMUX_FLAGS(5, 32, "gpio5", IOMUX_UNROUTED, + 0, + 0, + IOMUX_UNROUTED + ), + PIN_BANK_IOMUX_FLAGS(6, 32, "gpio6", 0, 0, 0, IOMUX_UNROUTED), + PIN_BANK_IOMUX_FLAGS(7, 32, "gpio7", 0, + 0, + IOMUX_WIDTH_4BIT, + IOMUX_UNROUTED + ), + PIN_BANK(8, 16, "gpio8"), +}; + +static struct rockchip_pin_ctrl rk3288_pin_ctrl = { + .pin_banks = rk3288_pin_banks, + .nr_banks = ARRAY_SIZE(rk3288_pin_banks), + .label = "RK3288-GPIO", + .type = RK3288, + .grf_mux_offset = 0x0, + .pmu_mux_offset = 0x84, + .pull_calc_reg = rk3288_calc_pull_reg_and_bit, +}; + static const struct of_device_id rockchip_pinctrl_dt_match[] = { { .compatible = "rockchip,rk2928-pinctrl", .data = (void *)&rk2928_pin_ctrl }, @@ -1706,6 +1972,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = { .data = (void *)&rk3066b_pin_ctrl }, { .compatible = "rockchip,rk3188-pinctrl", .data = (void *)&rk3188_pin_ctrl }, + { .compatible = "rockchip,rk3288-pinctrl", + .data = (void *)&rk3288_pin_ctrl }, {}, }; MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 2960557bfed9..fb94b772ad62 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -447,7 +447,7 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin, return 0; } -static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector, +static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector, unsigned group) { struct pcs_device *pcs; @@ -488,61 +488,6 @@ static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector, return 0; } -static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector, - unsigned 
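/*
 * Editor's note -- minimal sketch (kernel context assumed, not standalone
 * buildable) of the pinmux_ops shape this whole series converges on: the
 * .enable/.disable pair is replaced by a single .set_mux callback and no
 * explicit disable hook is provided anymore.  ex_set_mux() is a
 * hypothetical driver callback; the mandatory get_functions_count/
 * get_function_name/get_function_groups hooks are omitted for brevity.
 */
#include <linux/pinctrl/pinmux.h>

static int ex_set_mux(struct pinctrl_dev *pctldev, unsigned func_selector,
                      unsigned group_selector)
{
        /* program every pin of the group for the selected function here */
        return 0;
}

static const struct pinmux_ops ex_pmx_ops = {
        .set_mux = ex_set_mux,
};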
group) -{ - struct pcs_device *pcs; - struct pcs_function *func; - int i; - - pcs = pinctrl_dev_get_drvdata(pctldev); - /* If function mask is null, needn't disable it. */ - if (!pcs->fmask) - return; - - func = radix_tree_lookup(&pcs->ftree, fselector); - if (!func) { - dev_err(pcs->dev, "%s could not find function%i\n", - __func__, fselector); - return; - } - - /* - * Ignore disable if function-off is not specified. Some hardware - * does not have clearly defined disable function. For pin specific - * off modes, you can use alternate named states as described in - * pinctrl-bindings.txt. - */ - if (pcs->foff == PCS_OFF_DISABLED) { - dev_dbg(pcs->dev, "ignoring disable for %s function%i\n", - func->name, fselector); - return; - } - - dev_dbg(pcs->dev, "disabling function%i %s\n", - fselector, func->name); - - for (i = 0; i < func->nvals; i++) { - struct pcs_func_vals *vals; - unsigned long flags; - unsigned val, mask; - - vals = &func->vals[i]; - raw_spin_lock_irqsave(&pcs->lock, flags); - val = pcs->read(vals->reg); - - if (pcs->bits_per_mux) - mask = vals->mask; - else - mask = pcs->fmask; - - val &= ~mask; - val |= pcs->foff << pcs->fshift; - pcs->write(val, vals->reg); - raw_spin_unlock_irqrestore(&pcs->lock, flags); - } -} - static int pcs_request_gpio(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin) { @@ -574,8 +519,7 @@ static const struct pinmux_ops pcs_pinmux_ops = { .get_functions_count = pcs_get_functions_count, .get_function_name = pcs_get_function_name, .get_function_groups = pcs_get_function_groups, - .enable = pcs_enable, - .disable = pcs_disable, + .set_mux = pcs_set_mux, .gpio_request_enable = pcs_request_gpio, }; @@ -836,7 +780,7 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, pin = &pcs->pins.pa[i]; pn = &pcs->names[i]; - sprintf(pn->name, "%lx.%d", + sprintf(pn->name, "%lx.%u", (unsigned long)pcs->res->start + offset, pin_pos); pin->name = pn->name; pin->number = i; @@ -1739,11 +1683,10 @@ static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc) { struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc); struct irq_chip *chip; - int res; chip = irq_get_chip(irq); chained_irq_enter(chip, desc); - res = pcs_irq_handle(pcs_soc); + pcs_irq_handle(pcs_soc); /* REVISIT: export and add handle_bad_irq(irq, desc)? 
*/ chained_irq_exit(chip, desc); @@ -2038,6 +1981,18 @@ static const struct pcs_soc_data pinctrl_single_omap_wkup = { .irq_status_mask = (1 << 15), /* OMAP_WAKEUP_EVENT */ }; +static const struct pcs_soc_data pinctrl_single_dra7 = { + .flags = PCS_QUIRK_SHARED_IRQ, + .irq_enable_mask = (1 << 24), /* WAKEUPENABLE */ + .irq_status_mask = (1 << 25), /* WAKEUPEVENT */ +}; + +static const struct pcs_soc_data pinctrl_single_am437x = { + .flags = PCS_QUIRK_SHARED_IRQ, + .irq_enable_mask = (1 << 29), /* OMAP_WAKEUP_EN */ + .irq_status_mask = (1 << 30), /* OMAP_WAKEUP_EVENT */ +}; + static const struct pcs_soc_data pinctrl_single = { }; @@ -2049,6 +2004,8 @@ static struct of_device_id pcs_of_match[] = { { .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup }, { .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup }, { .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup }, + { .compatible = "ti,dra7-padconf", .data = &pinctrl_single_dra7 }, + { .compatible = "ti,am437-padconf", .data = &pinctrl_single_am437x }, { .compatible = "pinctrl-single", .data = &pinctrl_single }, { .compatible = "pinconf-single", .data = &pinconf_single }, { }, diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 9f43916637ca..4b1792aad3d8 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -914,8 +914,8 @@ static struct st_pio_control *st_get_pio_control( return &bank->pc; } -static int st_pmx_enable(struct pinctrl_dev *pctldev, unsigned fselector, - unsigned group) +static int st_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned fselector, + unsigned group) { struct st_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); struct st_pinconf *conf = info->groups[group].pin_conf; @@ -930,11 +930,6 @@ static int st_pmx_enable(struct pinctrl_dev *pctldev, unsigned fselector, return 0; } -static void st_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) -{ -} - static int st_pmx_set_gpio_direction(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned gpio, bool input) @@ -956,8 +951,7 @@ static struct pinmux_ops st_pmxops = { .get_functions_count = st_pmx_get_funcs_count, .get_function_name = st_pmx_get_fname, .get_function_groups = st_pmx_get_groups, - .enable = st_pmx_enable, - .disable = st_pmx_disable, + .set_mux = st_pmx_set_mux, .gpio_set_direction = st_pmx_set_gpio_direction, }; @@ -1178,9 +1172,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np, const __be32 *list; struct property *pp; struct st_pinconf *conf; - phandle phandle; struct device_node *pins; - u32 pin; int i = 0, npins = 0, nr_props; pins = of_get_child_by_name(np, "st,pins"); @@ -1218,8 +1210,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np, conf = &grp->pin_conf[i]; /* bank & offset */ - phandle = be32_to_cpup(list++); - pin = be32_to_cpup(list++); + be32_to_cpup(list++); + be32_to_cpup(list++); conf->pin = of_get_named_gpio(pins, pp->name, 0); conf->name = pp->name; grp->pins[i] = conf->pin; @@ -1256,7 +1248,7 @@ static int st_pctl_parse_functions(struct device_node *np, func = &info->functions[index]; func->name = np->name; func->ngroups = of_get_child_count(np); - if (func->ngroups <= 0) { + if (func->ngroups == 0) { dev_err(info->dev, "No groups defined\n"); return -EINVAL; } @@ -1454,6 +1446,7 @@ static struct irq_chip st_gpio_irqchip = { .irq_mask = st_gpio_irq_mask, .irq_unmask = st_gpio_irq_unmask, .irq_set_type = st_gpio_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE, }; static int 
st_gpiolib_register_bank(struct st_pinctrl *info, @@ -1524,6 +1517,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info, 0, handle_simple_irq, IRQ_TYPE_LEVEL_LOW); if (err) { + gpiochip_remove(&bank->gpio_chip); dev_info(dev, "could not add irqchip\n"); return err; } diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c index 26ca6855f478..3b9bfcf717ac 100644 --- a/drivers/pinctrl/pinctrl-tb10x.c +++ b/drivers/pinctrl/pinctrl-tb10x.c @@ -697,7 +697,7 @@ static void tb10x_gpio_disable_free(struct pinctrl_dev *pctl, mutex_unlock(&state->mutex); } -static int tb10x_pctl_enable(struct pinctrl_dev *pctl, +static int tb10x_pctl_set_mux(struct pinctrl_dev *pctl, unsigned func_selector, unsigned group_selector) { struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl); @@ -738,30 +738,13 @@ static int tb10x_pctl_enable(struct pinctrl_dev *pctl, return 0; } -static void tb10x_pctl_disable(struct pinctrl_dev *pctl, - unsigned func_selector, unsigned group_selector) -{ - struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl); - const struct tb10x_pinfuncgrp *grp = &state->pingroups[group_selector]; - - if (grp->port < 0) - return; - - mutex_lock(&state->mutex); - - state->ports[grp->port].count--; - - mutex_unlock(&state->mutex); -} - static struct pinmux_ops tb10x_pinmux_ops = { .get_functions_count = tb10x_get_functions_count, .get_function_name = tb10x_get_function_name, .get_function_groups = tb10x_get_function_groups, .gpio_request_enable = tb10x_gpio_request_enable, .gpio_disable_free = tb10x_gpio_disable_free, - .enable = tb10x_pctl_enable, - .disable = tb10x_pctl_disable, + .set_mux = tb10x_pctl_set_mux, }; static struct pinctrl_desc tb10x_pindesc = { diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c new file mode 100644 index 000000000000..1631ec94fb02 --- /dev/null +++ b/drivers/pinctrl/pinctrl-tegra-xusb.c @@ -0,0 +1,974 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h> + +#include "core.h" +#include "pinctrl-utils.h" + +#define XUSB_PADCTL_ELPG_PROGRAM 0x01c +#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN (1 << 26) +#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 25) +#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN (1 << 24) + +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1 0x040 +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET (1 << 19) +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK (0xf << 12) +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST (1 << 1) + +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2 0x044 +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN (1 << 6) +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN (1 << 5) +#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL (1 << 4) + +#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1 0x138 +#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET (1 << 27) +#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE (1 << 24) +#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD (1 << 3) +#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST (1 << 1) +#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ (1 << 0) + +#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1 0x148 +#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD (1 << 1) +#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ (1 << 0) + +struct tegra_xusb_padctl_function { + const char *name; + const char * const *groups; + unsigned int num_groups; +}; + +struct tegra_xusb_padctl_group { + const unsigned int *funcs; + unsigned int num_funcs; +}; + +struct tegra_xusb_padctl_soc { + const struct pinctrl_pin_desc *pins; + unsigned int num_pins; + + const struct tegra_xusb_padctl_function *functions; + unsigned int num_functions; + + const struct tegra_xusb_padctl_lane *lanes; + unsigned int num_lanes; +}; + +struct tegra_xusb_padctl_lane { + const char *name; + + unsigned int offset; + unsigned int shift; + unsigned int mask; + unsigned int iddq; + + const unsigned int *funcs; + unsigned int num_funcs; +}; + +struct tegra_xusb_padctl { + struct device *dev; + void __iomem *regs; + struct mutex lock; + struct reset_control *rst; + + const struct tegra_xusb_padctl_soc *soc; + struct pinctrl_dev *pinctrl; + struct pinctrl_desc desc; + + struct phy_provider *provider; + struct phy *phys[2]; + + unsigned int enable; +}; + +static inline void padctl_writel(struct tegra_xusb_padctl *padctl, u32 value, + unsigned long offset) +{ + writel(value, padctl->regs + offset); +} + +static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl, + unsigned long offset) +{ + return readl(padctl->regs + offset); +} + +static int tegra_xusb_padctl_get_groups_count(struct pinctrl_dev *pinctrl) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + + return padctl->soc->num_pins; +} + +static const char *tegra_xusb_padctl_get_group_name(struct pinctrl_dev *pinctrl, + unsigned int group) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + + return padctl->soc->pins[group].name; +} + +enum tegra_xusb_padctl_param { + TEGRA_XUSB_PADCTL_IDDQ, +}; + +static const struct tegra_xusb_padctl_property { + const char *name; + enum tegra_xusb_padctl_param param; +} properties[] = { + { "nvidia,iddq", TEGRA_XUSB_PADCTL_IDDQ }, +}; + +#define 
TEGRA_XUSB_PADCTL_PACK(param, value) ((param) << 16 | (value)) +#define TEGRA_XUSB_PADCTL_UNPACK_PARAM(config) ((config) >> 16) +#define TEGRA_XUSB_PADCTL_UNPACK_VALUE(config) ((config) & 0xffff) + +static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl, + struct device_node *np, + struct pinctrl_map **maps, + unsigned int *reserved_maps, + unsigned int *num_maps) +{ + unsigned int i, reserve = 0, num_configs = 0; + unsigned long config, *configs = NULL; + const char *function, *group; + struct property *prop; + int err = 0; + u32 value; + + err = of_property_read_string(np, "nvidia,function", &function); + if (err < 0) { + if (err != -EINVAL) + return err; + + function = NULL; + } + + for (i = 0; i < ARRAY_SIZE(properties); i++) { + err = of_property_read_u32(np, properties[i].name, &value); + if (err < 0) { + if (err == -EINVAL) + continue; + + return err; + } + + config = TEGRA_XUSB_PADCTL_PACK(properties[i].param, value); + + err = pinctrl_utils_add_config(padctl->pinctrl, &configs, + &num_configs, config); + if (err < 0) + return err; + } + + if (function) + reserve++; + + if (num_configs) + reserve++; + + err = of_property_count_strings(np, "nvidia,lanes"); + if (err < 0) + return err; + + reserve *= err; + + err = pinctrl_utils_reserve_map(padctl->pinctrl, maps, reserved_maps, + num_maps, reserve); + if (err < 0) + return err; + + of_property_for_each_string(np, "nvidia,lanes", prop, group) { + if (function) { + err = pinctrl_utils_add_map_mux(padctl->pinctrl, maps, + reserved_maps, num_maps, group, + function); + if (err < 0) + return err; + } + + if (num_configs) { + err = pinctrl_utils_add_map_configs(padctl->pinctrl, + maps, reserved_maps, num_maps, group, + configs, num_configs, + PIN_MAP_TYPE_CONFIGS_GROUP); + if (err < 0) + return err; + } + } + + return 0; +} + +static int tegra_xusb_padctl_dt_node_to_map(struct pinctrl_dev *pinctrl, + struct device_node *parent, + struct pinctrl_map **maps, + unsigned int *num_maps) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + unsigned int reserved_maps = 0; + struct device_node *np; + int err; + + *num_maps = 0; + *maps = NULL; + + for_each_child_of_node(parent, np) { + err = tegra_xusb_padctl_parse_subnode(padctl, np, maps, + &reserved_maps, + num_maps); + if (err < 0) + return err; + } + + return 0; +} + +static const struct pinctrl_ops tegra_xusb_padctl_pinctrl_ops = { + .get_groups_count = tegra_xusb_padctl_get_groups_count, + .get_group_name = tegra_xusb_padctl_get_group_name, + .dt_node_to_map = tegra_xusb_padctl_dt_node_to_map, + .dt_free_map = pinctrl_utils_dt_free_map, +}; + +static int tegra_xusb_padctl_get_functions_count(struct pinctrl_dev *pinctrl) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + + return padctl->soc->num_functions; +} + +static const char * +tegra_xusb_padctl_get_function_name(struct pinctrl_dev *pinctrl, + unsigned int function) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + + return padctl->soc->functions[function].name; +} + +static int tegra_xusb_padctl_get_function_groups(struct pinctrl_dev *pinctrl, + unsigned int function, + const char * const **groups, + unsigned * const num_groups) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + + *num_groups = padctl->soc->functions[function].num_groups; + *groups = padctl->soc->functions[function].groups; + + return 0; +} + +static int tegra_xusb_padctl_pinmux_set(struct pinctrl_dev *pinctrl, + unsigned int function, + unsigned int 
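/*
 * Editor's note -- standalone sketch of the packing scheme behind the
 * TEGRA_XUSB_PADCTL_PACK/UNPACK macros above: a pinconf "config" is one
 * unsigned long with the parameter id in bits 31:16 and its value in
 * bits 15:0, which is what the group_get/group_set callbacks shuffle
 * around.  The EX_* names mirror the macros but are local to the example.
 */
#include <assert.h>

#define EX_PACK(param, value)   ((unsigned long)(param) << 16 | (value))
#define EX_UNPACK_PARAM(config) ((config) >> 16)
#define EX_UNPACK_VALUE(config) ((config) & 0xffff)

enum { EX_PADCTL_IDDQ };        /* mirrors TEGRA_XUSB_PADCTL_IDDQ */

int main(void)
{
        unsigned long config = EX_PACK(EX_PADCTL_IDDQ, 1);

        assert(EX_UNPACK_PARAM(config) == EX_PADCTL_IDDQ);
        assert(EX_UNPACK_VALUE(config) == 1);
        return 0;
}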
group) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + const struct tegra_xusb_padctl_lane *lane; + unsigned int i; + u32 value; + + lane = &padctl->soc->lanes[group]; + + for (i = 0; i < lane->num_funcs; i++) + if (lane->funcs[i] == function) + break; + + if (i >= lane->num_funcs) + return -EINVAL; + + value = padctl_readl(padctl, lane->offset); + value &= ~(lane->mask << lane->shift); + value |= i << lane->shift; + padctl_writel(padctl, value, lane->offset); + + return 0; +} + +static const struct pinmux_ops tegra_xusb_padctl_pinmux_ops = { + .get_functions_count = tegra_xusb_padctl_get_functions_count, + .get_function_name = tegra_xusb_padctl_get_function_name, + .get_function_groups = tegra_xusb_padctl_get_function_groups, + .set_mux = tegra_xusb_padctl_pinmux_set, +}; + +static int tegra_xusb_padctl_pinconf_group_get(struct pinctrl_dev *pinctrl, + unsigned int group, + unsigned long *config) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + const struct tegra_xusb_padctl_lane *lane; + enum tegra_xusb_padctl_param param; + u32 value; + + param = TEGRA_XUSB_PADCTL_UNPACK_PARAM(*config); + lane = &padctl->soc->lanes[group]; + + switch (param) { + case TEGRA_XUSB_PADCTL_IDDQ: + /* lanes with iddq == 0 don't support this parameter */ + if (lane->iddq == 0) + return -EINVAL; + + value = padctl_readl(padctl, lane->offset); + + if (value & BIT(lane->iddq)) + value = 0; + else + value = 1; + + *config = TEGRA_XUSB_PADCTL_PACK(param, value); + break; + + default: + dev_err(padctl->dev, "invalid configuration parameter: %04x\n", + param); + return -ENOTSUPP; + } + + return 0; +} + +static int tegra_xusb_padctl_pinconf_group_set(struct pinctrl_dev *pinctrl, + unsigned int group, + unsigned long *configs, + unsigned int num_configs) +{ + struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl); + const struct tegra_xusb_padctl_lane *lane; + enum tegra_xusb_padctl_param param; + unsigned long value; + unsigned int i; + u32 regval; + + lane = &padctl->soc->lanes[group]; + + for (i = 0; i < num_configs; i++) { + param = TEGRA_XUSB_PADCTL_UNPACK_PARAM(configs[i]); + value = TEGRA_XUSB_PADCTL_UNPACK_VALUE(configs[i]); + + switch (param) { + case TEGRA_XUSB_PADCTL_IDDQ: + /* lanes with iddq == 0 don't support this parameter */ + if (lane->iddq == 0) + return -EINVAL; + + regval = padctl_readl(padctl, lane->offset); + + if (value) + regval &= ~BIT(lane->iddq); + else + regval |= BIT(lane->iddq); + + padctl_writel(padctl, regval, lane->offset); + break; + + default: + dev_err(padctl->dev, + "invalid configuration parameter: %04x\n", + param); + return -ENOTSUPP; + } + } + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static const char *strip_prefix(const char *s) +{ + const char *comma = strchr(s, ','); + if (!comma) + return s; + + return comma + 1; +} + +static void +tegra_xusb_padctl_pinconf_group_dbg_show(struct pinctrl_dev *pinctrl, + struct seq_file *s, + unsigned int group) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(properties); i++) { + unsigned long config, value; + int err; + + config = TEGRA_XUSB_PADCTL_PACK(properties[i].param, 0); + + err = tegra_xusb_padctl_pinconf_group_get(pinctrl, group, + &config); + if (err < 0) + continue; + + value = TEGRA_XUSB_PADCTL_UNPACK_VALUE(config); + + seq_printf(s, "\n\t%s=%lu\n", strip_prefix(properties[i].name), + value); + } +} + +static void +tegra_xusb_padctl_pinconf_config_dbg_show(struct pinctrl_dev *pinctrl, + struct seq_file *s, + unsigned long config) +{ + enum 
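/*
 * Editor's note -- standalone sketch of the field update performed by
 * tegra_xusb_padctl_pinmux_set() above: clear the lane's mux field
 * (mask << shift) in the pad control register value and write the index
 * of the selected function into it.  The register is just a local
 * variable here; offsets and widths are illustrative.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t set_lane_mux(uint32_t regval, unsigned int shift,
                             uint32_t mask, uint32_t func_index)
{
        regval &= ~(mask << shift);
        regval |= (func_index & mask) << shift;
        return regval;
}

int main(void)
{
        /* a 2-bit lane field at bits 17:16, previously set to 0x3 */
        uint32_t reg = (0x3u << 16) | 0x5;

        reg = set_lane_mux(reg, 16, 0x3, 0x1);
        assert(((reg >> 16) & 0x3) == 0x1);
        assert((reg & 0xffff) == 0x5);  /* unrelated bits are preserved */
        return 0;
}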
tegra_xusb_padctl_param param; + const char *name = "unknown"; + unsigned long value; + unsigned int i; + + param = TEGRA_XUSB_PADCTL_UNPACK_PARAM(config); + value = TEGRA_XUSB_PADCTL_UNPACK_VALUE(config); + + for (i = 0; i < ARRAY_SIZE(properties); i++) { + if (properties[i].param == param) { + name = properties[i].name; + break; + } + } + + seq_printf(s, "%s=%lu", strip_prefix(name), value); +} +#endif + +static const struct pinconf_ops tegra_xusb_padctl_pinconf_ops = { + .pin_config_group_get = tegra_xusb_padctl_pinconf_group_get, + .pin_config_group_set = tegra_xusb_padctl_pinconf_group_set, +#ifdef CONFIG_DEBUG_FS + .pin_config_group_dbg_show = tegra_xusb_padctl_pinconf_group_dbg_show, + .pin_config_config_dbg_show = tegra_xusb_padctl_pinconf_config_dbg_show, +#endif +}; + +static int tegra_xusb_padctl_enable(struct tegra_xusb_padctl *padctl) +{ + u32 value; + + mutex_lock(&padctl->lock); + + if (padctl->enable++ > 0) + goto out; + + value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM); + value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN; + padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM); + + usleep_range(100, 200); + + value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM); + value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY; + padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM); + + usleep_range(100, 200); + + value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM); + value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN; + padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM); + +out: + mutex_unlock(&padctl->lock); + return 0; +} + +static int tegra_xusb_padctl_disable(struct tegra_xusb_padctl *padctl) +{ + u32 value; + + mutex_lock(&padctl->lock); + + if (WARN_ON(padctl->enable == 0)) + goto out; + + if (--padctl->enable > 0) + goto out; + + value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM); + value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN; + padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM); + + usleep_range(100, 200); + + value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM); + value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY; + padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM); + + usleep_range(100, 200); + + value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM); + value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN; + padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM); + +out: + mutex_unlock(&padctl->lock); + return 0; +} + +static int tegra_xusb_phy_init(struct phy *phy) +{ + struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy); + + return tegra_xusb_padctl_enable(padctl); +} + +static int tegra_xusb_phy_exit(struct phy *phy) +{ + struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy); + + return tegra_xusb_padctl_disable(padctl); +} + +static int pcie_phy_power_on(struct phy *phy) +{ + struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy); + unsigned long timeout; + int err = -ETIMEDOUT; + u32 value; + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1); + value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL2); + value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN | + XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN | + XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL2); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1); + value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST; + padctl_writel(padctl, 
value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1); + + timeout = jiffies + msecs_to_jiffies(50); + + while (time_before(jiffies, timeout)) { + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1); + if (value & XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET) { + err = 0; + break; + } + + usleep_range(100, 200); + } + + return err; +} + +static int pcie_phy_power_off(struct phy *phy) +{ + struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy); + u32 value; + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1); + value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1); + + return 0; +} + +static const struct phy_ops pcie_phy_ops = { + .init = tegra_xusb_phy_init, + .exit = tegra_xusb_phy_exit, + .power_on = pcie_phy_power_on, + .power_off = pcie_phy_power_off, + .owner = THIS_MODULE, +}; + +static int sata_phy_power_on(struct phy *phy) +{ + struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy); + unsigned long timeout; + int err = -ETIMEDOUT; + u32 value; + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1); + value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD; + value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD; + value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + + timeout = jiffies + msecs_to_jiffies(50); + + while (time_before(jiffies, timeout)) { + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + if (value & XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET) { + err = 0; + break; + } + + usleep_range(100, 200); + } + + return err; +} + +static int sata_phy_power_off(struct phy *phy) +{ + struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy); + u32 value; + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD; + value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1); + + value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1); + value |= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD; + value |= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ; + padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1); + + return 0; +} + +static const struct phy_ops sata_phy_ops = { + .init = tegra_xusb_phy_init, + .exit = tegra_xusb_phy_exit, + .power_on = sata_phy_power_on, + .power_off = sata_phy_power_off, + .owner = THIS_MODULE, +}; + +static struct phy *tegra_xusb_padctl_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev); + unsigned int index = args->args[0]; + + if 
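/*
 * Editor's note -- standalone model of the bounded polling loop used by
 * pcie_phy_power_on()/sata_phy_power_on() above.  The jiffies deadline is
 * replaced by a simple attempt budget and the register read by a fake
 * that reports lock after a few polls; the shape is the same: start from
 * a timeout error and clear it only once the lock bit shows up.
 */
#include <assert.h>
#include <stdint.h>

#define EX_LOCKDET      (1u << 19)      /* stands in for ..._PLL0_LOCKDET */

static uint32_t fake_pll_read(int *reads)
{
        /* pretend the PLL reports lock from the third read onwards */
        return (++(*reads) >= 3) ? EX_LOCKDET : 0;
}

static int wait_for_lock(int budget)
{
        int err = -1;   /* stands in for -ETIMEDOUT */
        int reads = 0;

        while (budget-- > 0) {
                if (fake_pll_read(&reads) & EX_LOCKDET) {
                        err = 0;
                        break;
                }
                /* the driver sleeps with usleep_range(100, 200) here */
        }
        return err;
}

int main(void)
{
        assert(wait_for_lock(10) == 0); /* locks within the budget */
        assert(wait_for_lock(1) != 0);  /* budget exhausted before lock */
        return 0;
}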
(args->args_count <= 0) + return ERR_PTR(-EINVAL); + + if (index >= ARRAY_SIZE(padctl->phys)) + return ERR_PTR(-EINVAL); + + return padctl->phys[index]; +} + +#define PIN_OTG_0 0 +#define PIN_OTG_1 1 +#define PIN_OTG_2 2 +#define PIN_ULPI_0 3 +#define PIN_HSIC_0 4 +#define PIN_HSIC_1 5 +#define PIN_PCIE_0 6 +#define PIN_PCIE_1 7 +#define PIN_PCIE_2 8 +#define PIN_PCIE_3 9 +#define PIN_PCIE_4 10 +#define PIN_SATA_0 11 + +static const struct pinctrl_pin_desc tegra124_pins[] = { + PINCTRL_PIN(PIN_OTG_0, "otg-0"), + PINCTRL_PIN(PIN_OTG_1, "otg-1"), + PINCTRL_PIN(PIN_OTG_2, "otg-2"), + PINCTRL_PIN(PIN_ULPI_0, "ulpi-0"), + PINCTRL_PIN(PIN_HSIC_0, "hsic-0"), + PINCTRL_PIN(PIN_HSIC_1, "hsic-1"), + PINCTRL_PIN(PIN_PCIE_0, "pcie-0"), + PINCTRL_PIN(PIN_PCIE_1, "pcie-1"), + PINCTRL_PIN(PIN_PCIE_2, "pcie-2"), + PINCTRL_PIN(PIN_PCIE_3, "pcie-3"), + PINCTRL_PIN(PIN_PCIE_4, "pcie-4"), + PINCTRL_PIN(PIN_SATA_0, "sata-0"), +}; + +static const char * const tegra124_snps_groups[] = { + "otg-0", + "otg-1", + "otg-2", + "ulpi-0", + "hsic-0", + "hsic-1", +}; + +static const char * const tegra124_xusb_groups[] = { + "otg-0", + "otg-1", + "otg-2", + "ulpi-0", + "hsic-0", + "hsic-1", +}; + +static const char * const tegra124_uart_groups[] = { + "otg-0", + "otg-1", + "otg-2", +}; + +static const char * const tegra124_pcie_groups[] = { + "pcie-0", + "pcie-1", + "pcie-2", + "pcie-3", + "pcie-4", + "sata-0", +}; + +static const char * const tegra124_usb3_groups[] = { + "pcie-0", + "pcie-1", + "pcie-2", + "pcie-3", + "pcie-4", + "sata-0", +}; + +static const char * const tegra124_sata_groups[] = { + "pcie-0", + "pcie-1", + "pcie-2", + "pcie-3", + "pcie-4", + "sata-0", +}; + +static const char * const tegra124_rsvd_groups[] = { + "otg-0", + "otg-1", + "otg-2", + "pcie-0", + "pcie-1", + "pcie-2", + "pcie-3", + "pcie-4", + "sata-0", +}; + +#define TEGRA124_FUNCTION(_name) \ + { \ + .name = #_name, \ + .num_groups = ARRAY_SIZE(tegra124_##_name##_groups), \ + .groups = tegra124_##_name##_groups, \ + } + +static struct tegra_xusb_padctl_function tegra124_functions[] = { + TEGRA124_FUNCTION(snps), + TEGRA124_FUNCTION(xusb), + TEGRA124_FUNCTION(uart), + TEGRA124_FUNCTION(pcie), + TEGRA124_FUNCTION(usb3), + TEGRA124_FUNCTION(sata), + TEGRA124_FUNCTION(rsvd), +}; + +enum tegra124_function { + TEGRA124_FUNC_SNPS, + TEGRA124_FUNC_XUSB, + TEGRA124_FUNC_UART, + TEGRA124_FUNC_PCIE, + TEGRA124_FUNC_USB3, + TEGRA124_FUNC_SATA, + TEGRA124_FUNC_RSVD, +}; + +static const unsigned int tegra124_otg_functions[] = { + TEGRA124_FUNC_SNPS, + TEGRA124_FUNC_XUSB, + TEGRA124_FUNC_UART, + TEGRA124_FUNC_RSVD, +}; + +static const unsigned int tegra124_usb_functions[] = { + TEGRA124_FUNC_SNPS, + TEGRA124_FUNC_XUSB, +}; + +static const unsigned int tegra124_pci_functions[] = { + TEGRA124_FUNC_PCIE, + TEGRA124_FUNC_USB3, + TEGRA124_FUNC_SATA, + TEGRA124_FUNC_RSVD, +}; + +#define TEGRA124_LANE(_name, _offset, _shift, _mask, _iddq, _funcs) \ + { \ + .name = _name, \ + .offset = _offset, \ + .shift = _shift, \ + .mask = _mask, \ + .iddq = _iddq, \ + .num_funcs = ARRAY_SIZE(tegra124_##_funcs##_functions), \ + .funcs = tegra124_##_funcs##_functions, \ + } + +static const struct tegra_xusb_padctl_lane tegra124_lanes[] = { + TEGRA124_LANE("otg-0", 0x004, 0, 0x3, 0, otg), + TEGRA124_LANE("otg-1", 0x004, 2, 0x3, 0, otg), + TEGRA124_LANE("otg-2", 0x004, 4, 0x3, 0, otg), + TEGRA124_LANE("ulpi-0", 0x004, 12, 0x1, 0, usb), + TEGRA124_LANE("hsic-0", 0x004, 14, 0x1, 0, usb), + TEGRA124_LANE("hsic-1", 0x004, 15, 0x1, 0, usb), + TEGRA124_LANE("pcie-0", 0x134, 16, 0x3, 1, 
pci), + TEGRA124_LANE("pcie-1", 0x134, 18, 0x3, 2, pci), + TEGRA124_LANE("pcie-2", 0x134, 20, 0x3, 3, pci), + TEGRA124_LANE("pcie-3", 0x134, 22, 0x3, 4, pci), + TEGRA124_LANE("pcie-4", 0x134, 24, 0x3, 5, pci), + TEGRA124_LANE("sata-0", 0x134, 26, 0x3, 6, pci), +}; + +static const struct tegra_xusb_padctl_soc tegra124_soc = { + .num_pins = ARRAY_SIZE(tegra124_pins), + .pins = tegra124_pins, + .num_functions = ARRAY_SIZE(tegra124_functions), + .functions = tegra124_functions, + .num_lanes = ARRAY_SIZE(tegra124_lanes), + .lanes = tegra124_lanes, +}; + +static const struct of_device_id tegra_xusb_padctl_of_match[] = { + { .compatible = "nvidia,tegra124-xusb-padctl", .data = &tegra124_soc }, + { } +}; +MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match); + +static int tegra_xusb_padctl_probe(struct platform_device *pdev) +{ + struct tegra_xusb_padctl *padctl; + const struct of_device_id *match; + struct resource *res; + struct phy *phy; + int err; + + padctl = devm_kzalloc(&pdev->dev, sizeof(*padctl), GFP_KERNEL); + if (!padctl) + return -ENOMEM; + + platform_set_drvdata(pdev, padctl); + mutex_init(&padctl->lock); + padctl->dev = &pdev->dev; + + match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node); + padctl->soc = match->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + padctl->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(padctl->regs)) + return PTR_ERR(padctl->regs); + + padctl->rst = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(padctl->rst)) + return PTR_ERR(padctl->rst); + + err = reset_control_deassert(padctl->rst); + if (err < 0) + return err; + + memset(&padctl->desc, 0, sizeof(padctl->desc)); + padctl->desc.name = dev_name(padctl->dev); + padctl->desc.pctlops = &tegra_xusb_padctl_pinctrl_ops; + padctl->desc.pmxops = &tegra_xusb_padctl_pinmux_ops; + padctl->desc.confops = &tegra_xusb_padctl_pinconf_ops; + padctl->desc.owner = THIS_MODULE; + + padctl->pinctrl = pinctrl_register(&padctl->desc, &pdev->dev, padctl); + if (!padctl->pinctrl) { + dev_err(&pdev->dev, "failed to register pincontrol\n"); + err = -ENODEV; + goto reset; + } + + phy = devm_phy_create(&pdev->dev, NULL, &pcie_phy_ops, NULL); + if (IS_ERR(phy)) { + err = PTR_ERR(phy); + goto unregister; + } + + padctl->phys[TEGRA_XUSB_PADCTL_PCIE] = phy; + phy_set_drvdata(phy, padctl); + + phy = devm_phy_create(&pdev->dev, NULL, &sata_phy_ops, NULL); + if (IS_ERR(phy)) { + err = PTR_ERR(phy); + goto unregister; + } + + padctl->phys[TEGRA_XUSB_PADCTL_SATA] = phy; + phy_set_drvdata(phy, padctl); + + padctl->provider = devm_of_phy_provider_register(&pdev->dev, + tegra_xusb_padctl_xlate); + if (IS_ERR(padctl->provider)) { + err = PTR_ERR(padctl->provider); + dev_err(&pdev->dev, "failed to register PHYs: %d\n", err); + goto unregister; + } + + return 0; + +unregister: + pinctrl_unregister(padctl->pinctrl); +reset: + reset_control_assert(padctl->rst); + return err; +} + +static int tegra_xusb_padctl_remove(struct platform_device *pdev) +{ + struct tegra_xusb_padctl *padctl = platform_get_drvdata(pdev); + int err; + + pinctrl_unregister(padctl->pinctrl); + + err = reset_control_assert(padctl->rst); + if (err < 0) + dev_err(&pdev->dev, "failed to assert reset: %d\n", err); + + return err; +} + +static struct platform_driver tegra_xusb_padctl_driver = { + .driver = { + .name = "tegra-xusb-padctl", + .of_match_table = tegra_xusb_padctl_of_match, + }, + .probe = tegra_xusb_padctl_probe, + .remove = tegra_xusb_padctl_remove, +}; +module_platform_driver(tegra_xusb_padctl_driver); + 
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>"); +MODULE_DESCRIPTION("Tegra 124 XUSB Pad Control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c index 2d43bff74f59..e5949d51bc52 100644 --- a/drivers/pinctrl/pinctrl-tegra.c +++ b/drivers/pinctrl/pinctrl-tegra.c @@ -262,8 +262,9 @@ static int tegra_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, return 0; } -static int tegra_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function, - unsigned group) +static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, + unsigned function, + unsigned group) { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tegra_pingroup *g; @@ -290,24 +291,11 @@ static int tegra_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function, return 0; } -static void tegra_pinctrl_disable(struct pinctrl_dev *pctldev, - unsigned function, unsigned group) -{ - struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); - const struct tegra_pingroup *g; - - g = &pmx->soc->groups[group]; - - if (WARN_ON(g->mux_reg < 0)) - return; -} - static const struct pinmux_ops tegra_pinmux_ops = { .get_functions_count = tegra_pinctrl_get_funcs_count, .get_function_name = tegra_pinctrl_get_func_name, .get_function_groups = tegra_pinctrl_get_func_groups, - .enable = tegra_pinctrl_enable, - .disable = tegra_pinctrl_disable, + .set_mux = tegra_pinctrl_set_mux, }; static int tegra_pinconf_reg(struct tegra_pmx *pmx, diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c index 33614baab4c0..a3db85b0b75f 100644 --- a/drivers/pinctrl/pinctrl-tegra114.c +++ b/drivers/pinctrl/pinctrl-tegra114.c @@ -1850,7 +1850,7 @@ static int tegra114_pinctrl_probe(struct platform_device *pdev) return tegra_pinctrl_probe(pdev, &tegra114_pinctrl); } -static struct of_device_id tegra114_pinctrl_of_match[] = { +static const struct of_device_id tegra114_pinctrl_of_match[] = { { .compatible = "nvidia,tegra114-pinmux", }, { }, }; diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c index e80797e20017..2f9b75c14967 100644 --- a/drivers/pinctrl/pinctrl-tegra124.c +++ b/drivers/pinctrl/pinctrl-tegra124.c @@ -224,6 +224,16 @@ #define TEGRA_PIN_OWR _PIN(5) #define TEGRA_PIN_CLK_32K_IN _PIN(6) #define TEGRA_PIN_JTAG_RTCK _PIN(7) +#define TEGRA_PIN_DSI_B_CLK_P _PIN(8) +#define TEGRA_PIN_DSI_B_CLK_N _PIN(9) +#define TEGRA_PIN_DSI_B_D0_P _PIN(10) +#define TEGRA_PIN_DSI_B_D0_N _PIN(11) +#define TEGRA_PIN_DSI_B_D1_P _PIN(12) +#define TEGRA_PIN_DSI_B_D1_N _PIN(13) +#define TEGRA_PIN_DSI_B_D2_P _PIN(14) +#define TEGRA_PIN_DSI_B_D2_N _PIN(15) +#define TEGRA_PIN_DSI_B_D3_P _PIN(16) +#define TEGRA_PIN_DSI_B_D3_N _PIN(17) static const struct pinctrl_pin_desc tegra124_pins[] = { PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"), @@ -417,6 +427,16 @@ static const struct pinctrl_pin_desc tegra124_pins[] = { PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"), PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"), PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_CLK_P, "DSI_B_CLK_P"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_CLK_N, "DSI_B_CLK_N"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_D0_P, "DSI_B_D0_P"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_D0_N, "DSI_B_D0_N"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_D1_P, "DSI_B_D1_P"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_D1_N, "DSI_B_D1_N"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_D2_P, "DSI_B_D2_P"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_D2_N, "DSI_B_D2_N"), + PINCTRL_PIN(TEGRA_PIN_DSI_B_D3_P, "DSI_B_D3_P"), + 
PINCTRL_PIN(TEGRA_PIN_DSI_B_D3_N, "DSI_B_D3_N"), }; static const unsigned clk_32k_out_pa0_pins[] = { @@ -1495,6 +1515,19 @@ static const unsigned drive_ao4_pins[] = { TEGRA_PIN_JTAG_RTCK, }; +static const unsigned mipi_pad_ctrl_dsi_b_pins[] = { + TEGRA_PIN_DSI_B_CLK_P, + TEGRA_PIN_DSI_B_CLK_N, + TEGRA_PIN_DSI_B_D0_P, + TEGRA_PIN_DSI_B_D0_N, + TEGRA_PIN_DSI_B_D1_P, + TEGRA_PIN_DSI_B_D1_N, + TEGRA_PIN_DSI_B_D2_P, + TEGRA_PIN_DSI_B_D2_N, + TEGRA_PIN_DSI_B_D3_P, + TEGRA_PIN_DSI_B_D3_N, +}; + enum tegra_mux { TEGRA_MUX_BLINK, TEGRA_MUX_CCLA, @@ -1580,6 +1613,8 @@ enum tegra_mux { TEGRA_MUX_VI_ALT3, TEGRA_MUX_VIMCLK2, TEGRA_MUX_VIMCLK2_ALT, + TEGRA_MUX_CSI, + TEGRA_MUX_DSI_B, }; #define FUNCTION(fname) \ @@ -1672,10 +1707,13 @@ static struct tegra_function tegra124_functions[] = { FUNCTION(vi_alt3), FUNCTION(vimclk2), FUNCTION(vimclk2_alt), + FUNCTION(csi), + FUNCTION(dsi_b), }; #define DRV_PINGROUP_REG_A 0x868 /* bank 0 */ #define PINGROUP_REG_A 0x3000 /* bank 1 */ +#define MIPI_PAD_CTRL_PINGROUP_REG_A 0x820 /* bank 2 */ #define PINGROUP_REG(r) ((r) - PINGROUP_REG_A) @@ -1744,6 +1782,32 @@ static struct tegra_function tegra124_functions[] = { .drvtype_bit = PINGROUP_BIT_##drvtype(6), \ } +#define MIPI_PAD_CTRL_PINGROUP_REG_Y(r) ((r) - MIPI_PAD_CTRL_PINGROUP_REG_A) + +#define MIPI_PAD_CTRL_PINGROUP(pg_name, r, b, f0, f1) \ + { \ + .name = "mipi_pad_ctrl_" #pg_name, \ + .pins = mipi_pad_ctrl_##pg_name##_pins, \ + .npins = ARRAY_SIZE(mipi_pad_ctrl_##pg_name##_pins), \ + .funcs = { \ + TEGRA_MUX_ ## f0, \ + TEGRA_MUX_ ## f1, \ + TEGRA_MUX_RSVD3, \ + TEGRA_MUX_RSVD4, \ + }, \ + .mux_reg = MIPI_PAD_CTRL_PINGROUP_REG_Y(r), \ + .mux_bank = 2, \ + .mux_bit = b, \ + .pupd_reg = -1, \ + .tri_reg = -1, \ + .einput_bit = -1, \ + .odrain_bit = -1, \ + .lock_bit = -1, \ + .ioreset_bit = -1, \ + .rcv_sel_bit = -1, \ + .drv_reg = -1, \ + } + static const struct tegra_pingroup tegra124_groups[] = { /* pg_name, f0, f1, f2, f3, r, od, ior, rcv_sel */ PINGROUP(ulpi_data0_po1, SPI3, HSI, UARTA, ULPI, 0x3000, N, N, N), @@ -1979,6 +2043,9 @@ static const struct tegra_pingroup tegra124_groups[] = { DRV_PINGROUP(hv0, 0x9b4, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N), DRV_PINGROUP(sdio4, 0x9c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N), DRV_PINGROUP(ao4, 0x9c8, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y), + + /* pg_name, r b f0, f1 */ + MIPI_PAD_CTRL_PINGROUP(dsi_b, 0x820, 1, CSI, DSI_B) }; static const struct tegra_pinctrl_soc_data tegra124_pinctrl = { @@ -1996,7 +2063,7 @@ static int tegra124_pinctrl_probe(struct platform_device *pdev) return tegra_pinctrl_probe(pdev, &tegra124_pinctrl); } -static struct of_device_id tegra124_pinctrl_of_match[] = { +static const struct of_device_id tegra124_pinctrl_of_match[] = { { .compatible = "nvidia,tegra124-pinmux", }, { }, }; diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/pinctrl-tegra20.c index 7563ebc9c791..c9805d2e71b0 100644 --- a/drivers/pinctrl/pinctrl-tegra20.c +++ b/drivers/pinctrl/pinctrl-tegra20.c @@ -2228,7 +2228,7 @@ static int tegra20_pinctrl_probe(struct platform_device *pdev) return tegra_pinctrl_probe(pdev, &tegra20_pinctrl); } -static struct of_device_id tegra20_pinctrl_of_match[] = { +static const struct of_device_id tegra20_pinctrl_of_match[] = { { .compatible = "nvidia,tegra20-pinmux", }, { }, }; diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/pinctrl-tegra30.c index fe2d2cf78ad9..e7b72e916558 100644 --- a/drivers/pinctrl/pinctrl-tegra30.c +++ b/drivers/pinctrl/pinctrl-tegra30.c @@ -2484,7 +2484,7 @@ static int 
tegra30_pinctrl_probe(struct platform_device *pdev) return tegra_pinctrl_probe(pdev, &tegra30_pinctrl); } -static struct of_device_id tegra30_pinctrl_of_match[] = { +static const struct of_device_id tegra30_pinctrl_of_match[] = { { .compatible = "nvidia,tegra30-pinmux", }, { }, }; diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c index 5bf01c28925e..3bb6a3b78864 100644 --- a/drivers/pinctrl/pinctrl-tz1090-pdc.c +++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c @@ -547,8 +547,9 @@ static void tz1090_pdc_pinctrl_mux(struct tz1090_pdc_pmx *pmx, __global_unlock2(flags); } -static int tz1090_pdc_pinctrl_enable(struct pinctrl_dev *pctldev, - unsigned int function, unsigned int group) +static int tz1090_pdc_pinctrl_set_mux(struct pinctrl_dev *pctldev, + unsigned int function, + unsigned int group) { struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[group]; @@ -574,33 +575,6 @@ static int tz1090_pdc_pinctrl_enable(struct pinctrl_dev *pctldev, return 0; } -static void tz1090_pdc_pinctrl_disable(struct pinctrl_dev *pctldev, - unsigned int function, - unsigned int group) -{ - struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); - const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[group]; - - dev_dbg(pctldev->dev, "%s(func=%u (%s), group=%u (%s))\n", - __func__, - function, tz1090_pdc_functions[function].name, - group, tz1090_pdc_groups[group].name); - - /* is it even a mux? */ - if (grp->drv) - return; - - /* does this group even control the function? */ - if (function != grp->func) - return; - - /* record the pin being unmuxed and update mux bit */ - spin_lock(&pmx->lock); - pmx->mux_en &= ~BIT(grp->pins[0]); - tz1090_pdc_pinctrl_mux(pmx, grp); - spin_unlock(&pmx->lock); -} - static const struct tz1090_pdc_pingroup *find_mux_group( struct tz1090_pdc_pmx *pmx, unsigned int pin) @@ -661,8 +635,7 @@ static struct pinmux_ops tz1090_pdc_pinmux_ops = { .get_functions_count = tz1090_pdc_pinctrl_get_funcs_count, .get_function_name = tz1090_pdc_pinctrl_get_func_name, .get_function_groups = tz1090_pdc_pinctrl_get_func_groups, - .enable = tz1090_pdc_pinctrl_enable, - .disable = tz1090_pdc_pinctrl_disable, + .set_mux = tz1090_pdc_pinctrl_set_mux, .gpio_request_enable = tz1090_pdc_pinctrl_gpio_request_enable, .gpio_disable_free = tz1090_pdc_pinctrl_gpio_disable_free, }; diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c index bc9cd7a7602e..48d36413b99f 100644 --- a/drivers/pinctrl/pinctrl-tz1090.c +++ b/drivers/pinctrl/pinctrl-tz1090.c @@ -1415,8 +1415,8 @@ found_mux: * the effect is the same as enabling the function on each individual pin in the * group. */ -static int tz1090_pinctrl_enable(struct pinctrl_dev *pctldev, - unsigned int function, unsigned int group) +static int tz1090_pinctrl_set_mux(struct pinctrl_dev *pctldev, + unsigned int function, unsigned int group) { struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); struct tz1090_pingroup *grp; @@ -1479,63 +1479,6 @@ mux_pins: } /** - * tz1090_pinctrl_disable() - Disable a function on a pin group. - * @pctldev: Pin control data - * @function: Function index to disable - * @group: Group index to disable - * - * Disable a particular function on a group of pins. The per GPIO pin pseudo pin - * groups can be used (in which case the pin will be taken out of peripheral - * mode. 
Some convenience pin groups can also be used in which case the effect - * is the same as enabling the function on each individual pin in the group. - */ -static void tz1090_pinctrl_disable(struct pinctrl_dev *pctldev, - unsigned int function, unsigned int group) -{ - struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); - struct tz1090_pingroup *grp; - unsigned int pin_num, mux_group, i, npins; - const unsigned int *pins; - - /* group of pins? */ - if (group < ARRAY_SIZE(tz1090_groups)) { - grp = &tz1090_groups[group]; - npins = grp->npins; - pins = grp->pins; - /* - * All pins in the group must belong to the same mux group, - * which allows us to just use the mux group of the first pin. - * By explicitly listing permitted pingroups for each function - * the pinmux core should ensure this is always the case. - */ - } else { - pin_num = group - ARRAY_SIZE(tz1090_groups); - npins = 1; - pins = &pin_num; - } - mux_group = tz1090_mux_pins[*pins]; - - /* no mux group, but can still be individually muxed to peripheral */ - if (mux_group >= TZ1090_MUX_GROUP_MAX) { - if (function == TZ1090_MUX_PERIP) - goto unmux_pins; - return; - } - - /* mux group already set to a different function? */ - grp = &tz1090_mux_groups[mux_group]; - dev_dbg(pctldev->dev, "%s: unmuxing %u pin(s) in '%s' from '%s'\n", - __func__, npins, grp->name, tz1090_functions[function].name); - - /* subtract pins from ref count and unmux individually */ - WARN_ON(grp->func_count < npins); - grp->func_count -= npins; -unmux_pins: - for (i = 0; i < npins; ++i) - tz1090_pinctrl_perip_select(pmx, pins[i], false); -} - -/** * tz1090_pinctrl_gpio_request_enable() - Put pin in GPIO mode. * @pctldev: Pin control data * @range: GPIO range @@ -1574,8 +1517,7 @@ static struct pinmux_ops tz1090_pinmux_ops = { .get_functions_count = tz1090_pinctrl_get_funcs_count, .get_function_name = tz1090_pinctrl_get_func_name, .get_function_groups = tz1090_pinctrl_get_func_groups, - .enable = tz1090_pinctrl_enable, - .disable = tz1090_pinctrl_disable, + .set_mux = tz1090_pinctrl_set_mux, .gpio_request_enable = tz1090_pinctrl_gpio_request_enable, .gpio_disable_free = tz1090_pinctrl_gpio_disable_free, }; diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c index 209a01b8bd3b..e9c7113d81f2 100644 --- a/drivers/pinctrl/pinctrl-u300.c +++ b/drivers/pinctrl/pinctrl-u300.c @@ -955,8 +955,8 @@ static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector, } } -static int u300_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int u300_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) { struct u300_pmx *upmx; @@ -970,19 +970,6 @@ static int u300_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector, return 0; } -static void u300_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) -{ - struct u300_pmx *upmx; - - /* There is nothing to do with the power pins */ - if (selector == 0) - return; - - upmx = pinctrl_dev_get_drvdata(pctldev); - u300_pmx_endisable(upmx, selector, false); -} - static int u300_pmx_get_funcs_count(struct pinctrl_dev *pctldev) { return ARRAY_SIZE(u300_pmx_functions); @@ -1007,8 +994,7 @@ static const struct pinmux_ops u300_pmx_ops = { .get_functions_count = u300_pmx_get_funcs_count, .get_function_name = u300_pmx_get_func_name, .get_function_groups = u300_pmx_get_groups, - .enable = u300_pmx_enable, - .disable = u300_pmx_disable, + .set_mux = u300_pmx_set_mux, }; static int u300_pin_config_get(struct pinctrl_dev 
*pctldev, unsigned pin, diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index e66f4cae7633..37040ab42890 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -801,6 +801,7 @@ static int pinmux_xway_probe(struct platform_device *pdev) of_gpiochip_add(&xway_chip); ret = gpiochip_add(&xway_chip); if (ret) { + of_gpiochip_remove(&xway_chip); dev_err(&pdev->dev, "Failed to register gpio chip\n"); return ret; } @@ -822,6 +823,7 @@ static int pinmux_xway_probe(struct platform_device *pdev) /* register with the generic lantiq layer */ ret = ltq_pinctrl_register(pdev, &xway_info); if (ret) { + gpiochip_remove(&xway_chip); dev_err(&pdev->dev, "Failed to register pinctrl driver\n"); return ret; } diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 051e8592990e..b874458dcb88 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -41,7 +41,7 @@ int pinmux_check_ops(struct pinctrl_dev *pctldev) !ops->get_functions_count || !ops->get_function_name || !ops->get_function_groups || - !ops->enable) { + !ops->set_mux) { dev_err(pctldev->dev, "pinmux ops lacks necessary functions\n"); return -EINVAL; } @@ -445,15 +445,15 @@ int pinmux_enable_setting(struct pinctrl_setting const *setting) desc->mux_setting = &(setting->data.mux); } - ret = ops->enable(pctldev, setting->data.mux.func, - setting->data.mux.group); + ret = ops->set_mux(pctldev, setting->data.mux.func, + setting->data.mux.group); if (ret) - goto err_enable; + goto err_set_mux; return 0; -err_enable: +err_set_mux: for (i = 0; i < num_pins; i++) { desc = pin_desc_get(pctldev, pins[i]); if (desc) @@ -471,7 +471,6 @@ void pinmux_disable_setting(struct pinctrl_setting const *setting) { struct pinctrl_dev *pctldev = setting->pctldev; const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; - const struct pinmux_ops *ops = pctldev->desc->pmxops; int ret = 0; const unsigned *pins = NULL; unsigned num_pins = 0; @@ -518,9 +517,6 @@ void pinmux_disable_setting(struct pinctrl_setting const *setting) pins[i], desc->name, gname); } } - - if (ops->disable) - ops->disable(pctldev, setting->data.mux.func, setting->data.mux.group); } #ifdef CONFIG_DEBUG_FS diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig new file mode 100644 index 000000000000..81275af9638b --- /dev/null +++ b/drivers/pinctrl/qcom/Kconfig @@ -0,0 +1,50 @@ +if (ARCH_QCOM || COMPILE_TEST) + +config PINCTRL_MSM + bool + select PINMUX + select PINCONF + select GENERIC_PINCONF + select GPIOLIB_IRQCHIP + +config PINCTRL_APQ8064 + tristate "Qualcomm APQ8064 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm APQ8064 platform. + +config PINCTRL_APQ8084 + tristate "Qualcomm APQ8084 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm APQ8084 platform. + +config PINCTRL_IPQ8064 + tristate "Qualcomm IPQ8064 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm IPQ8064 platform. 
+ +config PINCTRL_MSM8960 + tristate "Qualcomm 8960 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm 8960 platform. + +config PINCTRL_MSM8X74 + tristate "Qualcomm 8x74 pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm TLMM block found in the Qualcomm 8974 platform. + +endif diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile new file mode 100644 index 000000000000..ba8519fcd8d3 --- /dev/null +++ b/drivers/pinctrl/qcom/Makefile @@ -0,0 +1,7 @@ +# Qualcomm pin control drivers +obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o +obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o +obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o +obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o +obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o +obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o diff --git a/drivers/pinctrl/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c index 519f7886b0f1..c832d7d6b912 100644 --- a/drivers/pinctrl/pinctrl-apq8064.c +++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c @@ -230,7 +230,7 @@ static const unsigned int sdc3_data_pins[] = { 95 }; .pins = gpio##id##_pins, \ .npins = ARRAY_SIZE(gpio##id##_pins), \ .funcs = (int[]){ \ - APQ_MUX_NA, /* gpio mode */ \ + APQ_MUX_gpio, \ APQ_MUX_##f1, \ APQ_MUX_##f2, \ APQ_MUX_##f3, \ @@ -258,6 +258,7 @@ static const unsigned int sdc3_data_pins[] = { 95 }; .intr_status_bit = 0, \ .intr_ack_high = 1, \ .intr_target_bit = 0, \ + .intr_target_kpss_val = 4, \ .intr_raw_status_bit = 3, \ .intr_polarity_bit = 1, \ .intr_detection_bit = 2, \ @@ -283,6 +284,7 @@ static const unsigned int sdc3_data_pins[] = { 95 }; .intr_enable_bit = -1, \ .intr_status_bit = -1, \ .intr_target_bit = -1, \ + .intr_target_kpss_val = -1, \ .intr_raw_status_bit = -1, \ .intr_polarity_bit = -1, \ .intr_detection_bit = -1, \ @@ -293,6 +295,7 @@ enum apq8064_functions { APQ_MUX_cam_mclk, APQ_MUX_codec_mic_i2s, APQ_MUX_codec_spkr_i2s, + APQ_MUX_gpio, APQ_MUX_gsbi1, APQ_MUX_gsbi2, APQ_MUX_gsbi3, @@ -323,6 +326,7 @@ enum apq8064_functions { APQ_MUX_tsif1, APQ_MUX_tsif2, APQ_MUX_usb2_hsic, + APQ_MUX_ps_hold, APQ_MUX_NA, }; @@ -335,6 +339,24 @@ static const char * const codec_mic_i2s_groups[] = { static const char * const codec_spkr_i2s_groups[] = { "gpio39", "gpio40", "gpio41", "gpio42" }; +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89" +}; +static const char * const ps_hold_groups[] = { + "gpio78" +}; static const char 
* const gsbi1_groups[] = { "gpio18", "gpio19", "gpio20", "gpio21" }; @@ -430,6 +452,7 @@ static const struct msm_function apq8064_functions[] = { FUNCTION(cam_mclk), FUNCTION(codec_mic_i2s), FUNCTION(codec_spkr_i2s), + FUNCTION(gpio), FUNCTION(gsbi1), FUNCTION(gsbi2), FUNCTION(gsbi3), @@ -460,6 +483,7 @@ static const struct msm_function apq8064_functions[] = { FUNCTION(tsif1), FUNCTION(tsif2), FUNCTION(usb2_hsic), + FUNCTION(ps_hold), }; static const struct msm_pingroup apq8064_groups[] = { @@ -541,7 +565,7 @@ static const struct msm_pingroup apq8064_groups[] = { PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), - PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(78, ps_hold, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c new file mode 100644 index 000000000000..138cbf6134a5 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c @@ -0,0 +1,1245 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-msm.h" + +static const struct pinctrl_pin_desc apq8084_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, 
"GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + + PINCTRL_PIN(147, "SDC1_CLK"), + PINCTRL_PIN(148, "SDC1_CMD"), + PINCTRL_PIN(149, "SDC1_DATA"), + PINCTRL_PIN(150, "SDC2_CLK"), + PINCTRL_PIN(151, "SDC2_CMD"), + PINCTRL_PIN(152, "SDC2_DATA"), +}; + +#define DECLARE_APQ_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin } + +DECLARE_APQ_GPIO_PINS(0); +DECLARE_APQ_GPIO_PINS(1); +DECLARE_APQ_GPIO_PINS(2); +DECLARE_APQ_GPIO_PINS(3); +DECLARE_APQ_GPIO_PINS(4); +DECLARE_APQ_GPIO_PINS(5); +DECLARE_APQ_GPIO_PINS(6); +DECLARE_APQ_GPIO_PINS(7); +DECLARE_APQ_GPIO_PINS(8); +DECLARE_APQ_GPIO_PINS(9); +DECLARE_APQ_GPIO_PINS(10); +DECLARE_APQ_GPIO_PINS(11); 
+DECLARE_APQ_GPIO_PINS(12); +DECLARE_APQ_GPIO_PINS(13); +DECLARE_APQ_GPIO_PINS(14); +DECLARE_APQ_GPIO_PINS(15); +DECLARE_APQ_GPIO_PINS(16); +DECLARE_APQ_GPIO_PINS(17); +DECLARE_APQ_GPIO_PINS(18); +DECLARE_APQ_GPIO_PINS(19); +DECLARE_APQ_GPIO_PINS(20); +DECLARE_APQ_GPIO_PINS(21); +DECLARE_APQ_GPIO_PINS(22); +DECLARE_APQ_GPIO_PINS(23); +DECLARE_APQ_GPIO_PINS(24); +DECLARE_APQ_GPIO_PINS(25); +DECLARE_APQ_GPIO_PINS(26); +DECLARE_APQ_GPIO_PINS(27); +DECLARE_APQ_GPIO_PINS(28); +DECLARE_APQ_GPIO_PINS(29); +DECLARE_APQ_GPIO_PINS(30); +DECLARE_APQ_GPIO_PINS(31); +DECLARE_APQ_GPIO_PINS(32); +DECLARE_APQ_GPIO_PINS(33); +DECLARE_APQ_GPIO_PINS(34); +DECLARE_APQ_GPIO_PINS(35); +DECLARE_APQ_GPIO_PINS(36); +DECLARE_APQ_GPIO_PINS(37); +DECLARE_APQ_GPIO_PINS(38); +DECLARE_APQ_GPIO_PINS(39); +DECLARE_APQ_GPIO_PINS(40); +DECLARE_APQ_GPIO_PINS(41); +DECLARE_APQ_GPIO_PINS(42); +DECLARE_APQ_GPIO_PINS(43); +DECLARE_APQ_GPIO_PINS(44); +DECLARE_APQ_GPIO_PINS(45); +DECLARE_APQ_GPIO_PINS(46); +DECLARE_APQ_GPIO_PINS(47); +DECLARE_APQ_GPIO_PINS(48); +DECLARE_APQ_GPIO_PINS(49); +DECLARE_APQ_GPIO_PINS(50); +DECLARE_APQ_GPIO_PINS(51); +DECLARE_APQ_GPIO_PINS(52); +DECLARE_APQ_GPIO_PINS(53); +DECLARE_APQ_GPIO_PINS(54); +DECLARE_APQ_GPIO_PINS(55); +DECLARE_APQ_GPIO_PINS(56); +DECLARE_APQ_GPIO_PINS(57); +DECLARE_APQ_GPIO_PINS(58); +DECLARE_APQ_GPIO_PINS(59); +DECLARE_APQ_GPIO_PINS(60); +DECLARE_APQ_GPIO_PINS(61); +DECLARE_APQ_GPIO_PINS(62); +DECLARE_APQ_GPIO_PINS(63); +DECLARE_APQ_GPIO_PINS(64); +DECLARE_APQ_GPIO_PINS(65); +DECLARE_APQ_GPIO_PINS(66); +DECLARE_APQ_GPIO_PINS(67); +DECLARE_APQ_GPIO_PINS(68); +DECLARE_APQ_GPIO_PINS(69); +DECLARE_APQ_GPIO_PINS(70); +DECLARE_APQ_GPIO_PINS(71); +DECLARE_APQ_GPIO_PINS(72); +DECLARE_APQ_GPIO_PINS(73); +DECLARE_APQ_GPIO_PINS(74); +DECLARE_APQ_GPIO_PINS(75); +DECLARE_APQ_GPIO_PINS(76); +DECLARE_APQ_GPIO_PINS(77); +DECLARE_APQ_GPIO_PINS(78); +DECLARE_APQ_GPIO_PINS(79); +DECLARE_APQ_GPIO_PINS(80); +DECLARE_APQ_GPIO_PINS(81); +DECLARE_APQ_GPIO_PINS(82); +DECLARE_APQ_GPIO_PINS(83); +DECLARE_APQ_GPIO_PINS(84); +DECLARE_APQ_GPIO_PINS(85); +DECLARE_APQ_GPIO_PINS(86); +DECLARE_APQ_GPIO_PINS(87); +DECLARE_APQ_GPIO_PINS(88); +DECLARE_APQ_GPIO_PINS(89); +DECLARE_APQ_GPIO_PINS(90); +DECLARE_APQ_GPIO_PINS(91); +DECLARE_APQ_GPIO_PINS(92); +DECLARE_APQ_GPIO_PINS(93); +DECLARE_APQ_GPIO_PINS(94); +DECLARE_APQ_GPIO_PINS(95); +DECLARE_APQ_GPIO_PINS(96); +DECLARE_APQ_GPIO_PINS(97); +DECLARE_APQ_GPIO_PINS(98); +DECLARE_APQ_GPIO_PINS(99); +DECLARE_APQ_GPIO_PINS(100); +DECLARE_APQ_GPIO_PINS(101); +DECLARE_APQ_GPIO_PINS(102); +DECLARE_APQ_GPIO_PINS(103); +DECLARE_APQ_GPIO_PINS(104); +DECLARE_APQ_GPIO_PINS(105); +DECLARE_APQ_GPIO_PINS(106); +DECLARE_APQ_GPIO_PINS(107); +DECLARE_APQ_GPIO_PINS(108); +DECLARE_APQ_GPIO_PINS(109); +DECLARE_APQ_GPIO_PINS(110); +DECLARE_APQ_GPIO_PINS(111); +DECLARE_APQ_GPIO_PINS(112); +DECLARE_APQ_GPIO_PINS(113); +DECLARE_APQ_GPIO_PINS(114); +DECLARE_APQ_GPIO_PINS(115); +DECLARE_APQ_GPIO_PINS(116); +DECLARE_APQ_GPIO_PINS(117); +DECLARE_APQ_GPIO_PINS(118); +DECLARE_APQ_GPIO_PINS(119); +DECLARE_APQ_GPIO_PINS(120); +DECLARE_APQ_GPIO_PINS(121); +DECLARE_APQ_GPIO_PINS(122); +DECLARE_APQ_GPIO_PINS(123); +DECLARE_APQ_GPIO_PINS(124); +DECLARE_APQ_GPIO_PINS(125); +DECLARE_APQ_GPIO_PINS(126); +DECLARE_APQ_GPIO_PINS(127); +DECLARE_APQ_GPIO_PINS(128); +DECLARE_APQ_GPIO_PINS(129); +DECLARE_APQ_GPIO_PINS(130); +DECLARE_APQ_GPIO_PINS(131); +DECLARE_APQ_GPIO_PINS(132); +DECLARE_APQ_GPIO_PINS(133); +DECLARE_APQ_GPIO_PINS(134); +DECLARE_APQ_GPIO_PINS(135); +DECLARE_APQ_GPIO_PINS(136); 
+DECLARE_APQ_GPIO_PINS(137); +DECLARE_APQ_GPIO_PINS(138); +DECLARE_APQ_GPIO_PINS(139); +DECLARE_APQ_GPIO_PINS(140); +DECLARE_APQ_GPIO_PINS(141); +DECLARE_APQ_GPIO_PINS(142); +DECLARE_APQ_GPIO_PINS(143); +DECLARE_APQ_GPIO_PINS(144); +DECLARE_APQ_GPIO_PINS(145); +DECLARE_APQ_GPIO_PINS(146); + +static const unsigned int sdc1_clk_pins[] = { 147 }; +static const unsigned int sdc1_cmd_pins[] = { 148 }; +static const unsigned int sdc1_data_pins[] = { 149 }; +static const unsigned int sdc2_clk_pins[] = { 150 }; +static const unsigned int sdc2_cmd_pins[] = { 151 }; +static const unsigned int sdc2_data_pins[] = { 152 }; + +#define FUNCTION(fname) \ + [APQ_MUX_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + APQ_MUX_gpio, \ + APQ_MUX_##f1, \ + APQ_MUX_##f2, \ + APQ_MUX_##f3, \ + APQ_MUX_##f4, \ + APQ_MUX_##f5, \ + APQ_MUX_##f6, \ + APQ_MUX_##f7 \ + }, \ + .nfuncs = 8, \ + .ctl_reg = 0x1000 + 0x10 * id, \ + .io_reg = 0x1004 + 0x10 * id, \ + .intr_cfg_reg = 0x1008 + 0x10 * id, \ + .intr_status_reg = 0x100c + 0x10 * id, \ + .intr_target_reg = 0x1008 + 0x10 * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_ack_high = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_target_kpss_val = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +enum apq8084_functions { + APQ_MUX_adsp_ext, + APQ_MUX_audio_ref, + APQ_MUX_blsp_i2c1, + APQ_MUX_blsp_i2c2, + APQ_MUX_blsp_i2c3, + APQ_MUX_blsp_i2c4, + APQ_MUX_blsp_i2c5, + APQ_MUX_blsp_i2c6, + APQ_MUX_blsp_i2c7, + APQ_MUX_blsp_i2c8, + APQ_MUX_blsp_i2c9, + APQ_MUX_blsp_i2c10, + APQ_MUX_blsp_i2c11, + APQ_MUX_blsp_i2c12, + APQ_MUX_blsp_spi1, + APQ_MUX_blsp_spi1_cs1, + APQ_MUX_blsp_spi1_cs2, + APQ_MUX_blsp_spi1_cs3, + APQ_MUX_blsp_spi2, + APQ_MUX_blsp_spi3, + APQ_MUX_blsp_spi3_cs1, + APQ_MUX_blsp_spi3_cs2, + APQ_MUX_blsp_spi3_cs3, + APQ_MUX_blsp_spi4, + APQ_MUX_blsp_spi5, + APQ_MUX_blsp_spi6, + APQ_MUX_blsp_spi7, + APQ_MUX_blsp_spi8, + APQ_MUX_blsp_spi9, + APQ_MUX_blsp_spi10, + APQ_MUX_blsp_spi10_cs1, + APQ_MUX_blsp_spi10_cs2, + APQ_MUX_blsp_spi10_cs3, + APQ_MUX_blsp_spi11, + APQ_MUX_blsp_spi12, + APQ_MUX_blsp_uart1, + APQ_MUX_blsp_uart2, + APQ_MUX_blsp_uart3, + APQ_MUX_blsp_uart4, + APQ_MUX_blsp_uart5, + APQ_MUX_blsp_uart6, + APQ_MUX_blsp_uart7, + APQ_MUX_blsp_uart8, + APQ_MUX_blsp_uart9, + APQ_MUX_blsp_uart10, + APQ_MUX_blsp_uart11, + APQ_MUX_blsp_uart12, + APQ_MUX_blsp_uim1, + APQ_MUX_blsp_uim2, + APQ_MUX_blsp_uim3, + APQ_MUX_blsp_uim4, + APQ_MUX_blsp_uim5, + APQ_MUX_blsp_uim6, + APQ_MUX_blsp_uim7, + APQ_MUX_blsp_uim8, + APQ_MUX_blsp_uim9, + 
APQ_MUX_blsp_uim10, + APQ_MUX_blsp_uim11, + APQ_MUX_blsp_uim12, + APQ_MUX_cam_mclk0, + APQ_MUX_cam_mclk1, + APQ_MUX_cam_mclk2, + APQ_MUX_cam_mclk3, + APQ_MUX_cci_async, + APQ_MUX_cci_async_in0, + APQ_MUX_cci_i2c0, + APQ_MUX_cci_i2c1, + APQ_MUX_cci_timer0, + APQ_MUX_cci_timer1, + APQ_MUX_cci_timer2, + APQ_MUX_cci_timer3, + APQ_MUX_cci_timer4, + APQ_MUX_edp_hpd, + APQ_MUX_gcc_gp1, + APQ_MUX_gcc_gp2, + APQ_MUX_gcc_gp3, + APQ_MUX_gcc_obt, + APQ_MUX_gcc_vtt, + APQ_MUX_gp_mn, + APQ_MUX_gp_pdm0, + APQ_MUX_gp_pdm1, + APQ_MUX_gp_pdm2, + APQ_MUX_gp0_clk, + APQ_MUX_gp1_clk, + APQ_MUX_gpio, + APQ_MUX_hdmi_cec, + APQ_MUX_hdmi_ddc, + APQ_MUX_hdmi_dtest, + APQ_MUX_hdmi_hpd, + APQ_MUX_hdmi_rcv, + APQ_MUX_hsic, + APQ_MUX_ldo_en, + APQ_MUX_ldo_update, + APQ_MUX_mdp_vsync, + APQ_MUX_pci_e0, + APQ_MUX_pci_e0_n, + APQ_MUX_pci_e0_rst, + APQ_MUX_pci_e1, + APQ_MUX_pci_e1_rst, + APQ_MUX_pci_e1_rst_n, + APQ_MUX_pci_e1_clkreq_n, + APQ_MUX_pri_mi2s, + APQ_MUX_qua_mi2s, + APQ_MUX_sata_act, + APQ_MUX_sata_devsleep, + APQ_MUX_sata_devsleep_n, + APQ_MUX_sd_write, + APQ_MUX_sdc_emmc_mode, + APQ_MUX_sdc3, + APQ_MUX_sdc4, + APQ_MUX_sec_mi2s, + APQ_MUX_slimbus, + APQ_MUX_spdif_tx, + APQ_MUX_spkr_i2s, + APQ_MUX_spkr_i2s_ws, + APQ_MUX_spss_geni, + APQ_MUX_ter_mi2s, + APQ_MUX_tsif1, + APQ_MUX_tsif2, + APQ_MUX_uim, + APQ_MUX_uim_batt_alarm, + APQ_MUX_NA, +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134", + "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", + "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146" +}; + +static const char * const adsp_ext_groups[] = { + "gpio34" +}; +static const char * const audio_ref_groups[] = { + "gpio100" +}; +static const char * const blsp_i2c1_groups[] = { + "gpio2", "gpio3" +}; +static const char * const blsp_i2c2_groups[] = { + "gpio6", "gpio7" +}; +static const char * const blsp_i2c3_groups[] = { + "gpio10", "gpio11" +}; +static const char * const blsp_i2c4_groups[] = { + "gpio29", "gpio30" +}; +static const char * const blsp_i2c5_groups[] = { + "gpio41", "gpio42" +}; +static const char * const blsp_i2c6_groups[] = { + "gpio45", "gpio46" +}; +static const char * const 
blsp_i2c7_groups[] = { + "gpio132", "gpio133" +}; +static const char * const blsp_i2c8_groups[] = { + "gpio53", "gpio54" +}; +static const char * const blsp_i2c9_groups[] = { + "gpio57", "gpio58" +}; +static const char * const blsp_i2c10_groups[] = { + "gpio61", "gpio62" +}; +static const char * const blsp_i2c11_groups[] = { + "gpio65", "gpio66" +}; +static const char * const blsp_i2c12_groups[] = { + "gpio49", "gpio50" +}; +static const char * const blsp_spi1_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3" +}; +static const char * const blsp_spi2_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7" +}; +static const char * const blsp_spi3_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11" +}; +static const char * const blsp_spi4_groups[] = { + "gpio27", "gpio28", "gpio29", "gpio30" +}; +static const char * const blsp_spi5_groups[] = { + "gpio39", "gpio40", "gpio41", "gpio42" +}; +static const char * const blsp_spi6_groups[] = { + "gpio43", "gpio44", "gpio45", "gpio46" +}; +static const char * const blsp_spi7_groups[] = { + "gpio130", "gpio131", "gpio132", "gpio133" +}; +static const char * const blsp_spi8_groups[] = { + "gpio51", "gpio52", "gpio53", "gpio54" +}; +static const char * const blsp_spi9_groups[] = { + "gpio55", "gpio56", "gpio57", "gpio58" +}; +static const char * const blsp_spi10_groups[] = { + "gpio59", "gpio60", "gpio61", "gpio62" +}; +static const char * const blsp_spi11_groups[] = { + "gpio63", "gpio64", "gpio65", "gpio66" +}; +static const char * const blsp_spi12_groups[] = { + "gpio47", "gpio48", "gpio49", "gpio50" +}; +static const char * const blsp_uart1_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3" +}; +static const char * const blsp_uart2_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7" +}; +static const char * const blsp_uart3_groups[] = { + "gpio8" +}; +static const char * const blsp_uart4_groups[] = { + "gpio27", "gpio28", "gpio29", "gpio30" +}; +static const char * const blsp_uart5_groups[] = { + "gpio39", "gpio40", "gpio41", "gpio42" +}; +static const char * const blsp_uart6_groups[] = { + "gpio43", "gpio44", "gpio45", "gpio46" +}; +static const char * const blsp_uart7_groups[] = { + "gpio130", "gpio131", "gpio132", "gpio133" +}; +static const char * const blsp_uart8_groups[] = { + "gpio51", "gpio52", "gpio53", "gpio54" +}; +static const char * const blsp_uart9_groups[] = { + "gpio55", "gpio56", "gpio57", "gpio58" +}; +static const char * const blsp_uart10_groups[] = { + "gpio59", "gpio60", "gpio61", "gpio62" +}; +static const char * const blsp_uart11_groups[] = { + "gpio63", "gpio64", "gpio65", "gpio66" +}; +static const char * const blsp_uart12_groups[] = { + "gpio47", "gpio48", "gpio49", "gpio50" +}; +static const char * const blsp_uim1_groups[] = { + "gpio0", "gpio1" +}; +static const char * const blsp_uim2_groups[] = { + "gpio4", "gpio5" +}; +static const char * const blsp_uim3_groups[] = { + "gpio8", "gpio9" +}; +static const char * const blsp_uim4_groups[] = { + "gpio27", "gpio28" +}; +static const char * const blsp_uim5_groups[] = { + "gpio39", "gpio40" +}; +static const char * const blsp_uim6_groups[] = { + "gpio43", "gpio44" +}; +static const char * const blsp_uim7_groups[] = { + "gpio130", "gpio131" +}; +static const char * const blsp_uim8_groups[] = { + "gpio51", "gpio52" +}; +static const char * const blsp_uim9_groups[] = { + "gpio55", "gpio56" +}; +static const char * const blsp_uim10_groups[] = { + "gpio59", "gpio60" +}; +static const char * const blsp_uim11_groups[] = { + "gpio63", "gpio64" +}; +static const char * const blsp_uim12_groups[] 
= { + "gpio47", "gpio48" +}; +static const char * const blsp_spi1_cs1_groups[] = { + "gpio116" +}; +static const char * const blsp_spi1_cs2_groups[] = { + "gpio117" +}; +static const char * const blsp_spi1_cs3_groups[] = { + "gpio118" +}; +static const char * const blsp_spi3_cs1_groups[] = { + "gpio67" +}; +static const char * const blsp_spi3_cs2_groups[] = { + "gpio71" +}; +static const char * const blsp_spi3_cs3_groups[] = { + "gpio72" +}; +static const char * const blsp_spi10_cs1_groups[] = { + "gpio106" +}; +static const char * const blsp_spi10_cs2_groups[] = { + "gpio111" +}; +static const char * const blsp_spi10_cs3_groups[] = { + "gpio128" +}; +static const char * const cam_mclk0_groups[] = { + "gpio15" +}; +static const char * const cam_mclk1_groups[] = { + "gpio16" +}; +static const char * const cam_mclk2_groups[] = { + "gpio17" +}; +static const char * const cam_mclk3_groups[] = { + "gpio18" +}; +static const char * const cci_async_groups[] = { + "gpio26", "gpio119" +}; +static const char * const cci_async_in0_groups[] = { + "gpio120" +}; +static const char * const cci_i2c0_groups[] = { + "gpio19", "gpio20" +}; +static const char * const cci_i2c1_groups[] = { + "gpio21", "gpio22" +}; +static const char * const cci_timer0_groups[] = { + "gpio23" +}; +static const char * const cci_timer1_groups[] = { + "gpio24" +}; +static const char * const cci_timer2_groups[] = { + "gpio25" +}; +static const char * const cci_timer3_groups[] = { + "gpio26" +}; +static const char * const cci_timer4_groups[] = { + "gpio119" +}; +static const char * const edp_hpd_groups[] = { + "gpio103" +}; +static const char * const gcc_gp1_groups[] = { + "gpio37" +}; +static const char * const gcc_gp2_groups[] = { + "gpio38" +}; +static const char * const gcc_gp3_groups[] = { + "gpio86" +}; +static const char * const gcc_obt_groups[] = { + "gpio127" +}; +static const char * const gcc_vtt_groups[] = { + "gpio126" +}; +static const char * const gp_mn_groups[] = { + "gpio29" +}; +static const char * const gp_pdm0_groups[] = { + "gpio48", "gpio83" +}; +static const char * const gp_pdm1_groups[] = { + "gpio84", "gpio101" +}; +static const char * const gp_pdm2_groups[] = { + "gpio85", "gpio110" +}; +static const char * const gp0_clk_groups[] = { + "gpio25" +}; +static const char * const gp1_clk_groups[] = { + "gpio26" +}; +static const char * const hdmi_cec_groups[] = { + "gpio31" +}; +static const char * const hdmi_ddc_groups[] = { + "gpio32", "gpio33" +}; +static const char * const hdmi_dtest_groups[] = { + "gpio123" +}; +static const char * const hdmi_hpd_groups[] = { + "gpio34" +}; +static const char * const hdmi_rcv_groups[] = { + "gpio125" +}; +static const char * const hsic_groups[] = { + "gpio134", "gpio135" +}; +static const char * const ldo_en_groups[] = { + "gpio124" +}; +static const char * const ldo_update_groups[] = { + "gpio125" +}; +static const char * const mdp_vsync_groups[] = { + "gpio12", "gpio13", "gpio14" +}; +static const char * const pci_e0_groups[] = { + "gpio68", "gpio70" +}; +static const char * const pci_e0_n_groups[] = { + "gpio68", "gpio70" +}; +static const char * const pci_e0_rst_groups[] = { + "gpio70" +}; +static const char * const pci_e1_groups[] = { + "gpio140" +}; +static const char * const pci_e1_rst_groups[] = { + "gpio140" +}; +static const char * const pci_e1_rst_n_groups[] = { + "gpio140" +}; +static const char * const pci_e1_clkreq_n_groups[] = { + "gpio141" +}; +static const char * const pri_mi2s_groups[] = { + "gpio76", "gpio77", "gpio78", "gpio79", "gpio80" +}; +static const 
char * const qua_mi2s_groups[] = { + "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97" +}; +static const char * const sata_act_groups[] = { + "gpio129" +}; +static const char * const sata_devsleep_groups[] = { + "gpio119" +}; +static const char * const sata_devsleep_n_groups[] = { + "gpio119" +}; +static const char * const sd_write_groups[] = { + "gpio75" +}; +static const char * const sdc_emmc_mode_groups[] = { + "gpio146" +}; +static const char * const sdc3_groups[] = { + "gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72" +}; +static const char * const sdc4_groups[] = { + "gpio82", "gpio83", "gpio84", "gpio85", "gpio86", + "gpio91", "gpio95", "gpio96", "gpio97", "gpio101" +}; +static const char * const sec_mi2s_groups[] = { + "gpio81", "gpio82", "gpio83", "gpio84", "gpio85" +}; +static const char * const slimbus_groups[] = { + "gpio98", "gpio99" +}; +static const char * const spdif_tx_groups[] = { + "gpio124", "gpio136", "gpio142" +}; +static const char * const spkr_i2s_groups[] = { + "gpio98", "gpio99", "gpio100" +}; +static const char * const spkr_i2s_ws_groups[] = { + "gpio104" +}; +static const char * const spss_geni_groups[] = { + "gpio8", "gpio9" +}; +static const char * const ter_mi2s_groups[] = { + "gpio86", "gpio87", "gpio88", "gpio89", "gpio90" +}; +static const char * const tsif1_groups[] = { + "gpio82", "gpio83", "gpio84", "gpio85", "gpio86" +}; +static const char * const tsif2_groups[] = { + "gpio91", "gpio95", "gpio96", "gpio97", "gpio101" +}; +static const char * const uim_groups[] = { + "gpio130", "gpio131", "gpio132", "gpio133" +}; +static const char * const uim_batt_alarm_groups[] = { + "gpio102" +}; +static const struct msm_function apq8084_functions[] = { + FUNCTION(adsp_ext), + FUNCTION(audio_ref), + FUNCTION(blsp_i2c1), + FUNCTION(blsp_i2c2), + FUNCTION(blsp_i2c3), + FUNCTION(blsp_i2c4), + FUNCTION(blsp_i2c5), + FUNCTION(blsp_i2c6), + FUNCTION(blsp_i2c7), + FUNCTION(blsp_i2c8), + FUNCTION(blsp_i2c9), + FUNCTION(blsp_i2c10), + FUNCTION(blsp_i2c11), + FUNCTION(blsp_i2c12), + FUNCTION(blsp_spi1), + FUNCTION(blsp_spi1_cs1), + FUNCTION(blsp_spi1_cs2), + FUNCTION(blsp_spi1_cs3), + FUNCTION(blsp_spi2), + FUNCTION(blsp_spi3), + FUNCTION(blsp_spi3_cs1), + FUNCTION(blsp_spi3_cs2), + FUNCTION(blsp_spi3_cs3), + FUNCTION(blsp_spi4), + FUNCTION(blsp_spi5), + FUNCTION(blsp_spi6), + FUNCTION(blsp_spi7), + FUNCTION(blsp_spi8), + FUNCTION(blsp_spi9), + FUNCTION(blsp_spi10), + FUNCTION(blsp_spi10_cs1), + FUNCTION(blsp_spi10_cs2), + FUNCTION(blsp_spi10_cs3), + FUNCTION(blsp_spi11), + FUNCTION(blsp_spi12), + FUNCTION(blsp_uart1), + FUNCTION(blsp_uart2), + FUNCTION(blsp_uart3), + FUNCTION(blsp_uart4), + FUNCTION(blsp_uart5), + FUNCTION(blsp_uart6), + FUNCTION(blsp_uart7), + FUNCTION(blsp_uart8), + FUNCTION(blsp_uart9), + FUNCTION(blsp_uart10), + FUNCTION(blsp_uart11), + FUNCTION(blsp_uart12), + FUNCTION(blsp_uim1), + FUNCTION(blsp_uim2), + FUNCTION(blsp_uim3), + FUNCTION(blsp_uim4), + FUNCTION(blsp_uim5), + FUNCTION(blsp_uim6), + FUNCTION(blsp_uim7), + FUNCTION(blsp_uim8), + FUNCTION(blsp_uim9), + FUNCTION(blsp_uim10), + FUNCTION(blsp_uim11), + FUNCTION(blsp_uim12), + FUNCTION(cam_mclk0), + FUNCTION(cam_mclk1), + FUNCTION(cam_mclk2), + FUNCTION(cam_mclk3), + FUNCTION(cci_async), + FUNCTION(cci_async_in0), + FUNCTION(cci_i2c0), + FUNCTION(cci_i2c1), + FUNCTION(cci_timer0), + FUNCTION(cci_timer1), + FUNCTION(cci_timer2), + FUNCTION(cci_timer3), + FUNCTION(cci_timer4), + FUNCTION(edp_hpd), + FUNCTION(gcc_gp1), + FUNCTION(gcc_gp2), + FUNCTION(gcc_gp3), + 
FUNCTION(gcc_obt), + FUNCTION(gcc_vtt), + FUNCTION(gp_mn), + FUNCTION(gp_pdm0), + FUNCTION(gp_pdm1), + FUNCTION(gp_pdm2), + FUNCTION(gp0_clk), + FUNCTION(gp1_clk), + FUNCTION(gpio), + FUNCTION(hdmi_cec), + FUNCTION(hdmi_ddc), + FUNCTION(hdmi_dtest), + FUNCTION(hdmi_hpd), + FUNCTION(hdmi_rcv), + FUNCTION(hsic), + FUNCTION(ldo_en), + FUNCTION(ldo_update), + FUNCTION(mdp_vsync), + FUNCTION(pci_e0), + FUNCTION(pci_e0_n), + FUNCTION(pci_e0_rst), + FUNCTION(pci_e1), + FUNCTION(pci_e1_rst), + FUNCTION(pci_e1_rst_n), + FUNCTION(pci_e1_clkreq_n), + FUNCTION(pri_mi2s), + FUNCTION(qua_mi2s), + FUNCTION(sata_act), + FUNCTION(sata_devsleep), + FUNCTION(sata_devsleep_n), + FUNCTION(sd_write), + FUNCTION(sdc_emmc_mode), + FUNCTION(sdc3), + FUNCTION(sdc4), + FUNCTION(sec_mi2s), + FUNCTION(slimbus), + FUNCTION(spdif_tx), + FUNCTION(spkr_i2s), + FUNCTION(spkr_i2s_ws), + FUNCTION(spss_geni), + FUNCTION(ter_mi2s), + FUNCTION(tsif1), + FUNCTION(tsif2), + FUNCTION(uim), + FUNCTION(uim_batt_alarm), +}; + +static const struct msm_pingroup apq8084_groups[] = { + PINGROUP(0, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA), + PINGROUP(1, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA), + PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA), + PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA), + PINGROUP(4, blsp_spi2, blsp_uart2, blsp_uim2, NA, NA, NA, NA), + PINGROUP(5, blsp_spi2, blsp_uart2, blsp_uim2, NA, NA, NA, NA), + PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA), + PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA), + PINGROUP(8, blsp_spi3, blsp_uart3, blsp_uim3, spss_geni, NA, NA, NA), + PINGROUP(9, blsp_spi3, blsp_uim3, blsp_uart3, spss_geni, NA, NA, NA), + PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, NA, NA, NA, NA), + PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, NA, NA, NA, NA), + PINGROUP(12, mdp_vsync, NA, NA, NA, NA, NA, NA), + PINGROUP(13, mdp_vsync, NA, NA, NA, NA, NA, NA), + PINGROUP(14, mdp_vsync, NA, NA, NA, NA, NA, NA), + PINGROUP(15, cam_mclk0, NA, NA, NA, NA, NA, NA), + PINGROUP(16, cam_mclk1, NA, NA, NA, NA, NA, NA), + PINGROUP(17, cam_mclk2, NA, NA, NA, NA, NA, NA), + PINGROUP(18, cam_mclk3, NA, NA, NA, NA, NA, NA), + PINGROUP(19, cci_i2c0, NA, NA, NA, NA, NA, NA), + PINGROUP(20, cci_i2c0, NA, NA, NA, NA, NA, NA), + PINGROUP(21, cci_i2c1, NA, NA, NA, NA, NA, NA), + PINGROUP(22, cci_i2c1, NA, NA, NA, NA, NA, NA), + PINGROUP(23, cci_timer0, NA, NA, NA, NA, NA, NA), + PINGROUP(24, cci_timer1, NA, NA, NA, NA, NA, NA), + PINGROUP(25, cci_timer2, gp0_clk, NA, NA, NA, NA, NA), + PINGROUP(26, cci_timer3, cci_async, gp1_clk, NA, NA, NA, NA), + PINGROUP(27, blsp_spi4, blsp_uart4, blsp_uim4, NA, NA, NA, NA), + PINGROUP(28, blsp_spi4, blsp_uart4, blsp_uim4, NA, NA, NA, NA), + PINGROUP(29, blsp_spi4, blsp_uart4, blsp_i2c4, gp_mn, NA, NA, NA), + PINGROUP(30, blsp_spi4, blsp_uart4, blsp_i2c4, NA, NA, NA, NA), + PINGROUP(31, hdmi_cec, NA, NA, NA, NA, NA, NA), + PINGROUP(32, hdmi_ddc, NA, NA, NA, NA, NA, NA), + PINGROUP(33, hdmi_ddc, NA, NA, NA, NA, NA, NA), + PINGROUP(34, hdmi_hpd, NA, adsp_ext, NA, NA, NA, NA), + PINGROUP(35, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(36, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(37, gcc_gp1, NA, NA, NA, NA, NA, NA), + PINGROUP(38, gcc_gp2, NA, NA, NA, NA, NA, NA), + PINGROUP(39, blsp_spi5, blsp_uart5, blsp_uim5, NA, NA, NA, NA), + PINGROUP(40, blsp_spi5, blsp_uart5, blsp_uim5, NA, NA, NA, NA), + PINGROUP(41, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA), + PINGROUP(42, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA), + 
PINGROUP(43, blsp_spi6, blsp_uart6, blsp_uim6, NA, NA, NA, NA), + PINGROUP(44, blsp_spi6, blsp_uart6, blsp_uim6, NA, NA, NA, NA), + PINGROUP(45, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA), + PINGROUP(46, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA), + PINGROUP(47, blsp_spi12, blsp_uart12, blsp_uim12, NA, NA, NA, NA), + PINGROUP(48, blsp_spi12, blsp_uart12, blsp_uim12, gp_pdm0, NA, NA, NA), + PINGROUP(49, blsp_spi12, blsp_uart12, blsp_i2c12, NA, NA, NA, NA), + PINGROUP(50, blsp_spi12, blsp_uart12, blsp_i2c12, NA, NA, NA, NA), + PINGROUP(51, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA), + PINGROUP(52, blsp_spi8, blsp_uart8, blsp_uim8, NA, NA, NA, NA), + PINGROUP(53, blsp_spi8, blsp_uart8, blsp_i2c8, NA, NA, NA, NA), + PINGROUP(54, blsp_spi8, blsp_uart8, blsp_i2c8, NA, NA, NA, NA), + PINGROUP(55, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA), + PINGROUP(56, blsp_spi9, blsp_uart9, blsp_uim9, NA, NA, NA, NA), + PINGROUP(57, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA), + PINGROUP(58, blsp_spi9, blsp_uart9, blsp_i2c9, NA, NA, NA, NA), + PINGROUP(59, blsp_spi10, blsp_uart10, blsp_uim10, NA, NA, NA, NA), + PINGROUP(60, blsp_spi10, blsp_uart10, blsp_uim10, NA, NA, NA, NA), + PINGROUP(61, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA, NA, NA), + PINGROUP(62, blsp_spi10, blsp_uart10, blsp_i2c10, NA, NA, NA, NA), + PINGROUP(63, blsp_spi11, blsp_uart11, blsp_uim11, NA, NA, NA, NA), + PINGROUP(64, blsp_spi11, blsp_uart11, blsp_uim11, NA, NA, NA, NA), + PINGROUP(65, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA), + PINGROUP(66, blsp_spi11, blsp_uart11, blsp_i2c11, NA, NA, NA, NA), + PINGROUP(67, sdc3, blsp_spi3_cs1, NA, NA, NA, NA, NA), + PINGROUP(68, sdc3, pci_e0, NA, NA, NA, NA, NA), + PINGROUP(69, sdc3, NA, NA, NA, NA, NA, NA), + PINGROUP(70, sdc3, pci_e0_n, pci_e0, NA, NA, NA, NA), + PINGROUP(71, sdc3, blsp_spi3_cs2, NA, NA, NA, NA, NA), + PINGROUP(72, sdc3, blsp_spi3_cs3, NA, NA, NA, NA, NA), + PINGROUP(73, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(74, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(75, sd_write, NA, NA, NA, NA, NA, NA), + PINGROUP(76, pri_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(77, pri_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(78, pri_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(79, pri_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(80, pri_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(81, sec_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(82, sec_mi2s, sdc4, tsif1, NA, NA, NA, NA), + PINGROUP(83, sec_mi2s, sdc4, tsif1, NA, NA, NA, gp_pdm0), + PINGROUP(84, sec_mi2s, sdc4, tsif1, NA, NA, NA, gp_pdm1), + PINGROUP(85, sec_mi2s, sdc4, tsif1, NA, gp_pdm2, NA, NA), + PINGROUP(86, ter_mi2s, sdc4, tsif1, NA, NA, NA, gcc_gp3), + PINGROUP(87, ter_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(88, ter_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(89, ter_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(90, ter_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(91, qua_mi2s, sdc4, tsif2, NA, NA, NA, NA), + PINGROUP(92, qua_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(93, qua_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(94, qua_mi2s, NA, NA, NA, NA, NA, NA), + PINGROUP(95, qua_mi2s, sdc4, tsif2, NA, NA, NA, gcc_gp1), + PINGROUP(96, qua_mi2s, sdc4, tsif2, NA, NA, NA, gcc_gp2), + PINGROUP(97, qua_mi2s, sdc4, tsif2, NA, gcc_gp3, NA, NA), + PINGROUP(98, slimbus, spkr_i2s, NA, NA, NA, NA, NA), + PINGROUP(99, slimbus, spkr_i2s, NA, NA, NA, NA, NA), + PINGROUP(100, audio_ref, spkr_i2s, NA, NA, NA, NA, NA), + PINGROUP(101, sdc4, tsif2, gp_pdm1, NA, NA, NA, NA), + PINGROUP(102, uim_batt_alarm, NA, NA, NA, NA, NA, NA), + 
PINGROUP(103, edp_hpd, NA, NA, NA, NA, NA, NA), + PINGROUP(104, spkr_i2s, NA, NA, NA, NA, NA, NA), + PINGROUP(105, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(106, blsp_spi10_cs1, NA, NA, NA, NA, NA, NA), + PINGROUP(107, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(108, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(109, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(110, gp_pdm2, NA, NA, NA, NA, NA, NA), + PINGROUP(111, blsp_spi10_cs2, NA, NA, NA, NA, NA, NA), + PINGROUP(112, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(113, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(114, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(115, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(116, blsp_spi1_cs1, NA, NA, NA, NA, NA, NA), + PINGROUP(117, blsp_spi1_cs2, NA, NA, NA, NA, NA, NA), + PINGROUP(118, blsp_spi1_cs3, NA, NA, NA, NA, NA, NA), + PINGROUP(119, cci_timer4, cci_async, sata_devsleep, sata_devsleep_n, NA, NA, NA), + PINGROUP(120, cci_async, NA, NA, NA, NA, NA, NA), + PINGROUP(121, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(122, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(123, hdmi_dtest, NA, NA, NA, NA, NA, NA), + PINGROUP(124, spdif_tx, ldo_en, NA, NA, NA, NA, NA), + PINGROUP(125, ldo_update, hdmi_rcv, NA, NA, NA, NA, NA), + PINGROUP(126, gcc_vtt, NA, NA, NA, NA, NA, NA), + PINGROUP(127, gcc_obt, NA, NA, NA, NA, NA, NA), + PINGROUP(128, blsp_spi10_cs3, NA, NA, NA, NA, NA, NA), + PINGROUP(129, sata_act, NA, NA, NA, NA, NA, NA), + PINGROUP(130, uim, blsp_spi7, blsp_uart7, blsp_uim7, NA, NA, NA), + PINGROUP(131, uim, blsp_spi7, blsp_uart7, blsp_uim7, NA, NA, NA), + PINGROUP(132, uim, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA), + PINGROUP(133, uim, blsp_spi7, blsp_uart7, blsp_i2c7, NA, NA, NA), + PINGROUP(134, hsic, NA, NA, NA, NA, NA, NA), + PINGROUP(135, hsic, NA, NA, NA, NA, NA, NA), + PINGROUP(136, spdif_tx, NA, NA, NA, NA, NA, NA), + PINGROUP(137, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(138, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(139, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(140, pci_e1_rst_n, pci_e1_rst, NA, NA, NA, NA, NA), + PINGROUP(141, pci_e1_clkreq_n, NA, NA, NA, NA, NA, NA), + PINGROUP(142, spdif_tx, NA, NA, NA, NA, NA, NA), + PINGROUP(143, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(144, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(145, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(146, sdc_emmc_mode, NA, NA, NA, NA, NA, NA), + + SDC_PINGROUP(sdc1_clk, 0x2044, 13, 6), + SDC_PINGROUP(sdc1_cmd, 0x2044, 11, 3), + SDC_PINGROUP(sdc1_data, 0x2044, 9, 0), + SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6), + SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3), + SDC_PINGROUP(sdc2_data, 0x2048, 9, 0), +}; + +#define NUM_GPIO_PINGROUPS 147 + +static const struct msm_pinctrl_soc_data apq8084_pinctrl = { + .pins = apq8084_pins, + .npins = ARRAY_SIZE(apq8084_pins), + .functions = apq8084_functions, + .nfunctions = ARRAY_SIZE(apq8084_functions), + .groups = apq8084_groups, + .ngroups = ARRAY_SIZE(apq8084_groups), + .ngpios = NUM_GPIO_PINGROUPS, +}; + +static int apq8084_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &apq8084_pinctrl); +} + +static const struct of_device_id apq8084_pinctrl_of_match[] = { + { .compatible = "qcom,apq8084-pinctrl", }, + { }, +}; + +static struct platform_driver apq8084_pinctrl_driver = { + .driver = { + .name = "apq8084-pinctrl", + .owner = THIS_MODULE, + .of_match_table = apq8084_pinctrl_of_match, + }, + .probe = apq8084_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init apq8084_pinctrl_init(void) +{ + return platform_driver_register(&apq8084_pinctrl_driver); +} +arch_initcall(apq8084_pinctrl_init); + 
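The APQ8084 driver above is almost entirely descriptive data (pins, functions, groups) plus boilerplate registration; client drivers do not call it directly but go through the generic pinctrl consumer API, which in turn reaches the msm_pinmux_set_mux()/msm_config_group_set() callbacks changed further down in pinctrl-msm.c. A minimal consumer sketch follows, assuming a hypothetical client platform device whose device-tree node carries a "default" pin state; the example_* names are illustrative and not part of this patch, only the pinctrl consumer calls are existing kernel API.

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>

/* Hypothetical consumer probe: bind this device's "default" pin state.
 * Applying the state is what ends up programming the TLMM registers via
 * the SoC driver's pinmux/pinconf callbacks.
 */
static int example_client_probe(struct platform_device *pdev)
{
        struct pinctrl *p;
        struct pinctrl_state *s;

        p = devm_pinctrl_get(&pdev->dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
        if (IS_ERR(s))
                return PTR_ERR(s);

        return pinctrl_select_state(p, s);
}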
+static void __exit apq8084_pinctrl_exit(void) +{ + platform_driver_unregister(&apq8084_pinctrl_driver); +} +module_exit(apq8084_pinctrl_exit); + +MODULE_DESCRIPTION("Qualcomm APQ8084 pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, apq8084_pinctrl_of_match); diff --git a/drivers/pinctrl/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c index acafea4c3a33..81f49a9b4dbe 100644 --- a/drivers/pinctrl/pinctrl-ipq8064.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c @@ -183,7 +183,7 @@ static const unsigned int sdc3_data_pins[] = { 71 }; .pins = gpio##id##_pins, \ .npins = ARRAY_SIZE(gpio##id##_pins), \ .funcs = (int[]){ \ - IPQ_MUX_NA, /* gpio mode */ \ + IPQ_MUX_gpio, \ IPQ_MUX_##f1, \ IPQ_MUX_##f2, \ IPQ_MUX_##f3, \ @@ -211,6 +211,7 @@ static const unsigned int sdc3_data_pins[] = { 71 }; .intr_status_bit = 0, \ .intr_ack_high = 1, \ .intr_target_bit = 0, \ + .intr_target_kpss_val = 4, \ .intr_raw_status_bit = 3, \ .intr_polarity_bit = 1, \ .intr_detection_bit = 2, \ @@ -236,6 +237,7 @@ static const unsigned int sdc3_data_pins[] = { 71 }; .intr_enable_bit = -1, \ .intr_status_bit = -1, \ .intr_target_bit = -1, \ + .intr_target_kpss_val = -1, \ .intr_raw_status_bit = -1, \ .intr_polarity_bit = -1, \ .intr_detection_bit = -1, \ @@ -243,6 +245,7 @@ static const unsigned int sdc3_data_pins[] = { 71 }; } enum ipq8064_functions { + IPQ_MUX_gpio, IPQ_MUX_mdio, IPQ_MUX_mi2s, IPQ_MUX_pdm, @@ -291,6 +294,19 @@ enum ipq8064_functions { IPQ_MUX_NA, }; +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68" +}; + static const char * const mdio_groups[] = { "gpio0", "gpio1", "gpio10", "gpio11", }; @@ -481,6 +497,7 @@ static const char * const ps_hold_groups[] = { }; static const struct msm_function ipq8064_functions[] = { + FUNCTION(gpio), FUNCTION(mdio), FUNCTION(ssbi), FUNCTION(spmi), diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index df6dda4ce803..d30dddd21323 100644 --- a/drivers/pinctrl/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -12,6 +12,7 @@ * GNU General Public License for more details. */ +#include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> @@ -26,19 +27,22 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/reboot.h> -#include "core.h" -#include "pinconf.h" +#include "../core.h" +#include "../pinconf.h" #include "pinctrl-msm.h" -#include "pinctrl-utils.h" +#include "../pinctrl-utils.h" #define MAX_NR_GPIO 300 +#define PS_HOLD_OFFSET 0x820 /** * struct msm_pinctrl - state for a pinctrl-msm device * @dev: device handle. * @pctrl: pinctrl handle. * @chip: gpiochip handle. + * @restart_nb: restart notifier block. * @irq: parent irq for the TLMM irq_chip. * @lock: Spinlock to protect register resources as well * as msm_pinctrl data structures. 
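The pinctrl-msm.c changes that start just above (and continue below) hook the TLMM's PS_HOLD register into the kernel's restart-handler chain from <linux/reboot.h>. For reference, the pattern in isolation looks roughly like the sketch below; the 0x820 offset and one-second delay mirror the patch, while the mapped base pointer and the example_* names are purely illustrative:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static void __iomem *example_tlmm_base;        /* assumed ioremap()ed elsewhere */

/* Dropping PS_HOLD resets the board, so the callback is not expected to
 * return; the mdelay() only gives the hardware time to react.
 */
static int example_ps_hold_restart(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        writel(0, example_tlmm_base + 0x820);
        mdelay(1000);
        return NOTIFY_DONE;
}

static struct notifier_block example_restart_nb = {
        .notifier_call = example_ps_hold_restart,
        .priority = 128,        /* default priority for SoC-level handlers */
};

/* register_restart_handler(&example_restart_nb) adds the handler to the
 * chain walked at reboot time; unregister_restart_handler() removes it,
 * as the msm_pinctrl_remove() hunk below does.
 */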
@@ -52,6 +56,7 @@ struct msm_pinctrl { struct device *dev; struct pinctrl_dev *pctrl; struct gpio_chip chip; + struct notifier_block restart_nb; int irq; spinlock_t lock; @@ -130,9 +135,9 @@ static int msm_get_function_groups(struct pinctrl_dev *pctldev, return 0; } -static int msm_pinmux_enable(struct pinctrl_dev *pctldev, - unsigned function, - unsigned group) +static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, + unsigned function, + unsigned group) { struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); const struct msm_pingroup *g; @@ -142,9 +147,6 @@ static int msm_pinmux_enable(struct pinctrl_dev *pctldev, g = &pctrl->soc->groups[group]; - if (WARN_ON(g->mux_bit < 0)) - return -EINVAL; - for (i = 0; i < g->nfuncs; i++) { if (g->funcs[i] == function) break; @@ -165,36 +167,11 @@ static int msm_pinmux_enable(struct pinctrl_dev *pctldev, return 0; } -static void msm_pinmux_disable(struct pinctrl_dev *pctldev, - unsigned function, - unsigned group) -{ - struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - const struct msm_pingroup *g; - unsigned long flags; - u32 val; - - g = &pctrl->soc->groups[group]; - - if (WARN_ON(g->mux_bit < 0)) - return; - - spin_lock_irqsave(&pctrl->lock, flags); - - /* Clear the mux bits to select gpio mode */ - val = readl(pctrl->regs + g->ctl_reg); - val &= ~(0x7 << g->mux_bit); - writel(val, pctrl->regs + g->ctl_reg); - - spin_unlock_irqrestore(&pctrl->lock, flags); -} - static const struct pinmux_ops msm_pinmux_ops = { .get_functions_count = msm_get_functions_count, .get_function_name = msm_get_function_name, .get_function_groups = msm_get_function_groups, - .enable = msm_pinmux_enable, - .disable = msm_pinmux_disable, + .set_mux = msm_pinmux_set_mux, }; static int msm_config_reg(struct msm_pinctrl *pctrl, @@ -206,6 +183,7 @@ static int msm_config_reg(struct msm_pinctrl *pctrl, switch (param) { case PIN_CONFIG_BIAS_DISABLE: case PIN_CONFIG_BIAS_PULL_DOWN: + case PIN_CONFIG_BIAS_BUS_HOLD: case PIN_CONFIG_BIAS_PULL_UP: *bit = g->pull_bit; *mask = 3; @@ -243,6 +221,7 @@ static int msm_config_set(struct pinctrl_dev *pctldev, unsigned int pin, #define MSM_NO_PULL 0 #define MSM_PULL_DOWN 1 +#define MSM_KEEPER 2 #define MSM_PULL_UP 3 static unsigned msm_regval_to_drive(u32 val) @@ -280,6 +259,9 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_DOWN: arg = arg == MSM_PULL_DOWN; break; + case PIN_CONFIG_BIAS_BUS_HOLD: + arg = arg == MSM_KEEPER; + break; case PIN_CONFIG_BIAS_PULL_UP: arg = arg == MSM_PULL_UP; break; @@ -339,6 +321,9 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_DOWN: arg = MSM_PULL_DOWN; break; + case PIN_CONFIG_BIAS_BUS_HOLD: + arg = MSM_KEEPER; + break; case PIN_CONFIG_BIAS_PULL_UP: arg = MSM_PULL_UP; break; @@ -669,8 +654,6 @@ static void msm_gpio_irq_ack(struct irq_data *d) spin_unlock_irqrestore(&pctrl->lock, flags); } -#define INTR_TARGET_PROC_APPS 4 - static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -694,7 +677,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) /* Route interrupts to application cpu */ val = readl(pctrl->regs + g->intr_target_reg); val &= ~(7 << g->intr_target_bit); - val |= INTR_TARGET_PROC_APPS << g->intr_target_bit; + val |= g->intr_target_kpss_val << g->intr_target_bit; writel(val, pctrl->regs + g->intr_target_reg); /* Update configuration for gpio. 
@@ -849,6 +832,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio); if (ret) { dev_err(pctrl->dev, "Failed to add pin range\n"); + gpiochip_remove(&pctrl->chip); return ret; } @@ -859,6 +843,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) IRQ_TYPE_NONE); if (ret) { dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n"); + gpiochip_remove(&pctrl->chip); return -ENOSYS; } @@ -868,6 +853,32 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return 0; } +static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb); + + writel(0, pctrl->regs + PS_HOLD_OFFSET); + mdelay(1000); + return NOTIFY_DONE; +} + +static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) +{ + int i = 0; + const struct msm_function *func = pctrl->soc->functions; + + for (; i < pctrl->soc->nfunctions; i++) + if (!strcmp(func[i].name, "ps_hold")) { + pctrl->restart_nb.notifier_call = msm_ps_hold_restart; + pctrl->restart_nb.priority = 128; + if (register_restart_handler(&pctrl->restart_nb)) + dev_err(pctrl->dev, + "failed to setup restart handler.\n"); + break; + } +} + int msm_pinctrl_probe(struct platform_device *pdev, const struct msm_pinctrl_soc_data *soc_data) { @@ -891,6 +902,8 @@ int msm_pinctrl_probe(struct platform_device *pdev, if (IS_ERR(pctrl->regs)) return PTR_ERR(pctrl->regs); + msm_pinctrl_setup_pm_reset(pctrl); + pctrl->irq = platform_get_irq(pdev, 0); if (pctrl->irq < 0) { dev_err(&pdev->dev, "No interrupt defined for msmgpio\n"); @@ -933,6 +946,8 @@ int msm_pinctrl_remove(struct platform_device *pdev) pinctrl_unregister(pctrl->pctrl); + unregister_restart_handler(&pctrl->restart_nb); + return 0; } EXPORT_SYMBOL(msm_pinctrl_remove); diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 7b2a227a590a..b952c4b4a8e9 100644 --- a/drivers/pinctrl/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -53,6 +53,8 @@ struct msm_function { * @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt * status. * @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing. + * @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from + * this gpio should get routed to the KPSS processor. * @intr_raw_status_bit: Offset in @intr_cfg_reg for the raw status bit. * @intr_polarity_bit: Offset in @intr_cfg_reg for specifying polarity of the interrupt. * @intr_detection_bit: Offset in @intr_cfg_reg for specifying interrupt type. @@ -88,6 +90,7 @@ struct msm_pingroup { unsigned intr_ack_high:1; unsigned intr_target_bit:5; + unsigned intr_target_kpss_val:5; unsigned intr_raw_status_bit:5; unsigned intr_polarity_bit:5; unsigned intr_detection_bit:5; diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c new file mode 100644 index 000000000000..2ab21ce5575a --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c @@ -0,0 +1,1284 @@ +/* + * Copyright (c) 2014, Sony Mobile Communications AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> + +#include "pinctrl-msm.h" + +static const struct pinctrl_pin_desc msm8960_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + 
PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, "GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "GPIO_119"), + PINCTRL_PIN(120, "GPIO_120"), + PINCTRL_PIN(121, "GPIO_121"), + PINCTRL_PIN(122, "GPIO_122"), + PINCTRL_PIN(123, "GPIO_123"), + PINCTRL_PIN(124, "GPIO_124"), + PINCTRL_PIN(125, "GPIO_125"), + PINCTRL_PIN(126, "GPIO_126"), + PINCTRL_PIN(127, "GPIO_127"), + PINCTRL_PIN(128, "GPIO_128"), + PINCTRL_PIN(129, "GPIO_129"), + PINCTRL_PIN(130, "GPIO_130"), + PINCTRL_PIN(131, "GPIO_131"), + PINCTRL_PIN(132, "GPIO_132"), + PINCTRL_PIN(133, "GPIO_133"), + PINCTRL_PIN(134, "GPIO_134"), + PINCTRL_PIN(135, "GPIO_135"), + PINCTRL_PIN(136, "GPIO_136"), + PINCTRL_PIN(137, "GPIO_137"), + PINCTRL_PIN(138, "GPIO_138"), + PINCTRL_PIN(139, "GPIO_139"), + PINCTRL_PIN(140, "GPIO_140"), + PINCTRL_PIN(141, "GPIO_141"), + PINCTRL_PIN(142, "GPIO_142"), + PINCTRL_PIN(143, "GPIO_143"), + PINCTRL_PIN(144, "GPIO_144"), + PINCTRL_PIN(145, "GPIO_145"), + PINCTRL_PIN(146, "GPIO_146"), + PINCTRL_PIN(147, "GPIO_147"), + PINCTRL_PIN(148, "GPIO_148"), + PINCTRL_PIN(149, "GPIO_149"), + PINCTRL_PIN(150, "GPIO_150"), + PINCTRL_PIN(151, "GPIO_151"), + + PINCTRL_PIN(152, "SDC1_CLK"), + PINCTRL_PIN(153, "SDC1_CMD"), + PINCTRL_PIN(154, "SDC1_DATA"), + PINCTRL_PIN(155, "SDC3_CLK"), + PINCTRL_PIN(156, "SDC3_CMD"), + PINCTRL_PIN(157, "SDC3_DATA"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); 
+DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); +DECLARE_MSM_GPIO_PINS(119); +DECLARE_MSM_GPIO_PINS(120); +DECLARE_MSM_GPIO_PINS(121); +DECLARE_MSM_GPIO_PINS(122); +DECLARE_MSM_GPIO_PINS(123); +DECLARE_MSM_GPIO_PINS(124); +DECLARE_MSM_GPIO_PINS(125); +DECLARE_MSM_GPIO_PINS(126); +DECLARE_MSM_GPIO_PINS(127); +DECLARE_MSM_GPIO_PINS(128); +DECLARE_MSM_GPIO_PINS(129); +DECLARE_MSM_GPIO_PINS(130); +DECLARE_MSM_GPIO_PINS(131); +DECLARE_MSM_GPIO_PINS(132); +DECLARE_MSM_GPIO_PINS(133); +DECLARE_MSM_GPIO_PINS(134); +DECLARE_MSM_GPIO_PINS(135); +DECLARE_MSM_GPIO_PINS(136); +DECLARE_MSM_GPIO_PINS(137); +DECLARE_MSM_GPIO_PINS(138); +DECLARE_MSM_GPIO_PINS(139); +DECLARE_MSM_GPIO_PINS(140); +DECLARE_MSM_GPIO_PINS(141); +DECLARE_MSM_GPIO_PINS(142); +DECLARE_MSM_GPIO_PINS(143); +DECLARE_MSM_GPIO_PINS(144); +DECLARE_MSM_GPIO_PINS(145); +DECLARE_MSM_GPIO_PINS(146); +DECLARE_MSM_GPIO_PINS(147); +DECLARE_MSM_GPIO_PINS(148); +DECLARE_MSM_GPIO_PINS(149); +DECLARE_MSM_GPIO_PINS(150); +DECLARE_MSM_GPIO_PINS(151); + +static const unsigned int sdc1_clk_pins[] = { 152 }; +static const unsigned int sdc1_cmd_pins[] = { 153 }; +static const unsigned int sdc1_data_pins[] = { 154 }; +static const unsigned int sdc3_clk_pins[] = { 155 }; +static const unsigned int sdc3_cmd_pins[] = { 156 }; +static const unsigned int sdc3_data_pins[] = { 157 }; + +#define FUNCTION(fname) \ + [MSM_MUX_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + MSM_MUX_gpio, \ + MSM_MUX_##f1, \ + MSM_MUX_##f2, \ + MSM_MUX_##f3, \ + MSM_MUX_##f4, \ + MSM_MUX_##f5, \ + MSM_MUX_##f6, \ + MSM_MUX_##f7, \ + MSM_MUX_##f8, \ + MSM_MUX_##f9, \ + MSM_MUX_##f10, \ + MSM_MUX_##f11 \ + }, \ 
+ .nfuncs = 12, \ + .ctl_reg = 0x1000 + 0x10 * id, \ + .io_reg = 0x1004 + 0x10 * id, \ + .intr_cfg_reg = 0x1008 + 0x10 * id, \ + .intr_status_reg = 0x100c + 0x10 * id, \ + .intr_target_reg = 0x400 + 0x4 * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_ack_high = 1, \ + .intr_target_bit = 0, \ + .intr_target_kpss_val = 4, \ + .intr_raw_status_bit = 3, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 1, \ + } + +#define SDC_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_target_kpss_val = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +enum msm8960_functions { + MSM_MUX_audio_pcm, + MSM_MUX_bt, + MSM_MUX_cam_mclk0, + MSM_MUX_cam_mclk1, + MSM_MUX_cam_mclk2, + MSM_MUX_codec_mic_i2s, + MSM_MUX_codec_spkr_i2s, + MSM_MUX_ext_gps, + MSM_MUX_fm, + MSM_MUX_gps_blanking, + MSM_MUX_gps_pps_in, + MSM_MUX_gps_pps_out, + MSM_MUX_gp_clk_0a, + MSM_MUX_gp_clk_0b, + MSM_MUX_gp_clk_1a, + MSM_MUX_gp_clk_1b, + MSM_MUX_gp_clk_2a, + MSM_MUX_gp_clk_2b, + MSM_MUX_gp_mn, + MSM_MUX_gp_pdm_0a, + MSM_MUX_gp_pdm_0b, + MSM_MUX_gp_pdm_1a, + MSM_MUX_gp_pdm_1b, + MSM_MUX_gp_pdm_2a, + MSM_MUX_gp_pdm_2b, + MSM_MUX_gpio, + MSM_MUX_gsbi1, + MSM_MUX_gsbi1_spi_cs1_n, + MSM_MUX_gsbi1_spi_cs2a_n, + MSM_MUX_gsbi1_spi_cs2b_n, + MSM_MUX_gsbi1_spi_cs3_n, + MSM_MUX_gsbi2, + MSM_MUX_gsbi2_spi_cs1_n, + MSM_MUX_gsbi2_spi_cs2_n, + MSM_MUX_gsbi2_spi_cs3_n, + MSM_MUX_gsbi3, + MSM_MUX_gsbi4, + MSM_MUX_gsbi4_3d_cam_i2c_l, + MSM_MUX_gsbi4_3d_cam_i2c_r, + MSM_MUX_gsbi5, + MSM_MUX_gsbi5_3d_cam_i2c_l, + MSM_MUX_gsbi5_3d_cam_i2c_r, + MSM_MUX_gsbi6, + MSM_MUX_gsbi7, + MSM_MUX_gsbi8, + MSM_MUX_gsbi9, + MSM_MUX_gsbi10, + MSM_MUX_gsbi11, + MSM_MUX_gsbi11_spi_cs1a_n, + MSM_MUX_gsbi11_spi_cs1b_n, + MSM_MUX_gsbi11_spi_cs2a_n, + MSM_MUX_gsbi11_spi_cs2b_n, + MSM_MUX_gsbi11_spi_cs3_n, + MSM_MUX_gsbi12, + MSM_MUX_hdmi_cec, + MSM_MUX_hdmi_ddc_clock, + MSM_MUX_hdmi_ddc_data, + MSM_MUX_hdmi_hot_plug_detect, + MSM_MUX_hsic, + MSM_MUX_mdp_vsync, + MSM_MUX_mi2s, + MSM_MUX_mic_i2s, + MSM_MUX_pmb_clk, + MSM_MUX_pmb_ext_ctrl, + MSM_MUX_ps_hold, + MSM_MUX_rpm_wdog, + MSM_MUX_sdc2, + MSM_MUX_sdc4, + MSM_MUX_sdc5, + MSM_MUX_slimbus1, + MSM_MUX_slimbus2, + MSM_MUX_spkr_i2s, + MSM_MUX_ssbi1, + MSM_MUX_ssbi2, + MSM_MUX_ssbi_ext_gps, + MSM_MUX_ssbi_pmic2, + MSM_MUX_ssbi_qpa1, + MSM_MUX_ssbi_ts, + MSM_MUX_tsif1, + MSM_MUX_tsif2, + MSM_MUX_ts_eoc, + MSM_MUX_usb_fs1, + MSM_MUX_usb_fs1_oe, + MSM_MUX_usb_fs1_oe_n, + MSM_MUX_usb_fs2, + MSM_MUX_usb_fs2_oe, + MSM_MUX_usb_fs2_oe_n, + MSM_MUX_vfe_camif_timer1_a, + MSM_MUX_vfe_camif_timer1_b, + MSM_MUX_vfe_camif_timer2, + MSM_MUX_vfe_camif_timer3_a, + MSM_MUX_vfe_camif_timer3_b, + MSM_MUX_vfe_camif_timer4_a, + MSM_MUX_vfe_camif_timer4_b, + MSM_MUX_vfe_camif_timer4_c, + MSM_MUX_vfe_camif_timer5_a, + MSM_MUX_vfe_camif_timer5_b, + MSM_MUX_vfe_camif_timer6_a, + MSM_MUX_vfe_camif_timer6_b, + MSM_MUX_vfe_camif_timer6_c, + MSM_MUX_vfe_camif_timer7_a, + MSM_MUX_vfe_camif_timer7_b, + 
MSM_MUX_vfe_camif_timer7_c, + MSM_MUX_wlan, + MSM_MUX_NA, +}; + +static const char * const audio_pcm_groups[] = { + "gpio63", "gpio64", "gpio65", "gpio66" +}; + +static const char * const bt_groups[] = { + "gpio28", "gpio29", "gpio83" +}; + +static const char * const cam_mclk0_groups[] = { + "gpio5" +}; + +static const char * const cam_mclk1_groups[] = { + "gpio4" +}; + +static const char * const cam_mclk2_groups[] = { + "gpio2" +}; + +static const char * const codec_mic_i2s_groups[] = { + "gpio54", "gpio55", "gpio56", "gpio57", "gpio58" +}; + +static const char * const codec_spkr_i2s_groups[] = { + "gpio59", "gpio60", "gpio61", "gpio62" +}; + +static const char * const ext_gps_groups[] = { + "gpio22", "gpio23", "gpio24", "gpio25" +}; + +static const char * const fm_groups[] = { + "gpio26", "gpio27" +}; + +static const char * const gps_blanking_groups[] = { + "gpio137" +}; + +static const char * const gps_pps_in_groups[] = { + "gpio37" +}; + +static const char * const gps_pps_out_groups[] = { + "gpio37" +}; + +static const char * const gp_clk_0a_groups[] = { + "gpio3" +}; + +static const char * const gp_clk_0b_groups[] = { + "gpio54" +}; + +static const char * const gp_clk_1a_groups[] = { + "gpio4" +}; + +static const char * const gp_clk_1b_groups[] = { + "gpio70" +}; + +static const char * const gp_clk_2a_groups[] = { + "gpio52" +}; + +static const char * const gp_clk_2b_groups[] = { + "gpio37" +}; + +static const char * const gp_mn_groups[] = { + "gpio2" +}; + +static const char * const gp_pdm_0a_groups[] = { + "gpio58" +}; + +static const char * const gp_pdm_0b_groups[] = { + "gpio39" +}; + +static const char * const gp_pdm_1a_groups[] = { + "gpio94" +}; + +static const char * const gp_pdm_1b_groups[] = { + "gpio64" +}; + +static const char * const gp_pdm_2a_groups[] = { + "gpio69" +}; + +static const char * const gp_pdm_2b_groups[] = { + "gpio53" +}; + +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134", + "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", + "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146", + "gpio147", "gpio148", "gpio149", "gpio150", "gpio151" +}; + +static 
const char * const gsbi1_groups[] = { + "gpio6", "gpio7", "gpio8", "gpio9" +}; + +static const char * const gsbi1_spi_cs1_n_groups[] = { + "gpio14" +}; + +static const char * const gsbi1_spi_cs2a_n_groups[] = { + "gpio15" +}; + +static const char * const gsbi1_spi_cs2b_n_groups[] = { + "gpio17" +}; + +static const char * const gsbi1_spi_cs3_n_groups[] = { + "gpio16" +}; + +static const char * const gsbi2_groups[] = { + "gpio10", "gpio11", "gpio12", "gpio13" +}; + +static const char * const gsbi2_spi_cs1_n_groups[] = { + "gpio52" +}; + +static const char * const gsbi2_spi_cs2_n_groups[] = { + "gpio68" +}; + +static const char * const gsbi2_spi_cs3_n_groups[] = { + "gpio56" +}; + +static const char * const gsbi3_groups[] = { + "gpio14", "gpio15", "gpio16", "gpio17" +}; + +static const char * const gsbi4_groups[] = { + "gpio18", "gpio19", "gpio20", "gpio21" +}; + +static const char * const gsbi4_3d_cam_i2c_l_groups[] = { + "gpio18", "gpio19" +}; + +static const char * const gsbi4_3d_cam_i2c_r_groups[] = { + "gpio20", "gpio21" +}; + +static const char * const gsbi5_groups[] = { + "gpio22", "gpio23", "gpio24", "gpio25" +}; + +static const char * const gsbi5_3d_cam_i2c_l_groups[] = { + "gpio22", "gpio23" +}; + +static const char * const gsbi5_3d_cam_i2c_r_groups[] = { + "gpio24", "gpio25" +}; + +static const char * const gsbi6_groups[] = { + "gpio26", "gpio27", "gpio28", "gpio29" +}; + +static const char * const gsbi7_groups[] = { + "gpio30", "gpio31", "gpio32", "gpio33" +}; + +static const char * const gsbi8_groups[] = { + "gpio34", "gpio35", "gpio36", "gpio37" +}; + +static const char * const gsbi9_groups[] = { + "gpio93", "gpio94", "gpio95", "gpio96" +}; + +static const char * const gsbi10_groups[] = { + "gpio71", "gpio72", "gpio73", "gpio74" +}; + +static const char * const gsbi11_groups[] = { + "gpio38", "gpio39", "gpio40", "gpio41" +}; + +static const char * const gsbi11_spi_cs1a_n_groups[] = { + "gpio36" +}; + +static const char * const gsbi11_spi_cs1b_n_groups[] = { + "gpio18" +}; + +static const char * const gsbi11_spi_cs2a_n_groups[] = { + "gpio37" +}; + +static const char * const gsbi11_spi_cs2b_n_groups[] = { + "gpio19" +}; + +static const char * const gsbi11_spi_cs3_n_groups[] = { + "gpio76" +}; + +static const char * const gsbi12_groups[] = { + "gpio42", "gpio43", "gpio44", "gpio45" +}; + +static const char * const hdmi_cec_groups[] = { + "gpio99" +}; + +static const char * const hdmi_ddc_clock_groups[] = { + "gpio100" +}; + +static const char * const hdmi_ddc_data_groups[] = { + "gpio101" +}; + +static const char * const hdmi_hot_plug_detect_groups[] = { + "gpio102" +}; + +static const char * const hsic_groups[] = { + "gpio150", "gpio151" +}; + +static const char * const mdp_vsync_groups[] = { + "gpio0", "gpio1", "gpio19" +}; + +static const char * const mi2s_groups[] = { + "gpio47", "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53" +}; + +static const char * const mic_i2s_groups[] = { + "gpio71", "gpio72", "gpio73", "gpio74" +}; + +static const char * const pmb_clk_groups[] = { + "gpio21", "gpio86", "gpio112" +}; + +static const char * const pmb_ext_ctrl_groups[] = { + "gpio4", "gpio5" +}; + +static const char * const ps_hold_groups[] = { + "gpio108" +}; + +static const char * const rpm_wdog_groups[] = { + "gpio12" +}; + +static const char * const sdc2_groups[] = { + "gpio89", "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", + "gpio96", "gpio97", "gpio98" +}; + +static const char * const sdc4_groups[] = { + "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", 
"gpio88" +}; + +static const char * const sdc5_groups[] = { + "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82" +}; + +static const char * const slimbus1_groups[] = { + "gpio50", "gpio51", "gpio60", "gpio61" +}; + +static const char * const slimbus2_groups[] = { + "gpio42", "gpio43" +}; + +static const char * const spkr_i2s_groups[] = { + "gpio67", "gpio68", "gpio69", "gpio70" +}; + +static const char * const ssbi1_groups[] = { + "gpio141", "gpio143" +}; + +static const char * const ssbi2_groups[] = { + "gpio140", "gpio142" +}; + +static const char * const ssbi_ext_gps_groups[] = { + "gpio23" +}; + +static const char * const ssbi_pmic2_groups[] = { + "gpio149" +}; + +static const char * const ssbi_qpa1_groups[] = { + "gpio131" +}; + +static const char * const ssbi_ts_groups[] = { + "gpio10" +}; + +static const char * const tsif1_groups[] = { + "gpio75", "gpio76", "gpio77", "gpio82" +}; + +static const char * const tsif2_groups[] = { + "gpio78", "gpio79", "gpio80", "gpio81" +}; + +static const char * const ts_eoc_groups[] = { + "gpio11" +}; + +static const char * const usb_fs1_groups[] = { + "gpio32", "gpio33" +}; + +static const char * const usb_fs1_oe_groups[] = { + "gpio31" +}; + +static const char * const usb_fs1_oe_n_groups[] = { + "gpio31" +}; + +static const char * const usb_fs2_groups[] = { + "gpio34", "gpio35" +}; + +static const char * const usb_fs2_oe_groups[] = { + "gpio36" +}; + +static const char * const usb_fs2_oe_n_groups[] = { + "gpio36" +}; + +static const char * const vfe_camif_timer1_a_groups[] = { + "gpio2" +}; + +static const char * const vfe_camif_timer1_b_groups[] = { + "gpio38" +}; + +static const char * const vfe_camif_timer2_groups[] = { + "gpio3" +}; + +static const char * const vfe_camif_timer3_a_groups[] = { + "gpio4" +}; + +static const char * const vfe_camif_timer3_b_groups[] = { + "gpio151" +}; + +static const char * const vfe_camif_timer4_a_groups[] = { + "gpio65" +}; + +static const char * const vfe_camif_timer4_b_groups[] = { + "gpio150" +}; + +static const char * const vfe_camif_timer4_c_groups[] = { + "gpio10" +}; + +static const char * const vfe_camif_timer5_a_groups[] = { + "gpio66" +}; + +static const char * const vfe_camif_timer5_b_groups[] = { + "gpio39" +}; + +static const char * const vfe_camif_timer6_a_groups[] = { + "gpio71" +}; + +static const char * const vfe_camif_timer6_b_groups[] = { + "gpio0" +}; + +static const char * const vfe_camif_timer6_c_groups[] = { + "gpio18" +}; + +static const char * const vfe_camif_timer7_a_groups[] = { + "gpio67" +}; + +static const char * const vfe_camif_timer7_b_groups[] = { + "gpio1" +}; + +static const char * const vfe_camif_timer7_c_groups[] = { + "gpio19" +}; + +static const char * const wlan_groups[] = { + "gpio84", "gpio85", "gpio86", "gpio87", "gpio88" +}; + +static const struct msm_function msm8960_functions[] = { + FUNCTION(audio_pcm), + FUNCTION(bt), + FUNCTION(cam_mclk0), + FUNCTION(cam_mclk1), + FUNCTION(cam_mclk2), + FUNCTION(codec_mic_i2s), + FUNCTION(codec_spkr_i2s), + FUNCTION(ext_gps), + FUNCTION(fm), + FUNCTION(gps_blanking), + FUNCTION(gps_pps_in), + FUNCTION(gps_pps_out), + FUNCTION(gp_clk_0a), + FUNCTION(gp_clk_0b), + FUNCTION(gp_clk_1a), + FUNCTION(gp_clk_1b), + FUNCTION(gp_clk_2a), + FUNCTION(gp_clk_2b), + FUNCTION(gp_mn), + FUNCTION(gp_pdm_0a), + FUNCTION(gp_pdm_0b), + FUNCTION(gp_pdm_1a), + FUNCTION(gp_pdm_1b), + FUNCTION(gp_pdm_2a), + FUNCTION(gp_pdm_2b), + FUNCTION(gpio), + FUNCTION(gsbi1), + FUNCTION(gsbi1_spi_cs1_n), + FUNCTION(gsbi1_spi_cs2a_n), + 
FUNCTION(gsbi1_spi_cs2b_n), + FUNCTION(gsbi1_spi_cs3_n), + FUNCTION(gsbi2), + FUNCTION(gsbi2_spi_cs1_n), + FUNCTION(gsbi2_spi_cs2_n), + FUNCTION(gsbi2_spi_cs3_n), + FUNCTION(gsbi3), + FUNCTION(gsbi4), + FUNCTION(gsbi4_3d_cam_i2c_l), + FUNCTION(gsbi4_3d_cam_i2c_r), + FUNCTION(gsbi5), + FUNCTION(gsbi5_3d_cam_i2c_l), + FUNCTION(gsbi5_3d_cam_i2c_r), + FUNCTION(gsbi6), + FUNCTION(gsbi7), + FUNCTION(gsbi8), + FUNCTION(gsbi9), + FUNCTION(gsbi10), + FUNCTION(gsbi11), + FUNCTION(gsbi11_spi_cs1a_n), + FUNCTION(gsbi11_spi_cs1b_n), + FUNCTION(gsbi11_spi_cs2a_n), + FUNCTION(gsbi11_spi_cs2b_n), + FUNCTION(gsbi11_spi_cs3_n), + FUNCTION(gsbi12), + FUNCTION(hdmi_cec), + FUNCTION(hdmi_ddc_clock), + FUNCTION(hdmi_ddc_data), + FUNCTION(hdmi_hot_plug_detect), + FUNCTION(hsic), + FUNCTION(mdp_vsync), + FUNCTION(mi2s), + FUNCTION(mic_i2s), + FUNCTION(pmb_clk), + FUNCTION(pmb_ext_ctrl), + FUNCTION(ps_hold), + FUNCTION(rpm_wdog), + FUNCTION(sdc2), + FUNCTION(sdc4), + FUNCTION(sdc5), + FUNCTION(slimbus1), + FUNCTION(slimbus2), + FUNCTION(spkr_i2s), + FUNCTION(ssbi1), + FUNCTION(ssbi2), + FUNCTION(ssbi_ext_gps), + FUNCTION(ssbi_pmic2), + FUNCTION(ssbi_qpa1), + FUNCTION(ssbi_ts), + FUNCTION(tsif1), + FUNCTION(tsif2), + FUNCTION(ts_eoc), + FUNCTION(usb_fs1), + FUNCTION(usb_fs1_oe), + FUNCTION(usb_fs1_oe_n), + FUNCTION(usb_fs2), + FUNCTION(usb_fs2_oe), + FUNCTION(usb_fs2_oe_n), + FUNCTION(vfe_camif_timer1_a), + FUNCTION(vfe_camif_timer1_b), + FUNCTION(vfe_camif_timer2), + FUNCTION(vfe_camif_timer3_a), + FUNCTION(vfe_camif_timer3_b), + FUNCTION(vfe_camif_timer4_a), + FUNCTION(vfe_camif_timer4_b), + FUNCTION(vfe_camif_timer4_c), + FUNCTION(vfe_camif_timer5_a), + FUNCTION(vfe_camif_timer5_b), + FUNCTION(vfe_camif_timer6_a), + FUNCTION(vfe_camif_timer6_b), + FUNCTION(vfe_camif_timer6_c), + FUNCTION(vfe_camif_timer7_a), + FUNCTION(vfe_camif_timer7_b), + FUNCTION(vfe_camif_timer7_c), + FUNCTION(wlan), +}; + +static const struct msm_pingroup msm8960_groups[] = { + PINGROUP(0, mdp_vsync, vfe_camif_timer6_b, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(1, mdp_vsync, vfe_camif_timer7_b, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(2, vfe_camif_timer1_a, gp_mn, NA, cam_mclk2, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(3, vfe_camif_timer2, gp_clk_0a, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(4, vfe_camif_timer3_a, cam_mclk1, gp_clk_1a, pmb_ext_ctrl, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(5, cam_mclk0, pmb_ext_ctrl, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(6, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(7, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(8, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(9, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(10, gsbi2, ssbi_ts, NA, vfe_camif_timer4_c, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(11, gsbi2, ts_eoc, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(12, gsbi2, rpm_wdog, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(13, gsbi2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(14, gsbi3, gsbi1_spi_cs1_n, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(15, gsbi3, gsbi1_spi_cs2a_n, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(16, gsbi3, gsbi1_spi_cs3_n, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(17, gsbi3, gsbi1_spi_cs2b_n, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(18, gsbi4, gsbi11_spi_cs1b_n, NA, NA, gsbi4_3d_cam_i2c_l, vfe_camif_timer6_c, NA, NA, NA, NA, NA), + PINGROUP(19, gsbi4, gsbi11_spi_cs2b_n, NA, mdp_vsync, NA, gsbi4_3d_cam_i2c_l, vfe_camif_timer7_c, NA, NA, NA, NA), + PINGROUP(20, gsbi4, 
gsbi4_3d_cam_i2c_r, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(21, gsbi4, pmb_clk, gsbi4_3d_cam_i2c_r, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(22, gsbi5, ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_l, NA), + PINGROUP(23, gsbi5, ssbi_ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_l, NA), + PINGROUP(24, gsbi5, ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_r, NA), + PINGROUP(25, gsbi5, ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_r, NA), + PINGROUP(26, fm, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(27, fm, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(28, bt, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(29, bt, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(30, gsbi7, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(31, gsbi7, usb_fs1_oe, usb_fs1_oe_n, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(32, gsbi7, usb_fs1, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(33, gsbi7, usb_fs1, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(34, gsbi8, usb_fs2, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(35, gsbi8, usb_fs2, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(36, gsbi8, usb_fs2_oe, usb_fs2_oe_n, gsbi11_spi_cs1a_n, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(37, gsbi8, gps_pps_out, gps_pps_in, gsbi11_spi_cs2a_n, gp_clk_2b, NA, NA, NA, NA, NA, NA), + PINGROUP(38, gsbi11, NA, NA, NA, NA, NA, NA, NA, NA, vfe_camif_timer1_b, NA), + PINGROUP(39, gsbi11, gp_pdm_0b, NA, NA, NA, NA, NA, NA, NA, NA, vfe_camif_timer5_b), + PINGROUP(40, gsbi11, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(41, gsbi11, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(42, gsbi12, slimbus2, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(43, gsbi12, slimbus2, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(44, gsbi12, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(45, gsbi12, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(46, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(47, mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(48, mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(49, mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(50, mi2s, slimbus1, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(51, mi2s, slimbus1, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(52, mi2s, gp_clk_2a, gsbi2_spi_cs1_n, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(53, mi2s, gp_pdm_2b, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(54, codec_mic_i2s, gp_clk_0b, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(55, codec_mic_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(56, codec_mic_i2s, gsbi2_spi_cs3_n, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(57, codec_mic_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(58, codec_mic_i2s, gp_pdm_0a, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(59, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(60, slimbus1, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(61, slimbus1, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(62, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(63, audio_pcm, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(64, audio_pcm, gp_pdm_1b, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(65, audio_pcm, vfe_camif_timer4_a, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(66, audio_pcm, vfe_camif_timer5_a, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(67, spkr_i2s, vfe_camif_timer7_a, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(68, spkr_i2s, 
gsbi2_spi_cs2_n, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(69, spkr_i2s, gp_pdm_2a, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(70, spkr_i2s, gp_clk_1b, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(71, mic_i2s, gsbi10, vfe_camif_timer6_a, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(72, mic_i2s, gsbi10, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(73, mic_i2s, gsbi10, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(74, mic_i2s, gsbi10, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(75, tsif1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(76, tsif1, gsbi11_spi_cs3_n, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(77, tsif1, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(78, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(79, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(80, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(81, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(82, tsif1, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(83, bt, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(84, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(85, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(86, wlan, sdc4, pmb_clk, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(87, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(88, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(89, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(90, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(91, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(92, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(93, sdc2, gsbi9, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(94, sdc2, gsbi9, gp_pdm_1a, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(95, sdc2, gsbi9, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(96, sdc2, gsbi9, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(97, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(98, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(99, hdmi_cec, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(100, hdmi_ddc_clock, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(101, hdmi_ddc_data, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(102, hdmi_hot_plug_detect, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(103, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(105, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(106, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(107, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(108, ps_hold, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(109, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(112, NA, pmb_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(117, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(118, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, 
NA, NA, NA), + PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(124, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(128, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(129, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(130, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(131, NA, ssbi_qpa1, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(134, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(135, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(137, gps_blanking, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(140, ssbi2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(141, ssbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(142, ssbi2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(143, ssbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(145, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(148, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(149, ssbi_pmic2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(150, hsic, NA, vfe_camif_timer4_b, NA, NA, NA, NA, NA, NA, NA, NA), + PINGROUP(151, hsic, NA, vfe_camif_timer3_b, NA, NA, NA, NA, NA, NA, NA, NA), + + SDC_PINGROUP(sdc1_clk, 0x20a0, 13, 6), + SDC_PINGROUP(sdc1_cmd, 0x20a0, 11, 3), + SDC_PINGROUP(sdc1_data, 0x20a0, 9, 0), + + SDC_PINGROUP(sdc3_clk, 0x20a4, 14, 6), + SDC_PINGROUP(sdc3_cmd, 0x20a4, 11, 3), + SDC_PINGROUP(sdc3_data, 0x20a4, 9, 0), +}; + +#define NUM_GPIO_PINGROUPS 152 + +static const struct msm_pinctrl_soc_data msm8960_pinctrl = { + .pins = msm8960_pins, + .npins = ARRAY_SIZE(msm8960_pins), + .functions = msm8960_functions, + .nfunctions = ARRAY_SIZE(msm8960_functions), + .groups = msm8960_groups, + .ngroups = ARRAY_SIZE(msm8960_groups), + .ngpios = NUM_GPIO_PINGROUPS, +}; + +static int msm8960_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &msm8960_pinctrl); +} + +static const struct of_device_id msm8960_pinctrl_of_match[] = { + { .compatible = "qcom,msm8960-pinctrl", }, + { }, +}; + +static struct platform_driver msm8960_pinctrl_driver = { + .driver = { + .name = "msm8960-pinctrl", + .owner = THIS_MODULE, + .of_match_table = msm8960_pinctrl_of_match, + }, + .probe = msm8960_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init msm8960_pinctrl_init(void) +{ + return platform_driver_register(&msm8960_pinctrl_driver); +} +arch_initcall(msm8960_pinctrl_init); + +static void __exit msm8960_pinctrl_exit(void) +{ + platform_driver_unregister(&msm8960_pinctrl_driver); +} +module_exit(msm8960_pinctrl_exit); + +MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); +MODULE_DESCRIPTION("Qualcomm MSM8960 pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, msm8960_pinctrl_of_match); diff --git a/drivers/pinctrl/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c index 
418306911a6f..3c858384d041 100644 --- a/drivers/pinctrl/pinctrl-msm8x74.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c @@ -342,7 +342,7 @@ static const unsigned int sdc2_data_pins[] = { 151 }; .pins = gpio##id##_pins, \ .npins = ARRAY_SIZE(gpio##id##_pins), \ .funcs = (int[]){ \ - MSM_MUX_NA, /* gpio mode */ \ + MSM_MUX_gpio, \ MSM_MUX_##f1, \ MSM_MUX_##f2, \ MSM_MUX_##f3, \ @@ -366,6 +366,7 @@ static const unsigned int sdc2_data_pins[] = { 151 }; .intr_enable_bit = 0, \ .intr_status_bit = 0, \ .intr_target_bit = 5, \ + .intr_target_kpss_val = 4, \ .intr_raw_status_bit = 4, \ .intr_polarity_bit = 1, \ .intr_detection_bit = 2, \ @@ -391,6 +392,7 @@ static const unsigned int sdc2_data_pins[] = { 151 }; .intr_enable_bit = -1, \ .intr_status_bit = -1, \ .intr_target_bit = -1, \ + .intr_target_kpss_val = -1, \ .intr_raw_status_bit = -1, \ .intr_polarity_bit = -1, \ .intr_detection_bit = -1, \ @@ -402,6 +404,7 @@ static const unsigned int sdc2_data_pins[] = { 151 }; * the pingroup table below. */ enum msm8x74_functions { + MSM_MUX_gpio, MSM_MUX_cci_i2c0, MSM_MUX_cci_i2c1, MSM_MUX_blsp_i2c1, @@ -509,6 +512,31 @@ enum msm8x74_functions { MSM_MUX_NA, }; +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", + "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", + "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134", + "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140", + "gpio141", "gpio142", "gpio143", "gpio144", "gpio145" +}; + static const char * const blsp_uart1_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3" }; @@ -728,6 +756,7 @@ static const char * const wlan_groups[] = { static const char * const slimbus_groups[] = { "gpio70", "gpio71" }; static const struct msm_function msm8x74_functions[] = { + FUNCTION(gpio), FUNCTION(cci_i2c0), FUNCTION(cci_i2c1), FUNCTION(uim1), diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig new file mode 100644 index 000000000000..d0461cd5d707 --- /dev/null +++ b/drivers/pinctrl/samsung/Kconfig @@ -0,0 +1,28 @@ +# +# Samsung Pin control drivers +# +config PINCTRL_SAMSUNG + bool + select PINMUX + select PINCONF + +config PINCTRL_EXYNOS + bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440" + depends on OF && GPIOLIB && (ARCH_EXYNOS || 
ARCH_S5PV210) + select PINCTRL_SAMSUNG + +config PINCTRL_EXYNOS5440 + bool "Samsung EXYNOS5440 SoC pinctrl driver" + depends on SOC_EXYNOS5440 + select PINMUX + select PINCONF + +config PINCTRL_S3C24XX + bool "Samsung S3C24XX SoC pinctrl driver" + depends on ARCH_S3C24XX + select PINCTRL_SAMSUNG + +config PINCTRL_S3C64XX + bool "Samsung S3C64XX SoC pinctrl driver" + depends on ARCH_S3C64XX + select PINCTRL_SAMSUNG diff --git a/drivers/pinctrl/samsung/Makefile b/drivers/pinctrl/samsung/Makefile new file mode 100644 index 000000000000..70160c059edd --- /dev/null +++ b/drivers/pinctrl/samsung/Makefile @@ -0,0 +1,7 @@ +# Samsung pin control drivers + +obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o +obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o +obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o +obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o +obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 9609c23834ce..d7154ed0b0eb 100644 --- a/drivers/pinctrl/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -33,6 +33,18 @@ #include "pinctrl-samsung.h" #include "pinctrl-exynos.h" +struct exynos_irq_chip { + struct irq_chip chip; + + u32 eint_con; + u32 eint_mask; + u32 eint_pend; +}; + +static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip) +{ + return container_of(chip, struct exynos_irq_chip, chip); +} static struct samsung_pin_bank_type bank_type_off = { .fld_width = { 4, 1, 2, 2, 2, 2, }, @@ -50,11 +62,13 @@ static const struct of_device_id exynos_wkup_irq_ids[] = { { } }; -static void exynos_gpio_irq_mask(struct irq_data *irqd) +static void exynos_irq_mask(struct irq_data *irqd) { + struct irq_chip *chip = irq_data_get_irq_chip(irqd); + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); struct samsung_pinctrl_drv_data *d = bank->drvdata; - unsigned long reg_mask = d->ctrl->geint_mask + bank->eint_offset; + unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset; unsigned long mask; unsigned long flags; @@ -67,20 +81,24 @@ static void exynos_gpio_irq_mask(struct irq_data *irqd) spin_unlock_irqrestore(&bank->slock, flags); } -static void exynos_gpio_irq_ack(struct irq_data *irqd) +static void exynos_irq_ack(struct irq_data *irqd) { + struct irq_chip *chip = irq_data_get_irq_chip(irqd); + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); struct samsung_pinctrl_drv_data *d = bank->drvdata; - unsigned long reg_pend = d->ctrl->geint_pend + bank->eint_offset; + unsigned long reg_pend = our_chip->eint_pend + bank->eint_offset; writel(1 << irqd->hwirq, d->virt_base + reg_pend); } -static void exynos_gpio_irq_unmask(struct irq_data *irqd) +static void exynos_irq_unmask(struct irq_data *irqd) { + struct irq_chip *chip = irq_data_get_irq_chip(irqd); + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); struct samsung_pinctrl_drv_data *d = bank->drvdata; - unsigned long reg_mask = d->ctrl->geint_mask + bank->eint_offset; + unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset; unsigned long mask; unsigned long flags; @@ -93,7 +111,7 @@ static void exynos_gpio_irq_unmask(struct irq_data *irqd) * masked. 
*/ if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK) - exynos_gpio_irq_ack(irqd); + exynos_irq_ack(irqd); spin_lock_irqsave(&bank->slock, flags); @@ -104,18 +122,15 @@ static void exynos_gpio_irq_unmask(struct irq_data *irqd) spin_unlock_irqrestore(&bank->slock, flags); } -static int exynos_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) +static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type) { + struct irq_chip *chip = irq_data_get_irq_chip(irqd); + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pin_bank_type *bank_type = bank->type; struct samsung_pinctrl_drv_data *d = bank->drvdata; - struct samsung_pin_ctrl *ctrl = d->ctrl; - unsigned int pin = irqd->hwirq; - unsigned int shift = EXYNOS_EINT_CON_LEN * pin; + unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; unsigned int con, trig_type; - unsigned long reg_con = ctrl->geint_con + bank->eint_offset; - unsigned long flags; - unsigned int mask; + unsigned long reg_con = our_chip->eint_con + bank->eint_offset; switch (type) { case IRQ_TYPE_EDGE_RISING: @@ -148,8 +163,32 @@ static int exynos_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) con |= trig_type << shift; writel(con, d->virt_base + reg_con); + return 0; +} + +static int exynos_irq_request_resources(struct irq_data *irqd) +{ + struct irq_chip *chip = irq_data_get_irq_chip(irqd); + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); + struct samsung_pin_bank_type *bank_type = bank->type; + struct samsung_pinctrl_drv_data *d = bank->drvdata; + unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; + unsigned long reg_con = our_chip->eint_con + bank->eint_offset; + unsigned long flags; + unsigned int mask; + unsigned int con; + int ret; + + ret = gpio_lock_as_irq(&bank->gpio_chip, irqd->hwirq); + if (ret) { + dev_err(bank->gpio_chip.dev, "unable to lock pin %s-%lu IRQ\n", + bank->name, irqd->hwirq); + return ret; + } + reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC]; - shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC]; + shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; spin_lock_irqsave(&bank->slock, flags); @@ -161,18 +200,58 @@ static int exynos_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) spin_unlock_irqrestore(&bank->slock, flags); + exynos_irq_unmask(irqd); + return 0; } +static void exynos_irq_release_resources(struct irq_data *irqd) +{ + struct irq_chip *chip = irq_data_get_irq_chip(irqd); + struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip); + struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); + struct samsung_pin_bank_type *bank_type = bank->type; + struct samsung_pinctrl_drv_data *d = bank->drvdata; + unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq; + unsigned long reg_con = our_chip->eint_con + bank->eint_offset; + unsigned long flags; + unsigned int mask; + unsigned int con; + + reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC]; + shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; + mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; + + exynos_irq_mask(irqd); + + spin_lock_irqsave(&bank->slock, flags); + + con = readl(d->virt_base + reg_con); + con &= ~(mask << shift); + con |= FUNC_INPUT << shift; + writel(con, d->virt_base + reg_con); + + 
spin_unlock_irqrestore(&bank->slock, flags); + + gpio_unlock_as_irq(&bank->gpio_chip, irqd->hwirq); +} + /* * irq_chip for gpio interrupts. */ -static struct irq_chip exynos_gpio_irq_chip = { - .name = "exynos_gpio_irq_chip", - .irq_unmask = exynos_gpio_irq_unmask, - .irq_mask = exynos_gpio_irq_mask, - .irq_ack = exynos_gpio_irq_ack, - .irq_set_type = exynos_gpio_irq_set_type, +static struct exynos_irq_chip exynos_gpio_irq_chip = { + .chip = { + .name = "exynos_gpio_irq_chip", + .irq_unmask = exynos_irq_unmask, + .irq_mask = exynos_irq_mask, + .irq_ack = exynos_irq_ack, + .irq_set_type = exynos_irq_set_type, + .irq_request_resources = exynos_irq_request_resources, + .irq_release_resources = exynos_irq_release_resources, + }, + .eint_con = EXYNOS_GPIO_ECON_OFFSET, + .eint_mask = EXYNOS_GPIO_EMASK_OFFSET, + .eint_pend = EXYNOS_GPIO_EPEND_OFFSET, }; static int exynos_gpio_irq_map(struct irq_domain *h, unsigned int virq, @@ -181,7 +260,7 @@ static int exynos_gpio_irq_map(struct irq_domain *h, unsigned int virq, struct samsung_pin_bank *b = h->host_data; irq_set_chip_data(virq, b); - irq_set_chip_and_handler(virq, &exynos_gpio_irq_chip, + irq_set_chip_and_handler(virq, &exynos_gpio_irq_chip.chip, handle_level_irq); set_irq_flags(virq, IRQF_VALID); return 0; @@ -202,7 +281,7 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data) struct samsung_pin_bank *bank = ctrl->pin_banks; unsigned int svc, group, pin, virq; - svc = readl(d->virt_base + ctrl->svc); + svc = readl(d->virt_base + EXYNOS_SVC_OFFSET); group = EXYNOS_SVC_GROUP(svc); pin = svc & EXYNOS_SVC_NUM_MASK; @@ -279,119 +358,6 @@ err_domains: return ret; } -static void exynos_wkup_irq_mask(struct irq_data *irqd) -{ - struct samsung_pin_bank *b = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = b->drvdata; - unsigned long reg_mask = d->ctrl->weint_mask + b->eint_offset; - unsigned long mask; - unsigned long flags; - - spin_lock_irqsave(&b->slock, flags); - - mask = readl(d->virt_base + reg_mask); - mask |= 1 << irqd->hwirq; - writel(mask, d->virt_base + reg_mask); - - spin_unlock_irqrestore(&b->slock, flags); -} - -static void exynos_wkup_irq_ack(struct irq_data *irqd) -{ - struct samsung_pin_bank *b = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = b->drvdata; - unsigned long pend = d->ctrl->weint_pend + b->eint_offset; - - writel(1 << irqd->hwirq, d->virt_base + pend); -} - -static void exynos_wkup_irq_unmask(struct irq_data *irqd) -{ - struct samsung_pin_bank *b = irq_data_get_irq_chip_data(irqd); - struct samsung_pinctrl_drv_data *d = b->drvdata; - unsigned long reg_mask = d->ctrl->weint_mask + b->eint_offset; - unsigned long mask; - unsigned long flags; - - /* - * Ack level interrupts right before unmask - * - * If we don't do this we'll get a double-interrupt. Level triggered - * interrupts must not fire an interrupt if the level is not - * _currently_ active, even if it was active while the interrupt was - * masked. 
- */ - if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK) - exynos_wkup_irq_ack(irqd); - - spin_lock_irqsave(&b->slock, flags); - - mask = readl(d->virt_base + reg_mask); - mask &= ~(1 << irqd->hwirq); - writel(mask, d->virt_base + reg_mask); - - spin_unlock_irqrestore(&b->slock, flags); -} - -static int exynos_wkup_irq_set_type(struct irq_data *irqd, unsigned int type) -{ - struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd); - struct samsung_pin_bank_type *bank_type = bank->type; - struct samsung_pinctrl_drv_data *d = bank->drvdata; - unsigned int pin = irqd->hwirq; - unsigned long reg_con = d->ctrl->weint_con + bank->eint_offset; - unsigned long shift = EXYNOS_EINT_CON_LEN * pin; - unsigned long con, trig_type; - unsigned long flags; - unsigned int mask; - - switch (type) { - case IRQ_TYPE_EDGE_RISING: - trig_type = EXYNOS_EINT_EDGE_RISING; - break; - case IRQ_TYPE_EDGE_FALLING: - trig_type = EXYNOS_EINT_EDGE_FALLING; - break; - case IRQ_TYPE_EDGE_BOTH: - trig_type = EXYNOS_EINT_EDGE_BOTH; - break; - case IRQ_TYPE_LEVEL_HIGH: - trig_type = EXYNOS_EINT_LEVEL_HIGH; - break; - case IRQ_TYPE_LEVEL_LOW: - trig_type = EXYNOS_EINT_LEVEL_LOW; - break; - default: - pr_err("unsupported external interrupt type\n"); - return -EINVAL; - } - - if (type & IRQ_TYPE_EDGE_BOTH) - __irq_set_handler_locked(irqd->irq, handle_edge_irq); - else - __irq_set_handler_locked(irqd->irq, handle_level_irq); - - con = readl(d->virt_base + reg_con); - con &= ~(EXYNOS_EINT_CON_MASK << shift); - con |= trig_type << shift; - writel(con, d->virt_base + reg_con); - - reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC]; - shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC]; - mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; - - spin_lock_irqsave(&bank->slock, flags); - - con = readl(d->virt_base + reg_con); - con &= ~(mask << shift); - con |= EXYNOS_EINT_FUNC << shift; - writel(con, d->virt_base + reg_con); - - spin_unlock_irqrestore(&bank->slock, flags); - - return 0; -} - static u32 exynos_eint_wake_mask = 0xffffffff; u32 exynos_get_eint_wake_mask(void) @@ -417,13 +383,20 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) /* * irq_chip for wakeup interrupts */ -static struct irq_chip exynos_wkup_irq_chip = { - .name = "exynos_wkup_irq_chip", - .irq_unmask = exynos_wkup_irq_unmask, - .irq_mask = exynos_wkup_irq_mask, - .irq_ack = exynos_wkup_irq_ack, - .irq_set_type = exynos_wkup_irq_set_type, - .irq_set_wake = exynos_wkup_irq_set_wake, +static struct exynos_irq_chip exynos_wkup_irq_chip = { + .chip = { + .name = "exynos_wkup_irq_chip", + .irq_unmask = exynos_irq_unmask, + .irq_mask = exynos_irq_mask, + .irq_ack = exynos_irq_ack, + .irq_set_type = exynos_irq_set_type, + .irq_set_wake = exynos_wkup_irq_set_wake, + .irq_request_resources = exynos_irq_request_resources, + .irq_release_resources = exynos_irq_release_resources, + }, + .eint_con = EXYNOS_WKUP_ECON_OFFSET, + .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, + .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, }; /* interrupt handler for wakeup interrupts 0..15 */ @@ -464,7 +437,6 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) struct irq_chip *chip = irq_get_chip(irq); struct exynos_muxed_weint_data *eintd = irq_get_handler_data(irq); struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata; - struct samsung_pin_ctrl *ctrl = d->ctrl; unsigned long pend; unsigned long mask; int i; @@ -473,8 +445,10 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc 
*desc) for (i = 0; i < eintd->nr_banks; ++i) { struct samsung_pin_bank *b = eintd->banks[i]; - pend = readl(d->virt_base + ctrl->weint_pend + b->eint_offset); - mask = readl(d->virt_base + ctrl->weint_mask + b->eint_offset); + pend = readl(d->virt_base + EXYNOS_WKUP_EPEND_OFFSET + + b->eint_offset); + mask = readl(d->virt_base + EXYNOS_WKUP_EMASK_OFFSET + + b->eint_offset); exynos_irq_demux_eint(pend & ~mask, b->irq_domain); } @@ -484,7 +458,8 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) static int exynos_wkup_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { - irq_set_chip_and_handler(virq, &exynos_wkup_irq_chip, handle_level_irq); + irq_set_chip_and_handler(virq, &exynos_wkup_irq_chip.chip, + handle_level_irq); irq_set_chip_data(virq, h->host_data); set_irq_flags(virq, IRQF_VALID); return 0; @@ -703,13 +678,6 @@ struct samsung_pin_ctrl s5pv210_pin_ctrl[] = { /* pin-controller instance 0 data */ .pin_banks = s5pv210_pin_bank, .nr_banks = ARRAY_SIZE(s5pv210_pin_bank), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .weint_con = EXYNOS_WKUP_ECON_OFFSET, - .weint_mask = EXYNOS_WKUP_EMASK_OFFSET, - .weint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, .suspend = exynos_pinctrl_suspend, @@ -758,10 +726,6 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = { /* pin-controller instance 0 data */ .pin_banks = exynos3250_pin_banks0, .nr_banks = ARRAY_SIZE(exynos3250_pin_banks0), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -770,13 +734,6 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = { /* pin-controller instance 1 data */ .pin_banks = exynos3250_pin_banks1, .nr_banks = ARRAY_SIZE(exynos3250_pin_banks1), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .weint_con = EXYNOS_WKUP_ECON_OFFSET, - .weint_mask = EXYNOS_WKUP_EMASK_OFFSET, - .weint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, .suspend = exynos_pinctrl_suspend, @@ -843,10 +800,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = { /* pin-controller instance 0 data */ .pin_banks = exynos4210_pin_banks0, .nr_banks = ARRAY_SIZE(exynos4210_pin_banks0), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -855,13 +808,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = { /* pin-controller instance 1 data */ .pin_banks = exynos4210_pin_banks1, .nr_banks = ARRAY_SIZE(exynos4210_pin_banks1), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .weint_con = EXYNOS_WKUP_ECON_OFFSET, - .weint_mask = EXYNOS_WKUP_EMASK_OFFSET, - .weint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, .suspend = exynos_pinctrl_suspend, @@ -942,10 +888,6 @@ struct samsung_pin_ctrl 
exynos4x12_pin_ctrl[] = { /* pin-controller instance 0 data */ .pin_banks = exynos4x12_pin_banks0, .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks0), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -954,13 +896,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { /* pin-controller instance 1 data */ .pin_banks = exynos4x12_pin_banks1, .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks1), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .weint_con = EXYNOS_WKUP_ECON_OFFSET, - .weint_mask = EXYNOS_WKUP_EMASK_OFFSET, - .weint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, .suspend = exynos_pinctrl_suspend, @@ -970,10 +905,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { /* pin-controller instance 2 data */ .pin_banks = exynos4x12_pin_banks2, .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks2), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -982,10 +913,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = { /* pin-controller instance 3 data */ .pin_banks = exynos4x12_pin_banks3, .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks3), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -1058,13 +985,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { /* pin-controller instance 0 data */ .pin_banks = exynos5250_pin_banks0, .nr_banks = ARRAY_SIZE(exynos5250_pin_banks0), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .weint_con = EXYNOS_WKUP_ECON_OFFSET, - .weint_mask = EXYNOS_WKUP_EMASK_OFFSET, - .weint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, .suspend = exynos_pinctrl_suspend, @@ -1074,10 +994,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { /* pin-controller instance 1 data */ .pin_banks = exynos5250_pin_banks1, .nr_banks = ARRAY_SIZE(exynos5250_pin_banks1), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -1086,10 +1002,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { /* pin-controller instance 2 data */ .pin_banks = exynos5250_pin_banks2, .nr_banks = ARRAY_SIZE(exynos5250_pin_banks2), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -1098,10 +1010,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = { /* pin-controller instance 3 data */ .pin_banks = exynos5250_pin_banks3, .nr_banks = 
ARRAY_SIZE(exynos5250_pin_banks3), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .suspend = exynos_pinctrl_suspend, .resume = exynos_pinctrl_resume, @@ -1158,13 +1066,6 @@ struct samsung_pin_ctrl exynos5260_pin_ctrl[] = { /* pin-controller instance 0 data */ .pin_banks = exynos5260_pin_banks0, .nr_banks = ARRAY_SIZE(exynos5260_pin_banks0), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .weint_con = EXYNOS_WKUP_ECON_OFFSET, - .weint_mask = EXYNOS_WKUP_EMASK_OFFSET, - .weint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, .label = "exynos5260-gpio-ctrl0", @@ -1172,20 +1073,12 @@ struct samsung_pin_ctrl exynos5260_pin_ctrl[] = { /* pin-controller instance 1 data */ .pin_banks = exynos5260_pin_banks1, .nr_banks = ARRAY_SIZE(exynos5260_pin_banks1), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .label = "exynos5260-gpio-ctrl1", }, { /* pin-controller instance 2 data */ .pin_banks = exynos5260_pin_banks2, .nr_banks = ARRAY_SIZE(exynos5260_pin_banks2), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .label = "exynos5260-gpio-ctrl2", }, @@ -1256,13 +1149,6 @@ struct samsung_pin_ctrl exynos5420_pin_ctrl[] = { /* pin-controller instance 0 data */ .pin_banks = exynos5420_pin_banks0, .nr_banks = ARRAY_SIZE(exynos5420_pin_banks0), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .weint_con = EXYNOS_WKUP_ECON_OFFSET, - .weint_mask = EXYNOS_WKUP_EMASK_OFFSET, - .weint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .eint_wkup_init = exynos_eint_wkup_init, .label = "exynos5420-gpio-ctrl0", @@ -1270,40 +1156,24 @@ struct samsung_pin_ctrl exynos5420_pin_ctrl[] = { /* pin-controller instance 1 data */ .pin_banks = exynos5420_pin_banks1, .nr_banks = ARRAY_SIZE(exynos5420_pin_banks1), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .label = "exynos5420-gpio-ctrl1", }, { /* pin-controller instance 2 data */ .pin_banks = exynos5420_pin_banks2, .nr_banks = ARRAY_SIZE(exynos5420_pin_banks2), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .label = "exynos5420-gpio-ctrl2", }, { /* pin-controller instance 3 data */ .pin_banks = exynos5420_pin_banks3, .nr_banks = ARRAY_SIZE(exynos5420_pin_banks3), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .label = "exynos5420-gpio-ctrl3", }, { /* pin-controller instance 4 data */ .pin_banks = exynos5420_pin_banks4, .nr_banks = ARRAY_SIZE(exynos5420_pin_banks4), - .geint_con = EXYNOS_GPIO_ECON_OFFSET, - .geint_mask = 
EXYNOS_GPIO_EMASK_OFFSET, - .geint_pend = EXYNOS_GPIO_EPEND_OFFSET, - .svc = EXYNOS_SVC_OFFSET, .eint_gpio_init = exynos_eint_gpio_init, .label = "exynos5420-gpio-ctrl4", }, diff --git a/drivers/pinctrl/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index 3c91c357792f..3c91c357792f 100644 --- a/drivers/pinctrl/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c index 8fe2ab0a7698..b995ec2c5d16 100644 --- a/drivers/pinctrl/pinctrl-exynos5440.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c @@ -23,7 +23,7 @@ #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/of_irq.h> -#include "core.h" +#include "../core.h" /* EXYNOS5440 GPIO and Pinctrl register offsets */ #define GPIO_MUX 0x00 @@ -364,20 +364,14 @@ static void exynos5440_pinmux_setup(struct pinctrl_dev *pctldev, unsigned select } /* enable a specified pinmux by writing to registers */ -static int exynos5440_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int exynos5440_pinmux_set_mux(struct pinctrl_dev *pctldev, + unsigned selector, + unsigned group) { exynos5440_pinmux_setup(pctldev, selector, group, true); return 0; } -/* disable a specified pinmux by writing to registers */ -static void exynos5440_pinmux_disable(struct pinctrl_dev *pctldev, - unsigned selector, unsigned group) -{ - exynos5440_pinmux_setup(pctldev, selector, group, false); -} - /* * The calls to gpio_direction_output() and gpio_direction_input() * leads to this function call (via the pinctrl_gpio_direction_{input|output}() @@ -394,8 +388,7 @@ static const struct pinmux_ops exynos5440_pinmux_ops = { .get_functions_count = exynos5440_get_functions_count, .get_function_name = exynos5440_pinmux_get_fname, .get_function_groups = exynos5440_pinmux_get_groups, - .enable = exynos5440_pinmux_enable, - .disable = exynos5440_pinmux_disable, + .set_mux = exynos5440_pinmux_set_mux, .gpio_set_direction = exynos5440_pinmux_gpio_set_direction, }; diff --git a/drivers/pinctrl/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index ad3eaad17001..ad3eaad17001 100644 --- a/drivers/pinctrl/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c diff --git a/drivers/pinctrl/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index 89143c903000..89143c903000 100644 --- a/drivers/pinctrl/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 3e61d0f8f146..4a47691c32b1 100644 --- a/drivers/pinctrl/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -30,7 +30,7 @@ #include <linux/spinlock.h> #include <linux/syscore_ops.h> -#include "core.h" +#include "../core.h" #include "pinctrl-samsung.h" #define GROUP_SUFFIX "-grp" @@ -40,13 +40,14 @@ /* list of all possible config options supported */ static struct pin_config { - char *prop_cfg; - unsigned int cfg_type; -} pcfgs[] = { + const char *property; + enum pincfg_type param; +} cfg_params[] = { { "samsung,pin-pud", PINCFG_TYPE_PUD }, { "samsung,pin-drv", PINCFG_TYPE_DRV }, { "samsung,pin-con-pdn", PINCFG_TYPE_CON_PDN }, { "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN }, + { "samsung,pin-val", PINCFG_TYPE_DAT }, }; /* Global list of devices (struct samsung_pinctrl_drv_data) */ @@ -59,163 +60,242 @@ static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc) return container_of(gc, struct 
samsung_pin_bank, gpio_chip); } -/* check if the selector is a valid pin group selector */ static int samsung_get_group_count(struct pinctrl_dev *pctldev) { - struct samsung_pinctrl_drv_data *drvdata; + struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev); - drvdata = pinctrl_dev_get_drvdata(pctldev); - return drvdata->nr_groups; + return pmx->nr_groups; } -/* return the name of the group selected by the group selector */ static const char *samsung_get_group_name(struct pinctrl_dev *pctldev, - unsigned selector) + unsigned group) { - struct samsung_pinctrl_drv_data *drvdata; + struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev); - drvdata = pinctrl_dev_get_drvdata(pctldev); - return drvdata->pin_groups[selector].name; + return pmx->pin_groups[group].name; } -/* return the pin numbers associated with the specified group */ static int samsung_get_group_pins(struct pinctrl_dev *pctldev, - unsigned selector, const unsigned **pins, unsigned *num_pins) + unsigned group, + const unsigned **pins, + unsigned *num_pins) { - struct samsung_pinctrl_drv_data *drvdata; + struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev); + + *pins = pmx->pin_groups[group].pins; + *num_pins = pmx->pin_groups[group].num_pins; - drvdata = pinctrl_dev_get_drvdata(pctldev); - *pins = drvdata->pin_groups[selector].pins; - *num_pins = drvdata->pin_groups[selector].num_pins; return 0; } -/* create pinctrl_map entries by parsing device tree nodes */ -static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev, - struct device_node *np, struct pinctrl_map **maps, - unsigned *nmaps) +static int reserve_map(struct device *dev, struct pinctrl_map **map, + unsigned *reserved_maps, unsigned *num_maps, + unsigned reserve) { - struct device *dev = pctldev->dev; - struct pinctrl_map *map; - unsigned long *cfg = NULL; - char *gname, *fname; - int cfg_cnt = 0, map_cnt = 0, idx = 0; - - /* count the number of config options specfied in the node */ - for (idx = 0; idx < ARRAY_SIZE(pcfgs); idx++) { - if (of_find_property(np, pcfgs[idx].prop_cfg, NULL)) - cfg_cnt++; - } + unsigned old_num = *reserved_maps; + unsigned new_num = *num_maps + reserve; + struct pinctrl_map *new_map; - /* - * Find out the number of map entries to create. All the config options - * can be accomadated into a single config map entry. - */ - if (cfg_cnt) - map_cnt = 1; - if (of_find_property(np, "samsung,pin-function", NULL)) - map_cnt++; - if (!map_cnt) { - dev_err(dev, "node %s does not have either config or function " - "configurations\n", np->name); - return -EINVAL; - } + if (old_num >= new_num) + return 0; - /* Allocate memory for pin-map entries */ - map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL); - if (!map) { - dev_err(dev, "could not alloc memory for pin-maps\n"); + new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL); + if (!new_map) { + dev_err(dev, "krealloc(map) failed\n"); return -ENOMEM; } - *nmaps = 0; - /* - * Allocate memory for pin group name. The pin group name is derived - * from the node name from which these map entries are be created. 
- */ - gname = kzalloc(strlen(np->name) + GSUFFIX_LEN, GFP_KERNEL); - if (!gname) { - dev_err(dev, "failed to alloc memory for group name\n"); - goto free_map; + memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map)); + + *map = new_map; + *reserved_maps = new_num; + + return 0; +} + +static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps, + unsigned *num_maps, const char *group, + const char *function) +{ + if (WARN_ON(*num_maps == *reserved_maps)) + return -ENOSPC; + + (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; + (*map)[*num_maps].data.mux.group = group; + (*map)[*num_maps].data.mux.function = function; + (*num_maps)++; + + return 0; +} + +static int add_map_configs(struct device *dev, struct pinctrl_map **map, + unsigned *reserved_maps, unsigned *num_maps, + const char *group, unsigned long *configs, + unsigned num_configs) +{ + unsigned long *dup_configs; + + if (WARN_ON(*num_maps == *reserved_maps)) + return -ENOSPC; + + dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs), + GFP_KERNEL); + if (!dup_configs) { + dev_err(dev, "kmemdup(configs) failed\n"); + return -ENOMEM; } - sprintf(gname, "%s%s", np->name, GROUP_SUFFIX); - /* - * don't have config options? then skip over to creating function - * map entries. - */ - if (!cfg_cnt) - goto skip_cfgs; - - /* Allocate memory for config entries */ - cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL); - if (!cfg) { - dev_err(dev, "failed to alloc memory for configs\n"); - goto free_gname; + (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP; + (*map)[*num_maps].data.configs.group_or_pin = group; + (*map)[*num_maps].data.configs.configs = dup_configs; + (*map)[*num_maps].data.configs.num_configs = num_configs; + (*num_maps)++; + + return 0; +} + +static int add_config(struct device *dev, unsigned long **configs, + unsigned *num_configs, unsigned long config) +{ + unsigned old_num = *num_configs; + unsigned new_num = old_num + 1; + unsigned long *new_configs; + + new_configs = krealloc(*configs, sizeof(*new_configs) * new_num, + GFP_KERNEL); + if (!new_configs) { + dev_err(dev, "krealloc(configs) failed\n"); + return -ENOMEM; } - /* Prepare a list of config settings */ - for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) { - u32 value; - if (!of_property_read_u32(np, pcfgs[idx].prop_cfg, &value)) - cfg[cfg_cnt++] = - PINCFG_PACK(pcfgs[idx].cfg_type, value); + new_configs[old_num] = config; + + *configs = new_configs; + *num_configs = new_num; + + return 0; +} + +static void samsung_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, + unsigned num_maps) +{ + int i; + + for (i = 0; i < num_maps; i++) + if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) + kfree(map[i].data.configs.configs); + + kfree(map); +} + +static int samsung_dt_subnode_to_map(struct samsung_pinctrl_drv_data *drvdata, + struct device *dev, + struct device_node *np, + struct pinctrl_map **map, + unsigned *reserved_maps, + unsigned *num_maps) +{ + int ret, i; + u32 val; + unsigned long config; + unsigned long *configs = NULL; + unsigned num_configs = 0; + unsigned reserve; + struct property *prop; + const char *group; + bool has_func = false; + + ret = of_property_read_u32(np, "samsung,pin-function", &val); + if (!ret) + has_func = true; + + for (i = 0; i < ARRAY_SIZE(cfg_params); i++) { + ret = of_property_read_u32(np, cfg_params[i].property, &val); + if (!ret) { + config = PINCFG_PACK(cfg_params[i].param, val); + ret = add_config(dev, &configs, &num_configs, config); + if (ret < 0) + goto exit; + /* 
EINVAL=missing, which is fine since it's optional */ + } else if (ret != -EINVAL) { + dev_err(dev, "could not parse property %s\n", + cfg_params[i].property); + } } - /* create the config map entry */ - map[*nmaps].data.configs.group_or_pin = gname; - map[*nmaps].data.configs.configs = cfg; - map[*nmaps].data.configs.num_configs = cfg_cnt; - map[*nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP; - *nmaps += 1; - -skip_cfgs: - /* create the function map entry */ - if (of_find_property(np, "samsung,pin-function", NULL)) { - fname = kzalloc(strlen(np->name) + FSUFFIX_LEN, GFP_KERNEL); - if (!fname) { - dev_err(dev, "failed to alloc memory for func name\n"); - goto free_cfg; + reserve = 0; + if (has_func) + reserve++; + if (num_configs) + reserve++; + ret = of_property_count_strings(np, "samsung,pins"); + if (ret < 0) { + dev_err(dev, "could not parse property samsung,pins\n"); + goto exit; + } + reserve *= ret; + + ret = reserve_map(dev, map, reserved_maps, num_maps, reserve); + if (ret < 0) + goto exit; + + of_property_for_each_string(np, "samsung,pins", prop, group) { + if (has_func) { + ret = add_map_mux(map, reserved_maps, + num_maps, group, np->full_name); + if (ret < 0) + goto exit; } - sprintf(fname, "%s%s", np->name, FUNCTION_SUFFIX); - map[*nmaps].data.mux.group = gname; - map[*nmaps].data.mux.function = fname; - map[*nmaps].type = PIN_MAP_TYPE_MUX_GROUP; - *nmaps += 1; + if (num_configs) { + ret = add_map_configs(dev, map, reserved_maps, + num_maps, group, configs, + num_configs); + if (ret < 0) + goto exit; + } } - *maps = map; - return 0; + ret = 0; -free_cfg: - kfree(cfg); -free_gname: - kfree(gname); -free_map: - kfree(map); - return -ENOMEM; +exit: + kfree(configs); + return ret; } -/* free the memory allocated to hold the pin-map table */ -static void samsung_dt_free_map(struct pinctrl_dev *pctldev, - struct pinctrl_map *map, unsigned num_maps) +static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np_config, + struct pinctrl_map **map, + unsigned *num_maps) { - int idx; - - for (idx = 0; idx < num_maps; idx++) { - if (map[idx].type == PIN_MAP_TYPE_MUX_GROUP) { - kfree(map[idx].data.mux.function); - if (!idx) - kfree(map[idx].data.mux.group); - } else if (map->type == PIN_MAP_TYPE_CONFIGS_GROUP) { - kfree(map[idx].data.configs.configs); - if (!idx) - kfree(map[idx].data.configs.group_or_pin); + struct samsung_pinctrl_drv_data *drvdata; + unsigned reserved_maps; + struct device_node *np; + int ret; + + drvdata = pinctrl_dev_get_drvdata(pctldev); + + reserved_maps = 0; + *map = NULL; + *num_maps = 0; + + if (!of_get_child_count(np_config)) + return samsung_dt_subnode_to_map(drvdata, pctldev->dev, + np_config, map, + &reserved_maps, + num_maps); + + for_each_child_of_node(np_config, np) { + ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map, + &reserved_maps, num_maps); + if (ret < 0) { + samsung_dt_free_map(pctldev, *map, *num_maps); + return ret; } - }; + } - kfree(map); + return 0; } /* list of pinctrl callbacks for the pinctrl core */ @@ -286,83 +366,21 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector, unsigned group, bool enable) { struct samsung_pinctrl_drv_data *drvdata; - const unsigned int *pins; - struct samsung_pin_bank *bank; - void __iomem *reg; - u32 mask, shift, data, pin_offset, cnt; - unsigned long flags; - - drvdata = pinctrl_dev_get_drvdata(pctldev); - pins = drvdata->pin_groups[group].pins; - - /* - * for each pin in the pin group selected, program the correspoding pin - * pin function number 
in the config register. - */ - for (cnt = 0; cnt < drvdata->pin_groups[group].num_pins; cnt++) { - struct samsung_pin_bank_type *type; - - pin_to_reg_bank(drvdata, pins[cnt] - drvdata->ctrl->base, - ®, &pin_offset, &bank); - type = bank->type; - mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1; - shift = pin_offset * type->fld_width[PINCFG_TYPE_FUNC]; - if (shift >= 32) { - /* Some banks have two config registers */ - shift -= 32; - reg += 4; - } - - spin_lock_irqsave(&bank->slock, flags); - - data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]); - data &= ~(mask << shift); - if (enable) - data |= drvdata->pin_groups[group].func << shift; - writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]); - - spin_unlock_irqrestore(&bank->slock, flags); - } -} - -/* enable a specified pinmux by writing to registers */ -static int samsung_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) -{ - samsung_pinmux_setup(pctldev, selector, group, true); - return 0; -} - -/* disable a specified pinmux by writing to registers */ -static void samsung_pinmux_disable(struct pinctrl_dev *pctldev, - unsigned selector, unsigned group) -{ - samsung_pinmux_setup(pctldev, selector, group, false); -} - -/* - * The calls to gpio_direction_output() and gpio_direction_input() - * leads to this function call (via the pinctrl_gpio_direction_{input|output}() - * function called from the gpiolib interface). - */ -static int samsung_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, - struct pinctrl_gpio_range *range, unsigned offset, bool input) -{ struct samsung_pin_bank_type *type; struct samsung_pin_bank *bank; - struct samsung_pinctrl_drv_data *drvdata; void __iomem *reg; - u32 data, pin_offset, mask, shift; + u32 mask, shift, data, pin_offset; unsigned long flags; + const struct samsung_pmx_func *func; + const struct samsung_pin_group *grp; - bank = gc_to_pin_bank(range->gc); - type = bank->type; drvdata = pinctrl_dev_get_drvdata(pctldev); + func = &drvdata->pmx_functions[selector]; + grp = &drvdata->pin_groups[group]; - pin_offset = offset - bank->pin_base; - reg = drvdata->virt_base + bank->pctl_offset + - type->reg_offset[PINCFG_TYPE_FUNC]; - + pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->ctrl->base, + ®, &pin_offset, &bank); + type = bank->type; mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1; shift = pin_offset * type->fld_width[PINCFG_TYPE_FUNC]; if (shift >= 32) { @@ -373,14 +391,21 @@ static int samsung_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, spin_lock_irqsave(&bank->slock, flags); - data = readl(reg); + data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]); data &= ~(mask << shift); - if (!input) - data |= FUNC_OUTPUT << shift; - writel(data, reg); + if (enable) + data |= func->val << shift; + writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]); spin_unlock_irqrestore(&bank->slock, flags); +} +/* enable a specified pinmux by writing to registers */ +static int samsung_pinmux_set_mux(struct pinctrl_dev *pctldev, + unsigned selector, + unsigned group) +{ + samsung_pinmux_setup(pctldev, selector, group, true); return 0; } @@ -389,9 +414,7 @@ static const struct pinmux_ops samsung_pinmux_ops = { .get_functions_count = samsung_get_functions_count, .get_function_name = samsung_pinmux_get_fname, .get_function_groups = samsung_pinmux_get_groups, - .enable = samsung_pinmux_enable, - .disable = samsung_pinmux_disable, - .gpio_set_direction = samsung_pinmux_gpio_set_direction, + .set_mux = samsung_pinmux_set_mux, }; /* set or get the pin config settings for a 
specified pin */ @@ -540,25 +563,59 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset) } /* - * gpiolib gpio_direction_input callback function. The setting of the pin - * mux function as 'gpio input' will be handled by the pinctrl susbsystem - * interface. + * The calls to gpio_direction_output() and gpio_direction_input() + * leads to this function call. */ +static int samsung_gpio_set_direction(struct gpio_chip *gc, + unsigned offset, bool input) +{ + struct samsung_pin_bank_type *type; + struct samsung_pin_bank *bank; + struct samsung_pinctrl_drv_data *drvdata; + void __iomem *reg; + u32 data, mask, shift; + unsigned long flags; + + bank = gc_to_pin_bank(gc); + type = bank->type; + drvdata = bank->drvdata; + + reg = drvdata->virt_base + bank->pctl_offset + + type->reg_offset[PINCFG_TYPE_FUNC]; + + mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1; + shift = offset * type->fld_width[PINCFG_TYPE_FUNC]; + if (shift >= 32) { + /* Some banks have two config registers */ + shift -= 32; + reg += 4; + } + + spin_lock_irqsave(&bank->slock, flags); + + data = readl(reg); + data &= ~(mask << shift); + if (!input) + data |= FUNC_OUTPUT << shift; + writel(data, reg); + + spin_unlock_irqrestore(&bank->slock, flags); + + return 0; +} + +/* gpiolib gpio_direction_input callback function. */ static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset) { - return pinctrl_gpio_direction_input(gc->base + offset); + return samsung_gpio_set_direction(gc, offset, true); } -/* - * gpiolib gpio_direction_output callback function. The setting of the pin - * mux function as 'gpio output' will be handled by the pinctrl susbsystem - * interface. - */ +/* gpiolib gpio_direction_output callback function. */ static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int value) { samsung_gpio_set(gc, offset, value); - return pinctrl_gpio_direction_output(gc->base + offset); + return samsung_gpio_set_direction(gc, offset, false); } /* @@ -578,87 +635,115 @@ static int samsung_gpio_to_irq(struct gpio_chip *gc, unsigned offset) return (virq) ? : -ENXIO; } -/* - * Parse the pin names listed in the 'samsung,pins' property and convert it - * into a list of gpio numbers are create a pin group from it. 
- */ -static int samsung_pinctrl_parse_dt_pins(struct platform_device *pdev, - struct device_node *cfg_np, - struct pinctrl_desc *pctl, - unsigned int **pin_list, - unsigned int *npins) +static struct samsung_pin_group *samsung_pinctrl_create_groups( + struct device *dev, + struct samsung_pinctrl_drv_data *drvdata, + unsigned int *cnt) { - struct device *dev = &pdev->dev; - struct property *prop; - struct pinctrl_pin_desc const *pdesc = pctl->pins; - unsigned int idx = 0, cnt; - const char *pin_name; + struct pinctrl_desc *ctrldesc = &drvdata->pctl; + struct samsung_pin_group *groups, *grp; + const struct pinctrl_pin_desc *pdesc; + int i; + + groups = devm_kzalloc(dev, ctrldesc->npins * sizeof(*groups), + GFP_KERNEL); + if (!groups) + return ERR_PTR(-EINVAL); + grp = groups; - *npins = of_property_count_strings(cfg_np, "samsung,pins"); - if (IS_ERR_VALUE(*npins)) { - dev_err(dev, "invalid pin list in %s node", cfg_np->name); + pdesc = ctrldesc->pins; + for (i = 0; i < ctrldesc->npins; ++i, ++pdesc, ++grp) { + grp->name = pdesc->name; + grp->pins = &pdesc->number; + grp->num_pins = 1; + } + + *cnt = ctrldesc->npins; + return groups; +} + +static int samsung_pinctrl_create_function(struct device *dev, + struct samsung_pinctrl_drv_data *drvdata, + struct device_node *func_np, + struct samsung_pmx_func *func) +{ + int npins; + int ret; + int i; + + if (of_property_read_u32(func_np, "samsung,pin-function", &func->val)) + return 0; + + npins = of_property_count_strings(func_np, "samsung,pins"); + if (npins < 1) { + dev_err(dev, "invalid pin list in %s node", func_np->name); return -EINVAL; } - *pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL); - if (!*pin_list) { - dev_err(dev, "failed to allocate memory for pin list\n"); + func->name = func_np->full_name; + + func->groups = devm_kzalloc(dev, npins * sizeof(char *), GFP_KERNEL); + if (!func->groups) return -ENOMEM; - } - of_property_for_each_string(cfg_np, "samsung,pins", prop, pin_name) { - for (cnt = 0; cnt < pctl->npins; cnt++) { - if (pdesc[cnt].name) { - if (!strcmp(pin_name, pdesc[cnt].name)) { - (*pin_list)[idx++] = pdesc[cnt].number; - break; - } - } - } - if (cnt == pctl->npins) { - dev_err(dev, "pin %s not valid in %s node\n", - pin_name, cfg_np->name); - devm_kfree(dev, *pin_list); - return -EINVAL; + for (i = 0; i < npins; ++i) { + const char *gname; + + ret = of_property_read_string_index(func_np, "samsung,pins", + i, &gname); + if (ret) { + dev_err(dev, + "failed to read pin name %d from %s node\n", + i, func_np->name); + return ret; } + + func->groups[i] = gname; } - return 0; + func->num_groups = npins; + return 1; } -/* - * Parse the information about all the available pin groups and pin functions - * from device node of the pin-controller. A pin group is formed with all - * the pins listed in the "samsung,pins" property. 
- */ -static int samsung_pinctrl_parse_dt(struct platform_device *pdev, - struct samsung_pinctrl_drv_data *drvdata) +static struct samsung_pmx_func *samsung_pinctrl_create_functions( + struct device *dev, + struct samsung_pinctrl_drv_data *drvdata, + unsigned int *cnt) { - struct device *dev = &pdev->dev; + struct samsung_pmx_func *functions, *func; struct device_node *dev_np = dev->of_node; struct device_node *cfg_np; - struct samsung_pin_group *groups, *grp; - struct samsung_pmx_func *functions, *func; - unsigned *pin_list; - unsigned int npins, grp_cnt, func_idx = 0; - char *gname, *fname; + unsigned int func_cnt = 0; int ret; - grp_cnt = of_get_child_count(dev_np); - if (!grp_cnt) - return -EINVAL; + /* + * Iterate over all the child nodes of the pin controller node + * and create pin groups and pin function lists. + */ + for_each_child_of_node(dev_np, cfg_np) { + struct device_node *func_np; - groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL); - if (!groups) { - dev_err(dev, "failed allocate memory for ping group list\n"); - return -EINVAL; + if (!of_get_child_count(cfg_np)) { + if (!of_find_property(cfg_np, + "samsung,pin-function", NULL)) + continue; + ++func_cnt; + continue; + } + + for_each_child_of_node(cfg_np, func_np) { + if (!of_find_property(func_np, + "samsung,pin-function", NULL)) + continue; + ++func_cnt; + } } - grp = groups; - functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL); + functions = devm_kzalloc(dev, func_cnt * sizeof(*functions), + GFP_KERNEL); if (!functions) { dev_err(dev, "failed to allocate memory for function list\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } func = functions; @@ -666,61 +751,68 @@ static int samsung_pinctrl_parse_dt(struct platform_device *pdev, * Iterate over all the child nodes of the pin controller node * and create pin groups and pin function lists. */ + func_cnt = 0; for_each_child_of_node(dev_np, cfg_np) { - u32 function; - if (!of_find_property(cfg_np, "samsung,pins", NULL)) + struct device_node *func_np; + + if (!of_get_child_count(cfg_np)) { + ret = samsung_pinctrl_create_function(dev, drvdata, + cfg_np, func); + if (ret < 0) + return ERR_PTR(ret); + if (ret > 0) { + ++func; + ++func_cnt; + } continue; + } - ret = samsung_pinctrl_parse_dt_pins(pdev, cfg_np, - &drvdata->pctl, &pin_list, &npins); - if (ret) - return ret; - - /* derive pin group name from the node name */ - gname = devm_kzalloc(dev, strlen(cfg_np->name) + GSUFFIX_LEN, - GFP_KERNEL); - if (!gname) { - dev_err(dev, "failed to alloc memory for group name\n"); - return -ENOMEM; + for_each_child_of_node(cfg_np, func_np) { + ret = samsung_pinctrl_create_function(dev, drvdata, + func_np, func); + if (ret < 0) + return ERR_PTR(ret); + if (ret > 0) { + ++func; + ++func_cnt; + } } - sprintf(gname, "%s%s", cfg_np->name, GROUP_SUFFIX); + } - grp->name = gname; - grp->pins = pin_list; - grp->num_pins = npins; - of_property_read_u32(cfg_np, "samsung,pin-function", &function); - grp->func = function; - grp++; + *cnt = func_cnt; + return functions; +} - if (!of_find_property(cfg_np, "samsung,pin-function", NULL)) - continue; +/* + * Parse the information about all the available pin groups and pin functions + * from device node of the pin-controller. A pin group is formed with all + * the pins listed in the "samsung,pins" property. 
+ */ - /* derive function name from the node name */ - fname = devm_kzalloc(dev, strlen(cfg_np->name) + FSUFFIX_LEN, - GFP_KERNEL); - if (!fname) { - dev_err(dev, "failed to alloc memory for func name\n"); - return -ENOMEM; - } - sprintf(fname, "%s%s", cfg_np->name, FUNCTION_SUFFIX); - - func->name = fname; - func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL); - if (!func->groups) { - dev_err(dev, "failed to alloc memory for group list " - "in pin function"); - return -ENOMEM; - } - func->groups[0] = gname; - func->num_groups = 1; - func++; - func_idx++; +static int samsung_pinctrl_parse_dt(struct platform_device *pdev, + struct samsung_pinctrl_drv_data *drvdata) +{ + struct device *dev = &pdev->dev; + struct samsung_pin_group *groups; + struct samsung_pmx_func *functions; + unsigned int grp_cnt = 0, func_cnt = 0; + + groups = samsung_pinctrl_create_groups(dev, drvdata, &grp_cnt); + if (IS_ERR(groups)) { + dev_err(dev, "failed to parse pin groups\n"); + return PTR_ERR(groups); + } + + functions = samsung_pinctrl_create_functions(dev, drvdata, &func_cnt); + if (IS_ERR(functions)) { + dev_err(dev, "failed to parse pin functions\n"); + return PTR_ERR(groups); } drvdata->pin_groups = groups; drvdata->nr_groups = grp_cnt; drvdata->pmx_functions = functions; - drvdata->nr_functions = func_idx; + drvdata->nr_functions = func_cnt; return 0; } @@ -790,7 +882,8 @@ static int samsung_pinctrl_register(struct platform_device *pdev, pin_bank = &drvdata->ctrl->pin_banks[bank]; pin_bank->grange.name = pin_bank->name; pin_bank->grange.id = bank; - pin_bank->grange.pin_base = pin_bank->pin_base; + pin_bank->grange.pin_base = drvdata->ctrl->base + + pin_bank->pin_base; pin_bank->grange.base = pin_bank->gpio_chip.base; pin_bank->grange.npins = pin_bank->gpio_chip.ngpio; pin_bank->grange.gc = &pin_bank->gpio_chip; @@ -800,7 +893,19 @@ static int samsung_pinctrl_register(struct platform_device *pdev, return 0; } +static int samsung_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + return pinctrl_request_gpio(chip->base + offset); +} + +static void samsung_gpio_free(struct gpio_chip *chip, unsigned offset) +{ + pinctrl_free_gpio(chip->base + offset); +} + static const struct gpio_chip samsung_gpiolib_chip = { + .request = samsung_gpio_request, + .free = samsung_gpio_free, .set = samsung_gpio_set, .get = samsung_gpio_get, .direction_input = samsung_gpio_direction_input, diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index b3e41fa5798b..5cedc9d26390 100644 --- a/drivers/pinctrl/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -26,6 +26,7 @@ #include <linux/gpio.h> /* pinmux function number for pin as gpio output line */ +#define FUNC_INPUT 0x0 #define FUNC_OUTPUT 0x1 /** @@ -156,13 +157,6 @@ struct samsung_pin_bank { * @nr_banks: number of pin banks. * @base: starting system wide pin number. * @nr_pins: number of pins supported by the controller. - * @geint_con: offset of the ext-gpio controller registers. - * @geint_mask: offset of the ext-gpio interrupt mask registers. - * @geint_pend: offset of the ext-gpio interrupt pending registers. - * @weint_con: offset of the ext-wakeup controller registers. - * @weint_mask: offset of the ext-wakeup interrupt mask registers. - * @weint_pend: offset of the ext-wakeup interrupt pending registers. - * @svc: offset of the interrupt service register. * @eint_gpio_init: platform specific callback to setup the external gpio * interrupts for the controller. 
* @eint_wkup_init: platform specific callback to setup the external wakeup @@ -176,16 +170,6 @@ struct samsung_pin_ctrl { u32 base; u32 nr_pins; - u32 geint_con; - u32 geint_mask; - u32 geint_pend; - - u32 weint_con; - u32 weint_mask; - u32 weint_pend; - - u32 svc; - int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); void (*suspend)(struct samsung_pinctrl_drv_data *); @@ -248,6 +232,7 @@ struct samsung_pmx_func { const char *name; const char **groups; u8 num_groups; + u32 val; }; /* list of all exported SoC specific data */ diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c index b9b464d0578c..6572c233f73d 100644 --- a/drivers/pinctrl/sh-pfc/core.c +++ b/drivers/pinctrl/sh-pfc/core.c @@ -542,7 +542,7 @@ static int sh_pfc_probe(struct platform_device *pdev) */ ret = sh_pfc_register_pinctrl(pfc); if (unlikely(ret != 0)) - goto error; + return ret; #ifdef CONFIG_GPIO_SH_PFC /* @@ -564,11 +564,6 @@ static int sh_pfc_probe(struct platform_device *pdev) dev_info(pfc->dev, "%s support registered\n", info->name); return 0; - -error: - if (info->ops && info->ops->exit) - info->ops->exit(pfc); - return ret; } static int sh_pfc_remove(struct platform_device *pdev) @@ -580,9 +575,6 @@ static int sh_pfc_remove(struct platform_device *pdev) #endif sh_pfc_unregister_pinctrl(pfc); - if (pfc->info->ops && pfc->info->ops->exit) - pfc->info->ops->exit(pfc); - return 0; } diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h index b7b0e6ccf305..3daaa5241c47 100644 --- a/drivers/pinctrl/sh-pfc/core.h +++ b/drivers/pinctrl/sh-pfc/core.h @@ -33,7 +33,6 @@ struct sh_pfc_pin_range { struct sh_pfc { struct device *dev; const struct sh_pfc_soc_info *info; - void *soc_data; spinlock_t lock; unsigned int num_windows; diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c index a9288ab01f7b..80f641ee4dea 100644 --- a/drivers/pinctrl/sh-pfc/gpio.c +++ b/drivers/pinctrl/sh-pfc/gpio.c @@ -409,11 +409,8 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc) int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc) { - int err; - int ret; - - ret = gpiochip_remove(&pfc->gpio->gpio_chip); - err = gpiochip_remove(&pfc->func->gpio_chip); + gpiochip_remove(&pfc->gpio->gpio_chip); + gpiochip_remove(&pfc->func->gpio_chip); - return ret < 0 ? 
ret : err; + return 0; } diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c index ce9fb7aa8ba3..280a56f97786 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c @@ -2717,14 +2717,14 @@ static void r8a73a4_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, iowrite8(value, addr); } -static const struct sh_pfc_soc_operations r8a73a4_pinmux_ops = { +static const struct sh_pfc_soc_operations r8a73a4_pfc_ops = { .get_bias = r8a73a4_pinmux_get_bias, .set_bias = r8a73a4_pinmux_set_bias, }; const struct sh_pfc_soc_info r8a73a4_pinmux_info = { .name = "r8a73a4_pfc", - .ops = &r8a73a4_pinmux_ops, + .ops = &r8a73a4_pfc_ops, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c index e4c1ef477053..b486e9d20cc2 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c @@ -3752,14 +3752,14 @@ static void r8a7740_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, iowrite8(value, addr); } -static const struct sh_pfc_soc_operations r8a7740_pinmux_ops = { +static const struct sh_pfc_soc_operations r8a7740_pfc_ops = { .get_bias = r8a7740_pinmux_get_bias, .set_bias = r8a7740_pinmux_set_bias, }; const struct sh_pfc_soc_info r8a7740_pinmux_info = { .name = "r8a7740_pfc", - .ops = &r8a7740_pinmux_ops, + .ops = &r8a7740_pfc_ops, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 2e688dc4a3c8..c6e5deba238e 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -1726,6 +1726,133 @@ static const unsigned int audio_clkout_mux[] = { AUDIO_CLKOUT_MARK, }; +/* - CAN -------------------------------------------------------------------- */ + +static const unsigned int can0_data_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 29), +}; + +static const unsigned int can0_data_mux[] = { + CAN0_TX_MARK, CAN0_RX_MARK, +}; + +static const unsigned int can0_data_b_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(7, 4), RCAR_GP_PIN(7, 3), +}; + +static const unsigned int can0_data_b_mux[] = { + CAN0_TX_B_MARK, CAN0_RX_B_MARK, +}; + +static const unsigned int can0_data_c_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18), +}; + +static const unsigned int can0_data_c_mux[] = { + CAN0_TX_C_MARK, CAN0_RX_C_MARK, +}; + +static const unsigned int can0_data_d_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(2, 26), RCAR_GP_PIN(2, 27), +}; + +static const unsigned int can0_data_d_mux[] = { + CAN0_TX_D_MARK, CAN0_RX_D_MARK, +}; + +static const unsigned int can0_data_e_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 28), +}; + +static const unsigned int can0_data_e_mux[] = { + CAN0_TX_E_MARK, CAN0_RX_E_MARK, +}; + +static const unsigned int can0_data_f_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6), +}; + +static const unsigned int can0_data_f_mux[] = { + CAN0_TX_F_MARK, CAN0_RX_F_MARK, +}; + +static const unsigned int can1_data_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 20), +}; + +static const unsigned int can1_data_mux[] = { + CAN1_TX_MARK, CAN1_RX_MARK, +}; + +static const unsigned int can1_data_b_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 9), +}; + +static const unsigned int can1_data_b_mux[] = { + CAN1_TX_B_MARK, CAN1_RX_B_MARK, +}; + +static const unsigned int 
can1_data_c_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 19), +}; + +static const unsigned int can1_data_c_mux[] = { + CAN1_TX_C_MARK, CAN1_RX_C_MARK, +}; + +static const unsigned int can1_data_d_pins[] = { + /* TX, RX */ + RCAR_GP_PIN(4, 29), RCAR_GP_PIN(4, 31), +}; + +static const unsigned int can1_data_d_mux[] = { + CAN1_TX_D_MARK, CAN1_RX_D_MARK, +}; + +static const unsigned int can_clk_pins[] = { + /* CLK */ + RCAR_GP_PIN(7, 2), +}; + +static const unsigned int can_clk_mux[] = { + CAN_CLK_MARK, +}; + +static const unsigned int can_clk_b_pins[] = { + /* CLK */ + RCAR_GP_PIN(5, 21), +}; + +static const unsigned int can_clk_b_mux[] = { + CAN_CLK_B_MARK, +}; + +static const unsigned int can_clk_c_pins[] = { + /* CLK */ + RCAR_GP_PIN(4, 30), +}; + +static const unsigned int can_clk_c_mux[] = { + CAN_CLK_C_MARK, +}; + +static const unsigned int can_clk_d_pins[] = { + /* CLK */ + RCAR_GP_PIN(7, 19), +}; + +static const unsigned int can_clk_d_mux[] = { + CAN_CLK_D_MARK, +}; /* - DU --------------------------------------------------------------------- */ static const unsigned int du_rgb666_pins[] = { @@ -1867,6 +1994,192 @@ static const unsigned int eth_rmii_mux[] = { ETH_RXD0_MARK, ETH_RXD1_MARK, ETH_RX_ER_MARK, ETH_CRS_DV_MARK, ETH_TXD0_MARK, ETH_TXD1_MARK, ETH_TX_EN_MARK, ETH_REFCLK_MARK, }; + +/* - HSCIF0 ----------------------------------------------------------------- */ +static const unsigned int hscif0_data_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(7, 3), RCAR_GP_PIN(7, 4), +}; +static const unsigned int hscif0_data_mux[] = { + HRX0_MARK, HTX0_MARK, +}; +static const unsigned int hscif0_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(7, 2), +}; +static const unsigned int hscif0_clk_mux[] = { + HSCK0_MARK, +}; +static const unsigned int hscif0_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(7, 1), RCAR_GP_PIN(7, 0), +}; +static const unsigned int hscif0_ctrl_mux[] = { + HRTS0_N_MARK, HCTS0_N_MARK, +}; +static const unsigned int hscif0_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 15), +}; +static const unsigned int hscif0_data_b_mux[] = { + HRX0_B_MARK, HTX0_B_MARK, +}; +static const unsigned int hscif0_ctrl_b_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 13), +}; +static const unsigned int hscif0_ctrl_b_mux[] = { + HRTS0_N_B_MARK, HCTS0_N_B_MARK, +}; +static const unsigned int hscif0_data_c_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1), +}; +static const unsigned int hscif0_data_c_mux[] = { + HRX0_C_MARK, HTX0_C_MARK, +}; +static const unsigned int hscif0_clk_c_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 31), +}; +static const unsigned int hscif0_clk_c_mux[] = { + HSCK0_C_MARK, +}; +/* - HSCIF1 ----------------------------------------------------------------- */ +static const unsigned int hscif1_data_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 6), +}; +static const unsigned int hscif1_data_mux[] = { + HRX1_MARK, HTX1_MARK, +}; +static const unsigned int hscif1_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(7, 7), +}; +static const unsigned int hscif1_clk_mux[] = { + HSCK1_MARK, +}; +static const unsigned int hscif1_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(7, 9), RCAR_GP_PIN(7, 8), +}; +static const unsigned int hscif1_ctrl_mux[] = { + HRTS1_N_MARK, HCTS1_N_MARK, +}; +static const unsigned int hscif1_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 18), +}; +static const unsigned int hscif1_data_b_mux[] = { + HRX1_B_MARK, HTX1_B_MARK, +}; +static const unsigned int 
hscif1_data_c_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(7, 14), RCAR_GP_PIN(7, 15), +}; +static const unsigned int hscif1_data_c_mux[] = { + HRX1_C_MARK, HTX1_C_MARK, +}; +static const unsigned int hscif1_clk_c_pins[] = { + /* SCK */ + RCAR_GP_PIN(7, 16), +}; +static const unsigned int hscif1_clk_c_mux[] = { + HSCK1_C_MARK, +}; +static const unsigned int hscif1_ctrl_c_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17), +}; +static const unsigned int hscif1_ctrl_c_mux[] = { + HRTS1_N_C_MARK, HCTS1_N_C_MARK, +}; +static const unsigned int hscif1_data_d_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 18), +}; +static const unsigned int hscif1_data_d_mux[] = { + HRX1_D_MARK, HTX1_D_MARK, +}; +static const unsigned int hscif1_data_e_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(7, 14), RCAR_GP_PIN(7, 15), +}; +static const unsigned int hscif1_data_e_mux[] = { + HRX1_C_MARK, HTX1_C_MARK, +}; +static const unsigned int hscif1_clk_e_pins[] = { + /* SCK */ + RCAR_GP_PIN(2, 6), +}; +static const unsigned int hscif1_clk_e_mux[] = { + HSCK1_E_MARK, +}; +static const unsigned int hscif1_ctrl_e_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 7), +}; +static const unsigned int hscif1_ctrl_e_mux[] = { + HRTS1_N_E_MARK, HCTS1_N_E_MARK, +}; +/* - HSCIF2 ----------------------------------------------------------------- */ +static const unsigned int hscif2_data_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(4, 16), RCAR_GP_PIN(4, 17), +}; +static const unsigned int hscif2_data_mux[] = { + HRX2_MARK, HTX2_MARK, +}; +static const unsigned int hscif2_clk_pins[] = { + /* SCK */ + RCAR_GP_PIN(4, 15), +}; +static const unsigned int hscif2_clk_mux[] = { + HSCK2_MARK, +}; +static const unsigned int hscif2_ctrl_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13), +}; +static const unsigned int hscif2_ctrl_mux[] = { + HRTS2_N_MARK, HCTS2_N_MARK, +}; +static const unsigned int hscif2_data_b_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 20), RCAR_GP_PIN(1, 22), +}; +static const unsigned int hscif2_data_b_mux[] = { + HRX2_B_MARK, HTX2_B_MARK, +}; +static const unsigned int hscif2_ctrl_b_pins[] = { + /* RTS, CTS */ + RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 21), +}; +static const unsigned int hscif2_ctrl_b_mux[] = { + HRTS2_N_B_MARK, HCTS2_N_B_MARK, +}; +static const unsigned int hscif2_data_c_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1), +}; +static const unsigned int hscif2_data_c_mux[] = { + HRX2_C_MARK, HTX2_C_MARK, +}; +static const unsigned int hscif2_clk_c_pins[] = { + /* SCK */ + RCAR_GP_PIN(5, 31), +}; +static const unsigned int hscif2_clk_c_mux[] = { + HSCK2_C_MARK, +}; +static const unsigned int hscif2_data_d_pins[] = { + /* RX, TX */ + RCAR_GP_PIN(1, 20), RCAR_GP_PIN(5, 31), +}; +static const unsigned int hscif2_data_d_mux[] = { + HRX2_B_MARK, HTX2_D_MARK, +}; /* - I2C0 ------------------------------------------------------------------- */ static const unsigned int i2c0_pins[] = { /* SCL, SDA */ @@ -3869,6 +4182,20 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(audio_clk_b_b), SH_PFC_PIN_GROUP(audio_clk_c), SH_PFC_PIN_GROUP(audio_clkout), + SH_PFC_PIN_GROUP(can0_data), + SH_PFC_PIN_GROUP(can0_data_b), + SH_PFC_PIN_GROUP(can0_data_c), + SH_PFC_PIN_GROUP(can0_data_d), + SH_PFC_PIN_GROUP(can0_data_e), + SH_PFC_PIN_GROUP(can0_data_f), + SH_PFC_PIN_GROUP(can1_data), + SH_PFC_PIN_GROUP(can1_data_b), + SH_PFC_PIN_GROUP(can1_data_c), + SH_PFC_PIN_GROUP(can1_data_d), + SH_PFC_PIN_GROUP(can_clk), + SH_PFC_PIN_GROUP(can_clk_b), + 
SH_PFC_PIN_GROUP(can_clk_c), + SH_PFC_PIN_GROUP(can_clk_d), SH_PFC_PIN_GROUP(du_rgb666), SH_PFC_PIN_GROUP(du_rgb888), SH_PFC_PIN_GROUP(du_clk_out_0), @@ -3885,6 +4212,32 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(eth_magic), SH_PFC_PIN_GROUP(eth_mdio), SH_PFC_PIN_GROUP(eth_rmii), + SH_PFC_PIN_GROUP(hscif0_data), + SH_PFC_PIN_GROUP(hscif0_clk), + SH_PFC_PIN_GROUP(hscif0_ctrl), + SH_PFC_PIN_GROUP(hscif0_data_b), + SH_PFC_PIN_GROUP(hscif0_ctrl_b), + SH_PFC_PIN_GROUP(hscif0_data_c), + SH_PFC_PIN_GROUP(hscif0_clk_c), + SH_PFC_PIN_GROUP(hscif1_data), + SH_PFC_PIN_GROUP(hscif1_clk), + SH_PFC_PIN_GROUP(hscif1_ctrl), + SH_PFC_PIN_GROUP(hscif1_data_b), + SH_PFC_PIN_GROUP(hscif1_data_c), + SH_PFC_PIN_GROUP(hscif1_clk_c), + SH_PFC_PIN_GROUP(hscif1_ctrl_c), + SH_PFC_PIN_GROUP(hscif1_data_d), + SH_PFC_PIN_GROUP(hscif1_data_e), + SH_PFC_PIN_GROUP(hscif1_clk_e), + SH_PFC_PIN_GROUP(hscif1_ctrl_e), + SH_PFC_PIN_GROUP(hscif2_data), + SH_PFC_PIN_GROUP(hscif2_clk), + SH_PFC_PIN_GROUP(hscif2_ctrl), + SH_PFC_PIN_GROUP(hscif2_data_b), + SH_PFC_PIN_GROUP(hscif2_ctrl_b), + SH_PFC_PIN_GROUP(hscif2_data_c), + SH_PFC_PIN_GROUP(hscif2_clk_c), + SH_PFC_PIN_GROUP(hscif2_data_d), SH_PFC_PIN_GROUP(i2c0), SH_PFC_PIN_GROUP(i2c0_b), SH_PFC_PIN_GROUP(i2c0_c), @@ -4155,6 +4508,30 @@ static const char * const audio_clk_groups[] = { "audio_clkout", }; +static const char * const can0_groups[] = { + "can0_data", + "can0_data_b", + "can0_data_c", + "can0_data_d", + "can0_data_e", + "can0_data_f", + "can_clk", + "can_clk_b", + "can_clk_c", + "can_clk_d", +}; + +static const char * const can1_groups[] = { + "can1_data", + "can1_data_b", + "can1_data_c", + "can1_data_d", + "can_clk", + "can_clk_b", + "can_clk_c", + "can_clk_d", +}; + static const char * const du_groups[] = { "du_rgb666", "du_rgb888", @@ -4183,6 +4560,41 @@ static const char * const eth_groups[] = { "eth_rmii", }; +static const char * const hscif0_groups[] = { + "hscif0_data", + "hscif0_clk", + "hscif0_ctrl", + "hscif0_data_b", + "hscif0_ctrl_b", + "hscif0_data_c", + "hscif0_clk_c", +}; + +static const char * const hscif1_groups[] = { + "hscif1_data", + "hscif1_clk", + "hscif1_ctrl", + "hscif1_data_b", + "hscif1_data_c", + "hscif1_clk_c", + "hscif1_ctrl_c", + "hscif1_data_d", + "hscif1_data_e", + "hscif1_clk_e", + "hscif1_ctrl_e", +}; + +static const char * const hscif2_groups[] = { + "hscif2_data", + "hscif2_clk", + "hscif2_ctrl", + "hscif2_data_b", + "hscif2_ctrl_b", + "hscif2_data_c", + "hscif2_clk_c", + "hscif2_data_d", +}; + static const char * const i2c0_groups[] = { "i2c0", "i2c0_b", @@ -4543,10 +4955,15 @@ static const char * const vin2_groups[] = { static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(audio_clk), + SH_PFC_FUNCTION(can0), + SH_PFC_FUNCTION(can1), SH_PFC_FUNCTION(du), SH_PFC_FUNCTION(du0), SH_PFC_FUNCTION(du1), SH_PFC_FUNCTION(eth), + SH_PFC_FUNCTION(hscif0), + SH_PFC_FUNCTION(hscif1), + SH_PFC_FUNCTION(hscif2), SH_PFC_FUNCTION(i2c0), SH_PFC_FUNCTION(i2c1), SH_PFC_FUNCTION(i2c2), diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c index d9158b3b2919..8211f66a2f68 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c @@ -2614,14 +2614,14 @@ static void sh7372_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, iowrite8(value, addr); } -static const struct sh_pfc_soc_operations sh7372_pinmux_ops = { +static const struct sh_pfc_soc_operations sh7372_pfc_ops = { .get_bias = sh7372_pinmux_get_bias, .set_bias = 
sh7372_pinmux_set_bias, }; const struct sh_pfc_soc_info sh7372_pinmux_info = { .name = "sh7372_pfc", - .ops = &sh7372_pinmux_ops, + .ops = &sh7372_pfc_ops, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c index ee370de4609a..d2efbfb776ac 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c @@ -3824,54 +3824,36 @@ static void sh73a0_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, * SoC information */ -struct sh73a0_pinmux_data { - struct regulator_dev *vccq_mc0; -}; - static int sh73a0_pinmux_soc_init(struct sh_pfc *pfc) { - struct sh73a0_pinmux_data *data; struct regulator_config cfg = { }; + struct regulator_dev *vccq; int ret; - data = devm_kzalloc(pfc->dev, sizeof(*data), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - cfg.dev = pfc->dev; cfg.init_data = &sh73a0_vccq_mc0_init_data; cfg.driver_data = pfc; - data->vccq_mc0 = regulator_register(&sh73a0_vccq_mc0_desc, &cfg); - if (IS_ERR(data->vccq_mc0)) { - ret = PTR_ERR(data->vccq_mc0); + vccq = devm_regulator_register(pfc->dev, &sh73a0_vccq_mc0_desc, &cfg); + if (IS_ERR(vccq)) { + ret = PTR_ERR(vccq); dev_err(pfc->dev, "Failed to register VCCQ MC0 regulator: %d\n", ret); return ret; } - pfc->soc_data = data; - return 0; } -static void sh73a0_pinmux_soc_exit(struct sh_pfc *pfc) -{ - struct sh73a0_pinmux_data *data = pfc->soc_data; - - regulator_unregister(data->vccq_mc0); -} - -static const struct sh_pfc_soc_operations sh73a0_pinmux_ops = { +static const struct sh_pfc_soc_operations sh73a0_pfc_ops = { .init = sh73a0_pinmux_soc_init, - .exit = sh73a0_pinmux_soc_exit, .get_bias = sh73a0_pinmux_get_bias, .set_bias = sh73a0_pinmux_set_bias, }; const struct sh_pfc_soc_info sh73a0_pinmux_info = { .name = "sh73a0_pfc", - .ops = &sh73a0_pinmux_ops, + .ops = &sh73a0_pfc_ops, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index e758af95c209..910deaefa0ac 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -312,8 +312,8 @@ static int sh_pfc_get_function_groups(struct pinctrl_dev *pctldev, return 0; } -static int sh_pfc_func_enable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) +static int sh_pfc_func_set_mux(struct pinctrl_dev *pctldev, unsigned selector, + unsigned group) { struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev); struct sh_pfc *pfc = pmx->pfc; @@ -345,27 +345,6 @@ done: return ret; } -static void sh_pfc_func_disable(struct pinctrl_dev *pctldev, unsigned selector, - unsigned group) -{ - struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev); - struct sh_pfc *pfc = pmx->pfc; - const struct sh_pfc_pin_group *grp = &pfc->info->groups[group]; - unsigned long flags; - unsigned int i; - - spin_lock_irqsave(&pfc->lock, flags); - - for (i = 0; i < grp->nr_pins; ++i) { - int idx = sh_pfc_get_pin_index(pfc, grp->pins[i]); - struct sh_pfc_pin_config *cfg = &pmx->configs[idx]; - - cfg->type = PINMUX_TYPE_NONE; - } - - spin_unlock_irqrestore(&pfc->lock, flags); -} - static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) @@ -463,8 +442,7 @@ static const struct pinmux_ops sh_pfc_pinmux_ops = { .get_functions_count = sh_pfc_get_functions_count, .get_function_name = sh_pfc_get_function_name, 
.get_function_groups = sh_pfc_get_function_groups, - .enable = sh_pfc_func_enable, - .disable = sh_pfc_func_disable, + .set_mux = sh_pfc_func_set_mux, .gpio_request_enable = sh_pfc_gpio_request_enable, .gpio_disable_free = sh_pfc_gpio_disable_free, .gpio_set_direction = sh_pfc_gpio_set_direction, diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index d482c40b012a..5b7283182c1e 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -116,7 +116,6 @@ struct sh_pfc; struct sh_pfc_soc_operations { int (*init)(struct sh_pfc *pfc); - void (*exit)(struct sh_pfc *pfc); unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin); void (*set_bias)(struct sh_pfc *pfc, unsigned int pin, unsigned int bias); diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c index c4dd3d5cf9c3..45f8391ddb34 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas6.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c @@ -134,8 +134,9 @@ static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = { .mask = BIT(30) | BIT(31), }, { .group = 2, - .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | - BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | + .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | + BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) | + BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(31), }, }; @@ -148,14 +149,15 @@ static const struct sirfsoc_padmux lcd_16bits_padmux = { .funcval = 0, }; -static const unsigned lcd_16bits_pins[] = { 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, - 84, 85, 86, 95 }; +static const unsigned lcd_16bits_pins[] = { 62, 63, 65, 70, 71, 72, 73, 74, 75, + 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95 }; static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = { { .group = 2, - .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | - BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | + .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | + BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) | + BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(31), }, { .group = 1, @@ -174,21 +176,23 @@ static const struct sirfsoc_padmux lcd_18bits_padmux = { .funcval = 0, }; -static const unsigned lcd_18bits_pins[] = { 16, 17, 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, - 84, 85, 86, 95 }; +static const unsigned lcd_18bits_pins[] = { 16, 17, 62, 63, 65, 70, 71, 72, 73, + 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95 }; static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = { { .group = 2, - .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | - BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | + .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | + BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(15) | + BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(31), }, { .group = 1, .mask = BIT(30) | BIT(31), }, { .group = 0, - .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23), + .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | + BIT(21) | BIT(22) | BIT(23), }, }; @@ -200,14 +204,16 @@ static const struct sirfsoc_padmux lcd_24bits_padmux = { .funcval = 0, }; -static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, - 80, 81, 82, 83, 84, 85, 86, 95}; +static const unsigned 
lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 62, + 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, + 85, 86, 95}; static const struct sirfsoc_muxmask lcdrom_muxmask[] = { { .group = 2, - .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | - BIT(12) | BIT(13) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | + .mask = BIT(1) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | + BIT(11) | BIT(12) | BIT(13) | BIT(15) | BIT(16) | + BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(31), }, { .group = 1, @@ -226,8 +232,8 @@ static const struct sirfsoc_padmux lcdrom_padmux = { .funcval = BIT(4), }; -static const unsigned lcdrom_pins[] = { 8, 62, 63, 65, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, - 84, 85, 86, 95}; +static const unsigned lcdrom_pins[] = { 8, 62, 63, 65, 70, 71, 72, 73, 74, 75, + 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 95}; static const struct sirfsoc_muxmask uart0_muxmask[] = { { @@ -371,11 +377,42 @@ static const struct sirfsoc_padmux cko1_padmux = { static const unsigned cko1_pins[] = { 42 }; -static const struct sirfsoc_muxmask i2s_muxmask[] = { +static const struct sirfsoc_muxmask i2s_mclk_muxmask[] = { { .group = 1, .mask = BIT(10), - }, { + }, +}; + +static const struct sirfsoc_padmux i2s_mclk_padmux = { + .muxmask_counts = ARRAY_SIZE(i2s_mclk_muxmask), + .muxmask = i2s_mclk_muxmask, + .ctrlreg = SIRFSOC_RSC_PIN_MUX, + .funcmask = BIT(3), + .funcval = BIT(3), +}; + +static const unsigned i2s_mclk_pins[] = { 42 }; + +static const struct sirfsoc_muxmask i2s_ext_clk_input_muxmask[] = { + { + .group = 1, + .mask = BIT(19), + }, +}; + +static const struct sirfsoc_padmux i2s_ext_clk_input_padmux = { + .muxmask_counts = ARRAY_SIZE(i2s_ext_clk_input_muxmask), + .muxmask = i2s_ext_clk_input_muxmask, + .ctrlreg = SIRFSOC_RSC_PIN_MUX, + .funcmask = BIT(2), + .funcval = BIT(2), +}; + +static const unsigned i2s_ext_clk_input_pins[] = { 51 }; + +static const struct sirfsoc_muxmask i2s_muxmask[] = { + { .group = 3, .mask = BIT(2) | BIT(3) | BIT(4) | BIT(5), }, @@ -385,17 +422,12 @@ static const struct sirfsoc_padmux i2s_padmux = { .muxmask_counts = ARRAY_SIZE(i2s_muxmask), .muxmask = i2s_muxmask, .ctrlreg = SIRFSOC_RSC_PIN_MUX, - .funcmask = BIT(3), - .funcval = BIT(3), }; -static const unsigned i2s_pins[] = { 42, 98, 99, 100, 101 }; +static const unsigned i2s_pins[] = { 98, 99, 100, 101 }; static const struct sirfsoc_muxmask i2s_no_din_muxmask[] = { { - .group = 1, - .mask = BIT(10), - }, { .group = 3, .mask = BIT(2) | BIT(3) | BIT(4), }, @@ -405,17 +437,12 @@ static const struct sirfsoc_padmux i2s_no_din_padmux = { .muxmask_counts = ARRAY_SIZE(i2s_no_din_muxmask), .muxmask = i2s_no_din_muxmask, .ctrlreg = SIRFSOC_RSC_PIN_MUX, - .funcmask = BIT(3), - .funcval = BIT(3), }; -static const unsigned i2s_no_din_pins[] = { 42, 98, 99, 100 }; +static const unsigned i2s_no_din_pins[] = { 98, 99, 100 }; static const struct sirfsoc_muxmask i2s_6chn_muxmask[] = { { - .group = 1, - .mask = BIT(10) | BIT(20) | BIT(23), - }, { .group = 3, .mask = BIT(2) | BIT(3) | BIT(4) | BIT(5), }, @@ -425,11 +452,11 @@ static const struct sirfsoc_padmux i2s_6chn_padmux = { .muxmask_counts = ARRAY_SIZE(i2s_6chn_muxmask), .muxmask = i2s_6chn_muxmask, .ctrlreg = SIRFSOC_RSC_PIN_MUX, - .funcmask = BIT(1) | BIT(3) | BIT(9), - .funcval = BIT(1) | BIT(3) | BIT(9), + .funcmask = BIT(1) | BIT(9), + .funcval = BIT(1) | BIT(9), }; -static const unsigned i2s_6chn_pins[] = { 42, 52, 55, 98, 99, 100, 101 }; +static const unsigned i2s_6chn_pins[] = { 52, 55, 98, 99, 
100, 101 }; static const struct sirfsoc_muxmask ac97_muxmask[] = { { @@ -716,7 +743,8 @@ static const struct sirfsoc_padmux vip_padmux = { .funcval = BIT(18), }; -static const unsigned vip_pins[] = { 36, 37, 38, 40, 41, 56, 57, 58, 59, 60, 61 }; +static const unsigned vip_pins[] = { 36, 37, 38, 40, 41, 56, 57, 58, 59, + 60, 61 }; static const struct sirfsoc_muxmask vip_noupli_muxmask[] = { { @@ -737,7 +765,8 @@ static const struct sirfsoc_padmux vip_noupli_padmux = { .funcval = BIT(15), }; -static const unsigned vip_noupli_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 87, 88, 89 }; +static const unsigned vip_noupli_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, + 87, 88, 89 }; static const struct sirfsoc_muxmask i2c0_muxmask[] = { { @@ -876,7 +905,8 @@ static const struct sirfsoc_padmux usb0_upli_drvbus_padmux = { .funcval = 0, }; -static const unsigned usb0_upli_drvbus_pins[] = { 36, 37, 38, 39, 40, 41, 56, 57, 58, 59, 60, 61 }; +static const unsigned usb0_upli_drvbus_pins[] = { 36, 37, 38, 39, 40, + 41, 56, 57, 58, 59, 60, 61 }; static const struct sirfsoc_muxmask usb1_utmi_drvbus_muxmask[] = { { @@ -968,6 +998,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = { SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins), SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins), SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins), + SIRFSOC_PIN_GROUP("i2smclkgrp", i2s_mclk_pins), + SIRFSOC_PIN_GROUP("i2s_ext_clk_inputgrp", i2s_ext_clk_input_pins), SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins), SIRFSOC_PIN_GROUP("i2s_no_dingrp", i2s_no_din_pins), SIRFSOC_PIN_GROUP("i2s_6chngrp", i2s_6chn_pins), @@ -1017,8 +1049,11 @@ static const char * const sdmmc2_nowpgrp[] = { "sdmmc2_nowpgrp" }; static const char * const usb0_upli_drvbusgrp[] = { "usb0_upli_drvbusgrp" }; static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" }; static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" }; -static const char * const uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" }; +static const char * const + uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" }; static const char * const pulse_countgrp[] = { "pulse_countgrp" }; +static const char * const i2smclkgrp[] = { "i2smclkgrp" }; +static const char * const i2s_ext_clk_inputgrp[] = { "i2s_ext_clk_inputgrp" }; static const char * const i2sgrp[] = { "i2sgrp" }; static const char * const i2s_no_dingrp[] = { "i2s_no_dingrp" }; static const char * const i2s_6chngrp[] = { "i2s_6chngrp" }; @@ -1038,7 +1073,8 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = { uart0_nostreamctrl_padmux), SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux), SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), - SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), + SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", + uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl", usp0_uart_nostreamctrl_grp, @@ -1068,12 +1104,19 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = { SIRFSOC_PMX_FUNCTION("sdmmc2", sdmmc2grp, sdmmc2_padmux), SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux), SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux), - SIRFSOC_PMX_FUNCTION("sdmmc2_nowp", sdmmc2_nowpgrp, sdmmc2_nowp_padmux), - SIRFSOC_PMX_FUNCTION("usb0_upli_drvbus", usb0_upli_drvbusgrp, usb0_upli_drvbus_padmux), - SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, 
usb1_utmi_drvbus_padmux), + SIRFSOC_PMX_FUNCTION("sdmmc2_nowp", + sdmmc2_nowpgrp, sdmmc2_nowp_padmux), + SIRFSOC_PMX_FUNCTION("usb0_upli_drvbus", + usb0_upli_drvbusgrp, usb0_upli_drvbus_padmux), + SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", + usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux), SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux), - SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", uart1_route_io_usb1grp, uart1_route_io_usb1_padmux), + SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", + uart1_route_io_usb1grp, uart1_route_io_usb1_padmux), SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux), + SIRFSOC_PMX_FUNCTION("i2s_mclk", i2smclkgrp, i2s_mclk_padmux), + SIRFSOC_PMX_FUNCTION("i2s_ext_clk_input", i2s_ext_clk_inputgrp, + i2s_ext_clk_input_padmux), SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux), SIRFSOC_PMX_FUNCTION("i2s_no_din", i2s_no_dingrp, i2s_no_din_padmux), SIRFSOC_PMX_FUNCTION("i2s_6chn", i2s_6chngrp, i2s_6chn_padmux), diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c index 8aa76f0776d7..357678ee28e3 100644 --- a/drivers/pinctrl/sirf/pinctrl-prima2.c +++ b/drivers/pinctrl/sirf/pinctrl-prima2.c @@ -135,8 +135,9 @@ static const struct pinctrl_pin_desc sirfsoc_pads[] = { static const struct sirfsoc_muxmask lcd_16bits_sirfsoc_muxmask[] = { { .group = 3, - .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | - BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | + .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | + BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, @@ -152,14 +153,15 @@ static const struct sirfsoc_padmux lcd_16bits_padmux = { .funcval = 0, }; -static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; +static const unsigned lcd_16bits_pins[] = { 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; static const struct sirfsoc_muxmask lcd_18bits_muxmask[] = { { .group = 3, - .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | - BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | + .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | + BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, @@ -178,21 +180,23 @@ static const struct sirfsoc_padmux lcd_18bits_padmux = { .funcval = 0, }; -static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}; +static const unsigned lcd_18bits_pins[] = { 16, 17, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}; static const struct sirfsoc_muxmask lcd_24bits_muxmask[] = { { .group = 3, - .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | - BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | + .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | + BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, .mask = BIT(31), }, { .group = 0, - .mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23), + 
.mask = BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | + BIT(21) | BIT(22) | BIT(23), }, }; @@ -204,14 +208,16 @@ static const struct sirfsoc_padmux lcd_24bits_padmux = { .funcval = 0, }; -static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; +static const unsigned lcd_24bits_pins[] = { 16, 17, 18, 19, 20, 21, 22, 23, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114 }; static const struct sirfsoc_muxmask lcdrom_muxmask[] = { { .group = 3, - .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | - BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | + .mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | + BIT(12) | BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18), }, { .group = 2, @@ -230,8 +236,8 @@ static const struct sirfsoc_padmux lcdrom_padmux = { .funcval = BIT(4), }; -static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, - 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; +static const unsigned lcdrom_pins[] = { 23, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114 }; static const struct sirfsoc_muxmask uart0_muxmask[] = { { @@ -380,12 +386,44 @@ static const struct sirfsoc_padmux cko1_padmux = { static const unsigned cko1_pins[] = { 42 }; +static const struct sirfsoc_muxmask i2s_mclk_muxmask[] = { + { + .group = 1, + .mask = BIT(10), + }, +}; + +static const struct sirfsoc_padmux i2s_mclk_padmux = { + .muxmask_counts = ARRAY_SIZE(i2s_mclk_muxmask), + .muxmask = i2s_mclk_muxmask, + .ctrlreg = SIRFSOC_RSC_PIN_MUX, + .funcmask = BIT(3), + .funcval = BIT(3), +}; + +static const unsigned i2s_mclk_pins[] = { 42 }; + +static const struct sirfsoc_muxmask i2s_ext_clk_input_muxmask[] = { + { + .group = 1, + .mask = BIT(19), + }, +}; + +static const struct sirfsoc_padmux i2s_ext_clk_input_padmux = { + .muxmask_counts = ARRAY_SIZE(i2s_ext_clk_input_muxmask), + .muxmask = i2s_ext_clk_input_muxmask, + .ctrlreg = SIRFSOC_RSC_PIN_MUX, + .funcmask = BIT(2), + .funcval = BIT(2), +}; + +static const unsigned i2s_ext_clk_input_pins[] = { 51 }; + static const struct sirfsoc_muxmask i2s_muxmask[] = { { .group = 1, - .mask = - BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(19) - | BIT(23) | BIT(28), + .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14), }, }; @@ -393,11 +431,42 @@ static const struct sirfsoc_padmux i2s_padmux = { .muxmask_counts = ARRAY_SIZE(i2s_muxmask), .muxmask = i2s_muxmask, .ctrlreg = SIRFSOC_RSC_PIN_MUX, - .funcmask = BIT(3) | BIT(9), - .funcval = BIT(3), }; -static const unsigned i2s_pins[] = { 42, 43, 44, 45, 46, 51, 55, 60 }; +static const unsigned i2s_pins[] = { 43, 44, 45, 46 }; + +static const struct sirfsoc_muxmask i2s_no_din_muxmask[] = { + { + .group = 1, + .mask = BIT(11) | BIT(12) | BIT(14), + }, +}; + +static const struct sirfsoc_padmux i2s_no_din_padmux = { + .muxmask_counts = ARRAY_SIZE(i2s_no_din_muxmask), + .muxmask = i2s_no_din_muxmask, + .ctrlreg = SIRFSOC_RSC_PIN_MUX, +}; + +static const unsigned i2s_no_din_pins[] = { 43, 44, 46 }; + +static const struct sirfsoc_muxmask i2s_6chn_muxmask[] = { + { + .group = 1, + .mask = BIT(11) | BIT(12) | BIT(13) | BIT(14) + | BIT(23) | BIT(28), + }, +}; + +static const struct sirfsoc_padmux i2s_6chn_padmux = { + .muxmask_counts = 
ARRAY_SIZE(i2s_6chn_muxmask), + .muxmask = i2s_6chn_muxmask, + .ctrlreg = SIRFSOC_RSC_PIN_MUX, + .funcmask = BIT(1) | BIT(9), + .funcval = BIT(1) | BIT(9), +}; + +static const unsigned i2s_6chn_pins[] = { 43, 44, 45, 46, 55, 60 }; static const struct sirfsoc_muxmask ac97_muxmask[] = { { @@ -685,7 +754,8 @@ static const struct sirfsoc_padmux vip_padmux = { .funcval = 0, }; -static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 }; +static const unsigned vip_pins[] = { 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89 }; static const struct sirfsoc_muxmask i2c0_muxmask[] = { { @@ -735,7 +805,8 @@ static const struct sirfsoc_padmux viprom_padmux = { .funcval = BIT(0), }; -static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89 }; +static const unsigned viprom_pins[] = { 12, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89 }; static const struct sirfsoc_muxmask pwm0_muxmask[] = { { @@ -918,7 +989,11 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = { SIRFSOC_PIN_GROUP("usb1_dp_dngrp", usb1_dp_dn_pins), SIRFSOC_PIN_GROUP("uart1_route_io_usb1grp", uart1_route_io_usb1_pins), SIRFSOC_PIN_GROUP("pulse_countgrp", pulse_count_pins), + SIRFSOC_PIN_GROUP("i2smclkgrp", i2s_mclk_pins), + SIRFSOC_PIN_GROUP("i2s_ext_clk_inputgrp", i2s_ext_clk_input_pins), SIRFSOC_PIN_GROUP("i2sgrp", i2s_pins), + SIRFSOC_PIN_GROUP("i2s_no_dingrp", i2s_no_din_pins), + SIRFSOC_PIN_GROUP("i2s_6chngrp", i2s_6chn_pins), SIRFSOC_PIN_GROUP("ac97grp", ac97_pins), SIRFSOC_PIN_GROUP("nandgrp", nand_pins), SIRFSOC_PIN_GROUP("spi0grp", spi0_pins), @@ -936,16 +1011,19 @@ static const char * const uart1grp[] = { "uart1grp" }; static const char * const uart2grp[] = { "uart2grp" }; static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" }; static const char * const usp0grp[] = { "usp0grp" }; -static const char * const usp0_uart_nostreamctrl_grp[] = - { "usp0_uart_nostreamctrl_grp" }; +static const char * const usp0_uart_nostreamctrl_grp[] = { + "usp0_uart_nostreamctrl_grp" +}; static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" }; static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" }; static const char * const usp1grp[] = { "usp1grp" }; -static const char * const usp1_uart_nostreamctrl_grp[] = - { "usp1_uart_nostreamctrl_grp" }; +static const char * const usp1_uart_nostreamctrl_grp[] = { + "usp1_uart_nostreamctrl_grp" +}; static const char * const usp2grp[] = { "usp2grp" }; -static const char * const usp2_uart_nostreamctrl_grp[] = - { "usp2_uart_nostreamctrl_grp" }; +static const char * const usp2_uart_nostreamctrl_grp[] = { + "usp2_uart_nostreamctrl_grp" +}; static const char * const i2c0grp[] = { "i2c0grp" }; static const char * const i2c1grp[] = { "i2c1grp" }; static const char * const pwm0grp[] = { "pwm0grp" }; @@ -966,9 +1044,14 @@ static const char * const sdmmc5grp[] = { "sdmmc5grp" }; static const char * const usb0_utmi_drvbusgrp[] = { "usb0_utmi_drvbusgrp" }; static const char * const usb1_utmi_drvbusgrp[] = { "usb1_utmi_drvbusgrp" }; static const char * const usb1_dp_dngrp[] = { "usb1_dp_dngrp" }; -static const char * const uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" }; +static const char * const + uart1_route_io_usb1grp[] = { "uart1_route_io_usb1grp" }; static const char * const pulse_countgrp[] = { "pulse_countgrp" }; +static const char * const i2smclkgrp[] = { "i2smclkgrp" }; +static const char * const i2s_ext_clk_inputgrp[] = { "i2s_ext_clk_inputgrp" }; static const char * const i2sgrp[] = { 
"i2sgrp" }; +static const char * const i2s_no_dingrp[] = { "i2s_no_dingrp" }; +static const char * const i2s_6chngrp[] = { "i2s_6chngrp" }; static const char * const ac97grp[] = { "ac97grp" }; static const char * const nandgrp[] = { "nandgrp" }; static const char * const spi0grp[] = { "spi0grp" }; @@ -981,15 +1064,19 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = { SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux), SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux), SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux), - SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl", uart0_nostreamctrlgrp, uart0_nostreamctrl_padmux), + SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl", + uart0_nostreamctrlgrp, uart0_nostreamctrl_padmux), SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux), SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux), - SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), + SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", + uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux), SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux), SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl", usp0_uart_nostreamctrl_grp, usp0_uart_nostreamctrl_padmux), - SIRFSOC_PMX_FUNCTION("usp0_only_utfs", usp0_only_utfs_grp, usp0_only_utfs_padmux), - SIRFSOC_PMX_FUNCTION("usp0_only_urfs", usp0_only_urfs_grp, usp0_only_urfs_padmux), + SIRFSOC_PMX_FUNCTION("usp0_only_utfs", + usp0_only_utfs_grp, usp0_only_utfs_padmux), + SIRFSOC_PMX_FUNCTION("usp0_only_urfs", + usp0_only_urfs_grp, usp0_only_urfs_padmux), SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux), SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl", usp1_uart_nostreamctrl_grp, usp1_uart_nostreamctrl_padmux), @@ -1013,12 +1100,20 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = { SIRFSOC_PMX_FUNCTION("sdmmc3", sdmmc3grp, sdmmc3_padmux), SIRFSOC_PMX_FUNCTION("sdmmc4", sdmmc4grp, sdmmc4_padmux), SIRFSOC_PMX_FUNCTION("sdmmc5", sdmmc5grp, sdmmc5_padmux), - SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux), - SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux), + SIRFSOC_PMX_FUNCTION("usb0_utmi_drvbus", + usb0_utmi_drvbusgrp, usb0_utmi_drvbus_padmux), + SIRFSOC_PMX_FUNCTION("usb1_utmi_drvbus", + usb1_utmi_drvbusgrp, usb1_utmi_drvbus_padmux), SIRFSOC_PMX_FUNCTION("usb1_dp_dn", usb1_dp_dngrp, usb1_dp_dn_padmux), - SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", uart1_route_io_usb1grp, uart1_route_io_usb1_padmux), + SIRFSOC_PMX_FUNCTION("uart1_route_io_usb1", + uart1_route_io_usb1grp, uart1_route_io_usb1_padmux), SIRFSOC_PMX_FUNCTION("pulse_count", pulse_countgrp, pulse_count_padmux), + SIRFSOC_PMX_FUNCTION("i2s_mclk", i2smclkgrp, i2s_mclk_padmux), + SIRFSOC_PMX_FUNCTION("i2s_ext_clk_input", i2s_ext_clk_inputgrp, + i2s_ext_clk_input_padmux), SIRFSOC_PMX_FUNCTION("i2s", i2sgrp, i2s_padmux), + SIRFSOC_PMX_FUNCTION("i2s_no_din", i2s_no_dingrp, i2s_no_din_padmux), + SIRFSOC_PMX_FUNCTION("i2s_6chn", i2s_6chngrp, i2s_6chn_padmux), SIRFSOC_PMX_FUNCTION("ac97", ac97grp, ac97_padmux), SIRFSOC_PMX_FUNCTION("nand", nandgrp, nand_padmux), SIRFSOC_PMX_FUNCTION("spi0", spi0grp, spi0_padmux), diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index 014f5b1fee55..b713bd59ffbb 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c @@ -58,17 +58,18 @@ static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev, return sirfsoc_pin_groups[selector].name; } 
-static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector, - const unsigned **pins, - unsigned *num_pins) +static int sirfsoc_get_group_pins(struct pinctrl_dev *pctldev, + unsigned selector, + const unsigned **pins, + unsigned *num_pins) { *pins = sirfsoc_pin_groups[selector].pins; *num_pins = sirfsoc_pin_groups[selector].num_pins; return 0; } -static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, - unsigned offset) +static void sirfsoc_pin_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, unsigned offset) { seq_printf(s, " " DRIVER_NAME); } @@ -138,22 +139,25 @@ static struct pinctrl_ops sirfsoc_pctrl_ops = { static struct sirfsoc_pmx_func *sirfsoc_pmx_functions; static int sirfsoc_pmxfunc_cnt; -static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector, - bool enable) +static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, + unsigned selector, bool enable) { int i; - const struct sirfsoc_padmux *mux = sirfsoc_pmx_functions[selector].padmux; + const struct sirfsoc_padmux *mux = + sirfsoc_pmx_functions[selector].padmux; const struct sirfsoc_muxmask *mask = mux->muxmask; for (i = 0; i < mux->muxmask_counts; i++) { u32 muxval; if (!spmx->is_marco) { - muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group)); + muxval = readl(spmx->gpio_virtbase + + SIRFSOC_GPIO_PAD_EN(mask[i].group)); if (enable) muxval = muxval & ~mask[i].mask; else muxval = muxval | mask[i].mask; - writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(mask[i].group)); + writel(muxval, spmx->gpio_virtbase + + SIRFSOC_GPIO_PAD_EN(mask[i].group)); } else { if (enable) writel(mask[i].mask, spmx->gpio_virtbase + @@ -175,8 +179,9 @@ static void sirfsoc_pinmux_endisable(struct sirfsoc_pmx *spmx, unsigned selector } } -static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, unsigned selector, - unsigned group) +static int sirfsoc_pinmux_set_mux(struct pinctrl_dev *pmxdev, + unsigned selector, + unsigned group) { struct sirfsoc_pmx *spmx; @@ -186,15 +191,6 @@ static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, unsigned selector, return 0; } -static void sirfsoc_pinmux_disable(struct pinctrl_dev *pmxdev, unsigned selector, - unsigned group) -{ - struct sirfsoc_pmx *spmx; - - spmx = pinctrl_dev_get_drvdata(pmxdev); - sirfsoc_pinmux_endisable(spmx, selector, false); -} - static int sirfsoc_pinmux_get_funcs_count(struct pinctrl_dev *pmxdev) { return sirfsoc_pmxfunc_cnt; @@ -206,9 +202,10 @@ static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev, return sirfsoc_pmx_functions[selector].name; } -static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned selector, - const char * const **groups, - unsigned * const num_groups) +static int sirfsoc_pinmux_get_groups(struct pinctrl_dev *pctldev, + unsigned selector, + const char * const **groups, + unsigned * const num_groups) { *groups = sirfsoc_pmx_functions[selector].groups; *num_groups = sirfsoc_pmx_functions[selector].num_groups; @@ -227,9 +224,11 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev, spmx = pinctrl_dev_get_drvdata(pmxdev); if (!spmx->is_marco) { - muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group)); + muxval = readl(spmx->gpio_virtbase + + SIRFSOC_GPIO_PAD_EN(group)); muxval = muxval | (1 << (offset - range->pin_base)); - writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group)); + writel(muxval, spmx->gpio_virtbase + + SIRFSOC_GPIO_PAD_EN(group)); } else { writel(1 << (offset - 
range->pin_base), spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group)); @@ -239,8 +238,7 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev, } static struct pinmux_ops sirfsoc_pinmux_ops = { - .enable = sirfsoc_pinmux_enable, - .disable = sirfsoc_pinmux_disable, + .set_mux = sirfsoc_pinmux_set_mux, .get_functions_count = sirfsoc_pinmux_get_funcs_count, .get_function_name = sirfsoc_pinmux_get_func_name, .get_function_groups = sirfsoc_pinmux_get_groups, @@ -528,24 +526,29 @@ static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type) case IRQ_TYPE_NONE: break; case IRQ_TYPE_EDGE_RISING: - val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; + val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | + SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; val &= ~SIRFSOC_GPIO_CTL_INTR_LOW_MASK; break; case IRQ_TYPE_EDGE_FALLING: val &= ~SIRFSOC_GPIO_CTL_INTR_HIGH_MASK; - val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; + val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK | + SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; break; case IRQ_TYPE_EDGE_BOTH: - val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_LOW_MASK | - SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; + val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | + SIRFSOC_GPIO_CTL_INTR_LOW_MASK | + SIRFSOC_GPIO_CTL_INTR_TYPE_MASK; break; case IRQ_TYPE_LEVEL_LOW: - val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK); + val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | + SIRFSOC_GPIO_CTL_INTR_TYPE_MASK); val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK; break; case IRQ_TYPE_LEVEL_HIGH: val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK; - val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK); + val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK | + SIRFSOC_GPIO_CTL_INTR_TYPE_MASK); break; } @@ -704,7 +707,8 @@ static inline void sirfsoc_gpio_set_output(struct sirfsoc_gpio_chip *sgpio, spin_unlock_irqrestore(&bank->lock, flags); } -static int sirfsoc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value) +static int sirfsoc_gpio_direction_output(struct gpio_chip *chip, + unsigned gpio, int value) { struct sirfsoc_gpio_chip *sgpio = to_sirfsoc_gpio(chip); struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, gpio); @@ -849,7 +853,7 @@ static int sirfsoc_gpio_probe(struct device_node *np) if (err) { dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n"); - goto out; + goto out_banks; } for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) { @@ -908,8 +912,8 @@ static int __init sirfsoc_gpio_init(void) } subsys_initcall(sirfsoc_gpio_init); -MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " - "Yuping Luo <yuping.luo@csr.com>, " - "Barry Song <baohua.song@csr.com>"); +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>"); +MODULE_AUTHOR("Yuping Luo <yuping.luo@csr.com>"); +MODULE_AUTHOR("Barry Song <baohua.song@csr.com>"); MODULE_DESCRIPTION("SIRFSOC pin control driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/spear/Kconfig b/drivers/pinctrl/spear/Kconfig index 04d93e602674..9ef18eb958e1 100644 --- a/drivers/pinctrl/spear/Kconfig +++ b/drivers/pinctrl/spear/Kconfig @@ -48,6 +48,7 @@ config PINCTRL_SPEAR1340 config PINCTRL_SPEAR_PLGPIO bool "SPEAr SoC PLGPIO Controller" depends on GPIOLIB && PINCTRL_SPEAR + select GPIOLIB_IRQCHIP help Say yes here to support PLGPIO controller on ST Microelectronics SPEAr SoCs. 
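The `select GPIOLIB_IRQCHIP` added to PINCTRL_SPEAR_PLGPIO above pairs with the pinctrl-plgpio.c conversion that follows, which drops the driver's private irq_base/irq_domain bookkeeping in favour of the generic gpiolib irqchip helpers. A minimal sketch of that registration pattern, for reference only — the wrapper name my_gpio_add_irqchip, its parameters and the parent handler are illustrative placeholders, not symbols from the driver; the two gpiolib calls themselves appear verbatim in the diff below:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

/*
 * Hook a gpio_chip up to an irqchip using the gpiolib helpers of this
 * kernel generation, instead of open-coding irq_alloc_descs() and a
 * legacy irq_domain as the old plgpio code did.
 */
static int my_gpio_add_irqchip(struct gpio_chip *chip,
			       struct irq_chip *irqchip,
			       unsigned int parent_irq,
			       irq_flow_handler_t parent_handler)
{
	int ret;

	/* Allocate the per-pin irqdomain and install a default flow handler. */
	ret = gpiochip_irqchip_add(chip, irqchip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* Chain the bank's parent interrupt into the demux handler. */
	gpiochip_set_chained_irqchip(chip, irqchip, parent_irq,
				     parent_handler);
	return 0;
}

With this pattern the per-pin irq lookup becomes irq_find_mapping(chip->irqdomain, hwirq), which is exactly what the reworked plgpio_irq_handler() below uses in place of the removed plgpio_to_irq() helper.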
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c index ff2940e9f2a7..bddb79105d67 100644 --- a/drivers/pinctrl/spear/pinctrl-plgpio.c +++ b/drivers/pinctrl/spear/pinctrl-plgpio.c @@ -11,12 +11,11 @@ #include <linux/clk.h> #include <linux/err.h> -#include <linux/gpio.h> +#include <linux/gpio/driver.h> #include <linux/io.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> -#include <linux/irqchip/chained_irq.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm.h> @@ -54,7 +53,6 @@ struct plgpio_regs { * * lock: lock for guarding gpio registers * base: base address of plgpio block - * irq_base: irq number of plgpio0 * chip: gpio framework specific chip information structure * p2o: function ptr for pin to offset conversion. This is required only for * machines where mapping b/w pin and offset is not 1-to-1. @@ -68,8 +66,6 @@ struct plgpio { spinlock_t lock; void __iomem *base; struct clk *clk; - unsigned irq_base; - struct irq_domain *irq_domain; struct gpio_chip chip; int (*p2o)(int pin); /* pin_to_offset */ int (*o2p)(int offset); /* offset_to_pin */ @@ -280,21 +276,12 @@ disable_clk: pinctrl_free_gpio(gpio); } -static int plgpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct plgpio *plgpio = container_of(chip, struct plgpio, chip); - - if (IS_ERR_VALUE(plgpio->irq_base)) - return -EINVAL; - - return irq_find_mapping(plgpio->irq_domain, offset); -} - /* PLGPIO IRQ */ static void plgpio_irq_disable(struct irq_data *d) { - struct plgpio *plgpio = irq_data_get_irq_chip_data(d); - int offset = d->irq - plgpio->irq_base; + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct plgpio *plgpio = container_of(gc, struct plgpio, chip); + int offset = d->hwirq; unsigned long flags; /* get correct offset for "offset" pin */ @@ -311,8 +298,9 @@ static void plgpio_irq_disable(struct irq_data *d) static void plgpio_irq_enable(struct irq_data *d) { - struct plgpio *plgpio = irq_data_get_irq_chip_data(d); - int offset = d->irq - plgpio->irq_base; + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct plgpio *plgpio = container_of(gc, struct plgpio, chip); + int offset = d->hwirq; unsigned long flags; /* get correct offset for "offset" pin */ @@ -329,8 +317,9 @@ static void plgpio_irq_enable(struct irq_data *d) static int plgpio_irq_set_type(struct irq_data *d, unsigned trigger) { - struct plgpio *plgpio = irq_data_get_irq_chip_data(d); - int offset = d->irq - plgpio->irq_base; + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct plgpio *plgpio = container_of(gc, struct plgpio, chip); + int offset = d->hwirq; void __iomem *reg_off; unsigned int supported_type = 0, val; @@ -369,7 +358,8 @@ static struct irq_chip plgpio_irqchip = { static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc) { - struct plgpio *plgpio = irq_get_handler_data(irq); + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct plgpio *plgpio = container_of(gc, struct plgpio, chip); struct irq_chip *irqchip = irq_desc_get_chip(desc); int regs_count, count, pin, offset, i = 0; unsigned long pending; @@ -410,7 +400,8 @@ static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc) /* get correct irq line number */ pin = i * MAX_GPIO_PER_REG + pin; - generic_handle_irq(plgpio_to_irq(&plgpio->chip, pin)); + generic_handle_irq( + irq_find_mapping(gc->irqdomain, pin)); } } chained_irq_exit(irqchip, desc); @@ 
-523,10 +514,9 @@ end: } static int plgpio_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct plgpio *plgpio; struct resource *res; - int ret, irq, i; + int ret, irq; plgpio = devm_kzalloc(&pdev->dev, sizeof(*plgpio), GFP_KERNEL); if (!plgpio) { @@ -563,7 +553,6 @@ static int plgpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, plgpio); spin_lock_init(&plgpio->lock); - plgpio->irq_base = -1; plgpio->chip.base = -1; plgpio->chip.request = plgpio_request; plgpio->chip.free = plgpio_free; @@ -571,10 +560,10 @@ static int plgpio_probe(struct platform_device *pdev) plgpio->chip.direction_output = plgpio_direction_output; plgpio->chip.get = plgpio_get_value; plgpio->chip.set = plgpio_set_value; - plgpio->chip.to_irq = plgpio_to_irq; plgpio->chip.label = dev_name(&pdev->dev); plgpio->chip.dev = &pdev->dev; plgpio->chip.owner = THIS_MODULE; + plgpio->chip.of_node = pdev->dev.of_node; if (!IS_ERR(plgpio->clk)) { ret = clk_prepare(plgpio->clk); @@ -592,43 +581,32 @@ static int plgpio_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_info(&pdev->dev, "irqs not supported\n"); - return 0; - } - - plgpio->irq_base = irq_alloc_descs(-1, 0, plgpio->chip.ngpio, 0); - if (IS_ERR_VALUE(plgpio->irq_base)) { - /* we would not support irq for gpio */ - dev_warn(&pdev->dev, "couldn't allocate irq base\n"); + dev_info(&pdev->dev, "PLGPIO registered without IRQs\n"); return 0; } - plgpio->irq_domain = irq_domain_add_legacy(np, plgpio->chip.ngpio, - plgpio->irq_base, 0, &irq_domain_simple_ops, NULL); - if (WARN_ON(!plgpio->irq_domain)) { - dev_err(&pdev->dev, "irq domain init failed\n"); - irq_free_descs(plgpio->irq_base, plgpio->chip.ngpio); - ret = -ENXIO; + ret = gpiochip_irqchip_add(&plgpio->chip, + &plgpio_irqchip, + 0, + handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&pdev->dev, "failed to add irqchip to gpiochip\n"); goto remove_gpiochip; } - irq_set_chained_handler(irq, plgpio_irq_handler); - for (i = 0; i < plgpio->chip.ngpio; i++) { - irq_set_chip_and_handler(i + plgpio->irq_base, &plgpio_irqchip, - handle_simple_irq); - set_irq_flags(i + plgpio->irq_base, IRQF_VALID); - irq_set_chip_data(i + plgpio->irq_base, plgpio); - } + gpiochip_set_chained_irqchip(&plgpio->chip, + &plgpio_irqchip, + irq, + plgpio_irq_handler); - irq_set_handler_data(irq, plgpio); dev_info(&pdev->dev, "PLGPIO registered with IRQs\n"); return 0; remove_gpiochip: dev_info(&pdev->dev, "Remove gpiochip\n"); - if (gpiochip_remove(&plgpio->chip)) - dev_err(&pdev->dev, "unable to remove gpiochip\n"); + gpiochip_remove(&plgpio->chip); unprepare_clk: if (!IS_ERR(plgpio->clk)) clk_unprepare(plgpio->clk); diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c index 58bf6867aa17..abdb05ac43dc 100644 --- a/drivers/pinctrl/spear/pinctrl-spear.c +++ b/drivers/pinctrl/spear/pinctrl-spear.c @@ -268,18 +268,12 @@ static int spear_pinctrl_endisable(struct pinctrl_dev *pctldev, return 0; } -static int spear_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function, +static int spear_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned function, unsigned group) { return spear_pinctrl_endisable(pctldev, function, group, true); } -static void spear_pinctrl_disable(struct pinctrl_dev *pctldev, - unsigned function, unsigned group) -{ - spear_pinctrl_endisable(pctldev, function, group, false); -} - /* gpio with pinmux */ static struct spear_gpio_pingroup *get_gpio_pingroup(struct spear_pmx *pmx, unsigned pin) 
@@ -344,8 +338,7 @@ static const struct pinmux_ops spear_pinmux_ops = { .get_functions_count = spear_pinctrl_get_funcs_count, .get_function_name = spear_pinctrl_get_func_name, .get_function_groups = spear_pinctrl_get_func_groups, - .enable = spear_pinctrl_enable, - .disable = spear_pinctrl_disable, + .set_mux = spear_pinctrl_set_mux, .gpio_request_enable = gpio_request_enable, .gpio_disable_free = gpio_disable_free, }; diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c index 1a8bbfec60ca..6d57d43ab640 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1310.c +++ b/drivers/pinctrl/spear/pinctrl-spear1310.c @@ -2692,7 +2692,7 @@ static struct spear_pinctrl_machdata spear1310_machdata = { .modes_supported = false, }; -static struct of_device_id spear1310_pinctrl_of_match[] = { +static const struct of_device_id spear1310_pinctrl_of_match[] = { { .compatible = "st,spear1310-pinmux", }, diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c index 873966e2b99f..d243e43e7f6d 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1340.c +++ b/drivers/pinctrl/spear/pinctrl-spear1340.c @@ -2008,7 +2008,7 @@ static struct spear_pinctrl_machdata spear1340_machdata = { .modes_supported = false, }; -static struct of_device_id spear1340_pinctrl_of_match[] = { +static const struct of_device_id spear1340_pinctrl_of_match[] = { { .compatible = "st,spear1340-pinmux", }, diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c index 4777c0d0e730..9db83e9ee18c 100644 --- a/drivers/pinctrl/spear/pinctrl-spear300.c +++ b/drivers/pinctrl/spear/pinctrl-spear300.c @@ -646,7 +646,7 @@ static struct spear_function *spear300_functions[] = { &gpio1_function, }; -static struct of_device_id spear300_pinctrl_of_match[] = { +static const struct of_device_id spear300_pinctrl_of_match[] = { { .compatible = "st,spear300-pinmux", }, diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c index ed1d3608f486..db775a414b7a 100644 --- a/drivers/pinctrl/spear/pinctrl-spear310.c +++ b/drivers/pinctrl/spear/pinctrl-spear310.c @@ -371,7 +371,7 @@ static struct spear_function *spear310_functions[] = { &tdm_function, }; -static struct of_device_id spear310_pinctrl_of_match[] = { +static const struct of_device_id spear310_pinctrl_of_match[] = { { .compatible = "st,spear310-pinmux", }, diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c index b8e290a8c8c9..80fbd68e17bc 100644 --- a/drivers/pinctrl/spear/pinctrl-spear320.c +++ b/drivers/pinctrl/spear/pinctrl-spear320.c @@ -3410,7 +3410,7 @@ static struct spear_function *spear320_functions[] = { &i2c2_function, }; -static struct of_device_id spear320_pinctrl_of_match[] = { +static const struct of_device_id spear320_pinctrl_of_match[] = { { .compatible = "st,spear320-pinmux", }, diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig index 73e0a305ea13..a5e10f777ed2 100644 --- a/drivers/pinctrl/sunxi/Kconfig +++ b/drivers/pinctrl/sunxi/Kconfig @@ -1,36 +1,42 @@ if ARCH_SUNXI -config PINCTRL_SUNXI - bool - config PINCTRL_SUNXI_COMMON bool select PINMUX select GENERIC_PINCONF config PINCTRL_SUN4I_A10 - def_bool PINCTRL_SUNXI || MACH_SUN4I + def_bool MACH_SUN4I select PINCTRL_SUNXI_COMMON config PINCTRL_SUN5I_A10S - def_bool PINCTRL_SUNXI || MACH_SUN5I + def_bool MACH_SUN5I select PINCTRL_SUNXI_COMMON config PINCTRL_SUN5I_A13 - def_bool PINCTRL_SUNXI || MACH_SUN5I + 
def_bool MACH_SUN5I select PINCTRL_SUNXI_COMMON config PINCTRL_SUN6I_A31 - def_bool PINCTRL_SUNXI || MACH_SUN6I + def_bool MACH_SUN6I select PINCTRL_SUNXI_COMMON config PINCTRL_SUN6I_A31_R - def_bool PINCTRL_SUNXI || MACH_SUN6I + def_bool MACH_SUN6I depends on RESET_CONTROLLER select PINCTRL_SUNXI_COMMON config PINCTRL_SUN7I_A20 - def_bool PINCTRL_SUNXI || MACH_SUN7I + def_bool MACH_SUN7I + select PINCTRL_SUNXI_COMMON + +config PINCTRL_SUN8I_A23 + def_bool MACH_SUN8I + select PINCTRL_SUNXI_COMMON + +config PINCTRL_SUN8I_A23_R + def_bool MACH_SUN8I + depends on RESET_CONTROLLER select PINCTRL_SUNXI_COMMON endif diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile index 0f4461cbe11d..e797efb02901 100644 --- a/drivers/pinctrl/sunxi/Makefile +++ b/drivers/pinctrl/sunxi/Makefile @@ -8,3 +8,5 @@ obj-$(CONFIG_PINCTRL_SUN5I_A13) += pinctrl-sun5i-a13.o obj-$(CONFIG_PINCTRL_SUN6I_A31) += pinctrl-sun6i-a31.o obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o +obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o +obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index fa1ff7c7e357..86b608bedca6 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c @@ -1010,6 +1010,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = { static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = { .pins = sun4i_a10_pins, .npins = ARRAY_SIZE(sun4i_a10_pins), + .irq_banks = 1, }; static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c index 164d743f526c..2fa7430cabaf 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c @@ -661,6 +661,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = { static const struct sunxi_pinctrl_desc sun5i_a10s_pinctrl_data = { .pins = sun5i_a10s_pins, .npins = ARRAY_SIZE(sun5i_a10s_pins), + .irq_banks = 1, }; static int sun5i_a10s_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c index 1188a2b7b988..e47c33dbae3a 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c @@ -330,15 +330,12 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = { /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0), SUNXI_FUNCTION(0x0, "gpio_in"), - SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION_IRQ(0x6, 0)), /* EINT0 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1), SUNXI_FUNCTION(0x0, "gpio_in"), - SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION_IRQ(0x6, 1)), /* EINT1 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2), SUNXI_FUNCTION(0x0, "gpio_in"), - SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION_IRQ(0x6, 2)), /* EINT2 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3), SUNXI_FUNCTION(0x0, "gpio_in"), @@ -382,6 +379,7 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = { static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = { .pins = sun5i_a13_pins, .npins = ARRAY_SIZE(sun5i_a13_pins), + .irq_banks = 1, }; static int sun5i_a13_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c index 8fcba48e0a42..9a2517b65113 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c +++ 
b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c @@ -93,6 +93,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = { .pins = sun6i_a31_r_pins, .npins = ARRAY_SIZE(sun6i_a31_r_pins), .pin_base = PL_BASE, + .irq_banks = 2, }; static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c index 8dea5856458b..a2b4b85c5ad5 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c @@ -24,208 +24,244 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = { SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D0 */ - SUNXI_FUNCTION(0x4, "uart1")), /* DTR */ + SUNXI_FUNCTION(0x4, "uart1"), /* DTR */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD1 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D1 */ - SUNXI_FUNCTION(0x4, "uart1")), /* DSR */ + SUNXI_FUNCTION(0x4, "uart1"), /* DSR */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D2 */ - SUNXI_FUNCTION(0x4, "uart1")), /* DCD */ + SUNXI_FUNCTION(0x4, "uart1"), /* DCD */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D3 */ - SUNXI_FUNCTION(0x4, "uart1")), /* RING */ + SUNXI_FUNCTION(0x4, "uart1"), /* RING */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD4 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D4 */ - SUNXI_FUNCTION(0x4, "uart1")), /* TX */ + SUNXI_FUNCTION(0x4, "uart1"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD5 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D5 */ - SUNXI_FUNCTION(0x4, "uart1")), /* RX */ + SUNXI_FUNCTION(0x4, "uart1"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD6 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D6 */ - SUNXI_FUNCTION(0x4, "uart1")), /* RTS */ + SUNXI_FUNCTION(0x4, "uart1"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXD7 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D7 */ - SUNXI_FUNCTION(0x4, "uart1")), /* CTS */ + SUNXI_FUNCTION(0x4, "uart1"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXCLK */ - SUNXI_FUNCTION(0x3, "lcd1")), /* D8 */ + SUNXI_FUNCTION(0x3, "lcd1"), /* D8 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */ SUNXI_FUNCTION(0x3, "lcd1"), /* D9 */ 
SUNXI_FUNCTION(0x4, "mmc3"), /* CMD */ - SUNXI_FUNCTION(0x5, "mmc2")), /* CMD */ + SUNXI_FUNCTION(0x5, "mmc2"), /* CMD */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* GTXCLK */ SUNXI_FUNCTION(0x3, "lcd1"), /* D10 */ SUNXI_FUNCTION(0x4, "mmc3"), /* CLK */ - SUNXI_FUNCTION(0x5, "mmc2")), /* CLK */ + SUNXI_FUNCTION(0x5, "mmc2"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D11 */ SUNXI_FUNCTION(0x4, "mmc3"), /* D0 */ - SUNXI_FUNCTION(0x5, "mmc2")), /* D0 */ + SUNXI_FUNCTION(0x5, "mmc2"), /* D0 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D12 */ SUNXI_FUNCTION(0x4, "mmc3"), /* D1 */ - SUNXI_FUNCTION(0x5, "mmc2")), /* D1 */ + SUNXI_FUNCTION(0x5, "mmc2"), /* D1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D13 */ SUNXI_FUNCTION(0x4, "mmc3"), /* D2 */ - SUNXI_FUNCTION(0x5, "mmc2")), /* D2 */ + SUNXI_FUNCTION(0x5, "mmc2"), /* D2 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */ SUNXI_FUNCTION(0x3, "lcd1"), /* D14 */ SUNXI_FUNCTION(0x4, "mmc3"), /* D3 */ - SUNXI_FUNCTION(0x5, "mmc2")), /* D3 */ + SUNXI_FUNCTION(0x5, "mmc2"), /* D3 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD4 */ - SUNXI_FUNCTION(0x3, "lcd1")), /* D15 */ + SUNXI_FUNCTION(0x3, "lcd1"), /* D15 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD5 */ - SUNXI_FUNCTION(0x3, "lcd1")), /* D16 */ + SUNXI_FUNCTION(0x3, "lcd1"), /* D16 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD6 */ - SUNXI_FUNCTION(0x3, "lcd1")), /* D17 */ + SUNXI_FUNCTION(0x3, "lcd1"), /* D17 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXD7 */ - SUNXI_FUNCTION(0x3, "lcd1")), /* D18 */ + SUNXI_FUNCTION(0x3, "lcd1"), /* D18 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXDV */ SUNXI_FUNCTION(0x3, "lcd1"), /* D19 */ - SUNXI_FUNCTION(0x4, "pwm3")), /* Positive */ + SUNXI_FUNCTION(0x4, "pwm3"), /* Positive */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 19)), /* PA_EINT19 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 20), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXCLK */ 
SUNXI_FUNCTION(0x3, "lcd1"), /* D20 */ - SUNXI_FUNCTION(0x4, "pwm3")), /* Negative */ + SUNXI_FUNCTION(0x4, "pwm3"), /* Negative */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 20)), /* PA_EINT20 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 21), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* TXERR */ SUNXI_FUNCTION(0x3, "lcd1"), /* D21 */ - SUNXI_FUNCTION(0x4, "spi3")), /* CS0 */ + SUNXI_FUNCTION(0x4, "spi3"), /* CS0 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 21)), /* PA_EINT21 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 22), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */ SUNXI_FUNCTION(0x3, "lcd1"), /* D22 */ - SUNXI_FUNCTION(0x4, "spi3")), /* CLK */ + SUNXI_FUNCTION(0x4, "spi3"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 22)), /* PA_EINT22 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 23), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* COL */ SUNXI_FUNCTION(0x3, "lcd1"), /* D23 */ - SUNXI_FUNCTION(0x4, "spi3")), /* MOSI */ + SUNXI_FUNCTION(0x4, "spi3"), /* MOSI */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 23)), /* PA_EINT23 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 24), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* CRS */ SUNXI_FUNCTION(0x3, "lcd1"), /* CLK */ - SUNXI_FUNCTION(0x4, "spi3")), /* MISO */ + SUNXI_FUNCTION(0x4, "spi3"), /* MISO */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 24)), /* PA_EINT24 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 25), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* CLKIN */ SUNXI_FUNCTION(0x3, "lcd1"), /* DE */ - SUNXI_FUNCTION(0x4, "spi3")), /* CS1 */ + SUNXI_FUNCTION(0x4, "spi3"), /* CS1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 25)), /* PA_EINT25 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 26), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* MDC */ - SUNXI_FUNCTION(0x3, "lcd1")), /* HSYNC */ + SUNXI_FUNCTION(0x3, "lcd1"), /* HSYNC */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 26)), /* PA_EINT26 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 27), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "gmac"), /* MDIO */ - SUNXI_FUNCTION(0x3, "lcd1")), /* VSYNC */ + SUNXI_FUNCTION(0x3, "lcd1"), /* VSYNC */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 27)), /* PA_EINT27 */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */ - SUNXI_FUNCTION(0x4, "csi")), /* MCLK1 */ + SUNXI_FUNCTION(0x4, "csi"), /* MCLK1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PB_EINT0 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "i2s0")), /* BCLK */ + SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PB_EINT1 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "i2s0")), /* LRCK */ + SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PB_EINT2 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "i2s0")), /* DO0 */ + SUNXI_FUNCTION(0x2, "i2s0"), /* DO0 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PB_EINT3 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "i2s0"), /* DO1 */ - 
SUNXI_FUNCTION(0x3, "uart3")), /* RTS */ + SUNXI_FUNCTION(0x3, "uart3"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PB_EINT4 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "i2s0"), /* DO2 */ SUNXI_FUNCTION(0x3, "uart3"), /* TX */ - SUNXI_FUNCTION(0x4, "i2c3")), /* SCK */ + SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PB_EINT5 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "i2s0"), /* DO3 */ SUNXI_FUNCTION(0x3, "uart3"), /* RX */ - SUNXI_FUNCTION(0x4, "i2c3")), /* SDA */ + SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PB_EINT6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x3, "i2s0")), /* DI */ + SUNXI_FUNCTION(0x3, "i2s0"), /* DI */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* PB_EINT7 */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), SUNXI_FUNCTION(0x0, "gpio_in"), @@ -510,86 +546,103 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* PCLK */ - SUNXI_FUNCTION(0x3, "ts")), /* CLK */ + SUNXI_FUNCTION(0x3, "ts"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PE_EINT0 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* MCLK */ - SUNXI_FUNCTION(0x3, "ts")), /* ERR */ + SUNXI_FUNCTION(0x3, "ts"), /* ERR */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PE_EINT1 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */ - SUNXI_FUNCTION(0x3, "ts")), /* SYNC */ + SUNXI_FUNCTION(0x3, "ts"), /* SYNC */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PE_EINT2 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */ - SUNXI_FUNCTION(0x3, "ts")), /* DVLD */ + SUNXI_FUNCTION(0x3, "ts"), /* DVLD */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PE_EINT3 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D0 */ - SUNXI_FUNCTION(0x3, "uart5")), /* TX */ + SUNXI_FUNCTION(0x3, "uart5"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PE_EINT4 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D1 */ - SUNXI_FUNCTION(0x3, "uart5")), /* RX */ + SUNXI_FUNCTION(0x3, "uart5"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PE_EINT5 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D2 */ - SUNXI_FUNCTION(0x3, "uart5")), /* RTS */ + SUNXI_FUNCTION(0x3, "uart5"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PE_EINT6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D3 */ - SUNXI_FUNCTION(0x3, "uart5")), /* CTS */ + SUNXI_FUNCTION(0x3, "uart5"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PE_EINT7 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D4 */ - SUNXI_FUNCTION(0x3, "ts")), /* D0 */ + SUNXI_FUNCTION(0x3, "ts"), /* D0 */ + 
SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PE_EINT8 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D5 */ - SUNXI_FUNCTION(0x3, "ts")), /* D1 */ + SUNXI_FUNCTION(0x3, "ts"), /* D1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PE_EINT9 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D6 */ - SUNXI_FUNCTION(0x3, "ts")), /* D2 */ + SUNXI_FUNCTION(0x3, "ts"), /* D2 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PE_EINT10 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D7 */ - SUNXI_FUNCTION(0x3, "ts")), /* D3 */ + SUNXI_FUNCTION(0x3, "ts"), /* D3 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PE_EINT11 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D8 */ - SUNXI_FUNCTION(0x3, "ts")), /* D4 */ + SUNXI_FUNCTION(0x3, "ts"), /* D4 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PE_EINT12 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D9 */ - SUNXI_FUNCTION(0x3, "ts")), /* D5 */ + SUNXI_FUNCTION(0x3, "ts"), /* D5 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PE_EINT13 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D10 */ - SUNXI_FUNCTION(0x3, "ts")), /* D6 */ + SUNXI_FUNCTION(0x3, "ts"), /* D6 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)), /* PE_EINT14 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "csi"), /* D11 */ - SUNXI_FUNCTION(0x3, "ts")), /* D7 */ + SUNXI_FUNCTION(0x3, "ts"), /* D7 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PE_EINT15 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "csi")), /* MIPI CSI MCLK */ + SUNXI_FUNCTION(0x2, "csi"), /* MIPI CSI MCLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)), /* PE_EINT16 */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0), SUNXI_FUNCTION(0x0, "gpio_in"), @@ -625,86 +678,105 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "mmc1")), /* CLK */ + SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)), /* PG_EINT0 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "mmc1")), /* CMD */ + SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)), /* PG_EINT1 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "mmc1")), /* D0 */ + SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)), /* PG_EINT2 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "mmc1")), /* D1 */ + SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)), /* PG_EINT3 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "mmc1")), /* D2 */ + SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)), /* PG_EINT4 */ 
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "mmc1")), /* D3 */ + SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)), /* PG_EINT5 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2")), /* TX */ + SUNXI_FUNCTION(0x2, "uart2"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)), /* PG_EINT6 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2")), /* RX */ + SUNXI_FUNCTION(0x2, "uart2"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)), /* PG_EINT7 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2")), /* RTS */ + SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)), /* PG_EINT8 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart2")), /* CTS */ + SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)), /* PG_EINT9 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */ - SUNXI_FUNCTION(0x3, "usb")), /* DP3 */ + SUNXI_FUNCTION(0x3, "usb"), /* DP3 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PG_EINT10 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */ - SUNXI_FUNCTION(0x3, "usb")), /* DM3 */ + SUNXI_FUNCTION(0x3, "usb"), /* DM3 */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PG_EINT11 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */ - SUNXI_FUNCTION(0x3, "i2s1")), /* MCLK */ + SUNXI_FUNCTION(0x3, "i2s1"), /* MCLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 12)), /* PG_EINT12 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */ - SUNXI_FUNCTION(0x3, "i2s1")), /* BCLK */ + SUNXI_FUNCTION(0x3, "i2s1"), /* BCLK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 13)), /* PG_EINT13 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* CLK */ - SUNXI_FUNCTION(0x3, "i2s1")), /* LRCK */ + SUNXI_FUNCTION(0x3, "i2s1"), /* LRCK */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 14)), /* PG_EINT14 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */ - SUNXI_FUNCTION(0x3, "i2s1")), /* DIN */ + SUNXI_FUNCTION(0x3, "i2s1"), /* DIN */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 15)), /* PG_EINT15 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* MISO */ - SUNXI_FUNCTION(0x3, "i2s1")), /* DOUT */ + SUNXI_FUNCTION(0x3, "i2s1"), /* DOUT */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 16)), /* PG_EINT16 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart4")), /* TX */ + SUNXI_FUNCTION(0x2, "uart4"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 17)), /* PG_EINT17 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x2, "uart4")), /* RX */ + SUNXI_FUNCTION(0x2, 
"uart4"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 18)), /* PG_EINT18 */ /* Hole */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0), SUNXI_FUNCTION(0x0, "gpio_in"), @@ -836,6 +908,7 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = { static const struct sunxi_pinctrl_desc sun6i_a31_pinctrl_data = { .pins = sun6i_a31_pins, .npins = ARRAY_SIZE(sun6i_a31_pins), + .irq_banks = 4, }; static int sun6i_a31_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c index d8577ce5f1a4..dac99e02bfdb 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c @@ -1036,6 +1036,7 @@ static const struct sunxi_desc_pin sun7i_a20_pins[] = { static const struct sunxi_pinctrl_desc sun7i_a20_pinctrl_data = { .pins = sun7i_a20_pins, .npins = ARRAY_SIZE(sun7i_a20_pins), + .irq_banks = 1, }; static int sun7i_a20_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c new file mode 100644 index 000000000000..90f3b3a7c51e --- /dev/null +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c @@ -0,0 +1,142 @@ +/* + * Allwinner A23 SoCs special pins pinctrl driver. + * + * Copyright (C) 2014 Chen-Yu Tsai + * Chen-Yu Tsai <wens@csie.org> + * + * Copyright (C) 2014 Boris Brezillon + * Boris Brezillon <boris.brezillon@free-electrons.com> + * + * Copyright (C) 2014 Maxime Ripard + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/reset.h> + +#include "pinctrl-sunxi.h" + +static const struct sunxi_desc_pin sun8i_a23_r_pins[] = { + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_rsb"), /* SCK */ + SUNXI_FUNCTION(0x3, "s_twi"), /* SCK */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 0)), /* PL_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_rsb"), /* SDA */ + SUNXI_FUNCTION(0x3, "s_twi"), /* SDA */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 1)), /* PL_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_uart"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 2)), /* PL_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_uart"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 3)), /* PL_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* MS */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 4)), /* PL_EINT4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* CK */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 5)), /* PL_EINT5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* DO */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 6)), /* PL_EINT6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7), + 
SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x3, "s_jtag"), /* DI */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 7)), /* PL_EINT7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_twi"), /* SCK */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 8)), /* PL_EINT8 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_twi"), /* SDA */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 9)), /* PL_EINT9 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "s_pwm"), + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 10)), /* PL_EINT10 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 11)), /* PL_EINT11 */ +}; + +static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = { + .pins = sun8i_a23_r_pins, + .npins = ARRAY_SIZE(sun8i_a23_r_pins), + .pin_base = PL_BASE, + .irq_banks = 1, +}; + +static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev) +{ + struct reset_control *rstc; + int ret; + + rstc = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(rstc)) { + dev_err(&pdev->dev, "Reset controller missing\n"); + return PTR_ERR(rstc); + } + + ret = reset_control_deassert(rstc); + if (ret) + return ret; + + ret = sunxi_pinctrl_init(pdev, + &sun8i_a23_r_pinctrl_data); + + if (ret) + reset_control_assert(rstc); + + return ret; +} + +static struct of_device_id sun8i_a23_r_pinctrl_match[] = { + { .compatible = "allwinner,sun8i-a23-r-pinctrl", }, + {} +}; +MODULE_DEVICE_TABLE(of, sun8i_a23_r_pinctrl_match); + +static struct platform_driver sun8i_a23_r_pinctrl_driver = { + .probe = sun8i_a23_r_pinctrl_probe, + .driver = { + .name = "sun8i-a23-r-pinctrl", + .owner = THIS_MODULE, + .of_match_table = sun8i_a23_r_pinctrl_match, + }, +}; +module_platform_driver(sun8i_a23_r_pinctrl_driver); + +MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>"); +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); +MODULE_DESCRIPTION("Allwinner A23 R_PIO pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c new file mode 100644 index 000000000000..ac71e8c5901b --- /dev/null +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c @@ -0,0 +1,593 @@ +/* + * Allwinner A23 SoCs pinctrl driver. + * + * Copyright (C) 2014 Chen-Yu Tsai + * + * Chen-Yu Tsai <wens@csie.org> + * + * Copyright (C) 2014 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-sunxi.h" + +static const struct sunxi_desc_pin sun8i_a23_pins[] = { + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* CS */ + SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 0)), /* PA_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* CLK */ + SUNXI_FUNCTION(0x3, "jtag"), /* CKO */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 1)), /* PA_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */ + SUNXI_FUNCTION(0x3, "jtag"), /* DOO */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 2)), /* PA_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi1"), /* MISO */ + SUNXI_FUNCTION(0x3, "jtag"), /* DIO */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 3)), /* PA_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart4"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 4)), /* PA_EINT4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart4"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 5)), /* PA_EINT5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart4"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 6)), /* PA_EINT6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart4"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 7)), /* PA_EINT7 */ + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 0)), /* PB_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 1)), /* PB_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 2)), /* PB_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 3)), /* PB_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s0"), /* SYNC */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 4)), /* PB_EINT4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s0"), /* DOUT */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 5)), /* PB_EINT5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s0"), /* DIN */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 6)), /* PB_EINT6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + 
SUNXI_FUNCTION(0x3, "i2s0"), /* DI */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 7)), /* PB_EINT7 */ + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* WE */ + SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* ALE */ + SUNXI_FUNCTION(0x3, "spi0")), /* MISO */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* CLE */ + SUNXI_FUNCTION(0x3, "spi0")), /* CLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* CE1 */ + SUNXI_FUNCTION(0x3, "spi0")), /* CS */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* RE */ + SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0")), /* RB1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */ + SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand"), /* DQS */ + SUNXI_FUNCTION(0x3, "mmc2")), /* RST */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */ + /* 
Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0")), /* D0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0")), /* D1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */ + SUNXI_FUNCTION(0x3, "mmc1")), /* CLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */ + SUNXI_FUNCTION(0x3, "mmc1")), /* CMD */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */ + SUNXI_FUNCTION(0x3, "mmc1")), /* D0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */ + SUNXI_FUNCTION(0x3, "mmc1")), /* D1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */ + SUNXI_FUNCTION(0x3, "mmc1")), /* D2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */ + SUNXI_FUNCTION(0x3, "mmc1")), /* D3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */ + SUNXI_FUNCTION(0x3, "uart3")), /* TX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */ + SUNXI_FUNCTION(0x3, "uart3")), /* RX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */ + SUNXI_FUNCTION(0x3, "uart1")), /* TX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */ + SUNXI_FUNCTION(0x3, "uart1")), /* RX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */ + SUNXI_FUNCTION(0x3, "uart1")), /* RTS */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */ + SUNXI_FUNCTION(0x3, "uart1")), /* CTS */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */ + SUNXI_FUNCTION(0x3, "i2s1")), /* SYNC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */ + SUNXI_FUNCTION(0x3, "i2s1")), /* CLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */ + SUNXI_FUNCTION(0x3, "i2s1")), /* DOUT */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */ + SUNXI_FUNCTION(0x3, "i2s1")), /* DIN */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19), 
+ SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* DE */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */ + SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */ + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* PCLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* MCLK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* HSYNC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* VSYNC */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi")), /* D7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, 
"gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* SCK */ + SUNXI_FUNCTION(0x3, "i2c2")), /* SCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "csi"), /* SDA */ + SUNXI_FUNCTION(0x3, "i2c2")), /* SDA */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), + SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out")), + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */ + SUNXI_FUNCTION(0x3, "jtag")), /* MS1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */ + SUNXI_FUNCTION(0x3, "jtag")), /* DI1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */ + SUNXI_FUNCTION(0x3, "uart0")), /* TX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */ + SUNXI_FUNCTION(0x3, "jtag")), /* DO1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */ + SUNXI_FUNCTION(0x3, "uart0")), /* RX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */ + SUNXI_FUNCTION(0x3, "jtag")), /* CK1 */ + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 0)), /* PG_EINT0 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 1)), /* PG_EINT1 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 2)), /* PG_EINT2 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 3)), /* PG_EINT3 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 4)), /* PG_EINT4 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 5)), /* PG_EINT5 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* TX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 6)), /* PG_EINT6 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart1"), /* RX */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 7)), /* PG_EINT7 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + 
SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* RTS */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* SYNC */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 10)), /* PG_EINT10 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* CLK */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 11)), /* PG_EINT11 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* DOUT */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 12)), /* PG_EINT12 */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2s1"), /* DIN */ + SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 13)), /* PG_EINT13 */ + /* Hole */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "pwm0")), + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "pwm1")), + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* CS */ + SUNXI_FUNCTION(0x3, "uart3")), /* TX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* CLK */ + SUNXI_FUNCTION(0x3, "uart3")), /* RX */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* DOUT */ + SUNXI_FUNCTION(0x3, "uart3")), /* RTS */ + SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9), + SUNXI_FUNCTION(0x0, "gpio_in"), + SUNXI_FUNCTION(0x1, "gpio_out"), + SUNXI_FUNCTION(0x2, "spi0"), /* DIN */ + SUNXI_FUNCTION(0x3, "uart3")), /* CTS */ +}; + +static const struct sunxi_pinctrl_desc sun8i_a23_pinctrl_data = { + .pins = sun8i_a23_pins, + .npins = ARRAY_SIZE(sun8i_a23_pins), + .irq_banks = 3, +}; + +static int sun8i_a23_pinctrl_probe(struct platform_device *pdev) +{ + return sunxi_pinctrl_init(pdev, + &sun8i_a23_pinctrl_data); +} + +static struct of_device_id sun8i_a23_pinctrl_match[] = { + { .compatible = "allwinner,sun8i-a23-pinctrl", }, + {} +}; +MODULE_DEVICE_TABLE(of, sun8i_a23_pinctrl_match); + +static struct platform_driver sun8i_a23_pinctrl_driver = { + .probe = sun8i_a23_pinctrl_probe, + .driver = { + .name = "sun8i-a23-pinctrl", + .owner = THIS_MODULE, + .of_match_table = sun8i_a23_pinctrl_match, + }, +}; +module_platform_driver(sun8i_a23_pinctrl_driver); + +MODULE_AUTHOR("Chen-Yu 
Tsai <wens@csie.org>"); +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com"); +MODULE_DESCRIPTION("Allwinner A23 pinctrl driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 5f38c7f67834..ef9d804e55de 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -31,6 +31,9 @@ #include "../core.h" #include "pinctrl-sunxi.h" +static struct irq_chip sunxi_pinctrl_edge_irq_chip; +static struct irq_chip sunxi_pinctrl_level_irq_chip; + static struct sunxi_pinctrl_group * sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group) { @@ -390,9 +393,9 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev, spin_unlock_irqrestore(&pctl->lock, flags); } -static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, - unsigned function, - unsigned group) +static int sunxi_pmx_set_mux(struct pinctrl_dev *pctldev, + unsigned function, + unsigned group) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); struct sunxi_pinctrl_group *g = pctl->groups + group; @@ -438,7 +441,7 @@ static const struct pinmux_ops sunxi_pmx_ops = { .get_functions_count = sunxi_pmx_get_funcs_cnt, .get_function_name = sunxi_pmx_get_func_name, .get_function_groups = sunxi_pmx_get_func_groups, - .enable = sunxi_pmx_enable, + .set_mux = sunxi_pmx_set_mux, .gpio_set_direction = sunxi_pmx_gpio_set_direction, }; @@ -508,7 +511,7 @@ static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, base = PINS_PER_BANK * gpiospec->args[0]; pin = base + gpiospec->args[1]; - if (pin > (gc->base + gc->ngpio)) + if (pin > gc->ngpio) return -EINVAL; if (flags) @@ -521,25 +524,61 @@ static int sunxi_pinctrl_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); struct sunxi_desc_function *desc; + unsigned pinnum = pctl->desc->pin_base + offset; + unsigned irqnum; if (offset >= chip->ngpio) return -ENXIO; - desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, offset, "irq"); + desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, pinnum, "irq"); if (!desc) return -EINVAL; + irqnum = desc->irqbank * IRQ_PER_BANK + desc->irqnum; + dev_dbg(chip->dev, "%s: request IRQ for GPIO %d, return %d\n", - chip->label, offset + chip->base, desc->irqnum); + chip->label, offset + chip->base, irqnum); - return irq_find_mapping(pctl->domain, desc->irqnum); + return irq_find_mapping(pctl->domain, irqnum); } +static int sunxi_pinctrl_irq_request_resources(struct irq_data *d) +{ + struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); + struct sunxi_desc_function *func; + int ret; + + func = sunxi_pinctrl_desc_find_function_by_pin(pctl, + pctl->irq_array[d->hwirq], "irq"); + if (!func) + return -EINVAL; + + ret = gpio_lock_as_irq(pctl->chip, + pctl->irq_array[d->hwirq] - pctl->desc->pin_base); + if (ret) { + dev_err(pctl->dev, "unable to lock HW IRQ %lu for IRQ\n", + irqd_to_hwirq(d)); + return ret; + } + + /* Change muxing to INT mode */ + sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); -static int sunxi_pinctrl_irq_set_type(struct irq_data *d, - unsigned int type) + return 0; +} + +static void sunxi_pinctrl_irq_release_resources(struct irq_data *d) +{ + struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); + + gpio_unlock_as_irq(pctl->chip, + pctl->irq_array[d->hwirq] - pctl->desc->pin_base); +} + +static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type) { struct sunxi_pinctrl *pctl = 
irq_data_get_irq_chip_data(d); + struct irq_desc *desc = container_of(d, struct irq_desc, irq_data); u32 reg = sunxi_irq_cfg_reg(d->hwirq); u8 index = sunxi_irq_cfg_offset(d->hwirq); unsigned long flags; @@ -566,6 +605,14 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, return -EINVAL; } + if (type & IRQ_TYPE_LEVEL_MASK) { + d->chip = &sunxi_pinctrl_level_irq_chip; + desc->handle_irq = handle_fasteoi_irq; + } else { + d->chip = &sunxi_pinctrl_edge_irq_chip; + desc->handle_irq = handle_edge_irq; + } + spin_lock_irqsave(&pctl->lock, flags); regval = readl(pctl->membase + reg); @@ -577,26 +624,14 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, return 0; } -static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d) +static void sunxi_pinctrl_irq_ack(struct irq_data *d) { struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); - u32 ctrl_reg = sunxi_irq_ctrl_reg(d->hwirq); - u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); u32 status_reg = sunxi_irq_status_reg(d->hwirq); u8 status_idx = sunxi_irq_status_offset(d->hwirq); - unsigned long flags; - u32 val; - - spin_lock_irqsave(&pctl->lock, flags); - - /* Mask the IRQ */ - val = readl(pctl->membase + ctrl_reg); - writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); /* Clear the IRQ */ writel(1 << status_idx, pctl->membase + status_reg); - - spin_unlock_irqrestore(&pctl->lock, flags); } static void sunxi_pinctrl_irq_mask(struct irq_data *d) @@ -619,19 +654,11 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d) static void sunxi_pinctrl_irq_unmask(struct irq_data *d) { struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); - struct sunxi_desc_function *func; u32 reg = sunxi_irq_ctrl_reg(d->hwirq); u8 idx = sunxi_irq_ctrl_offset(d->hwirq); unsigned long flags; u32 val; - func = sunxi_pinctrl_desc_find_function_by_pin(pctl, - pctl->irq_array[d->hwirq], - "irq"); - - /* Change muxing to INT mode */ - sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); - spin_lock_irqsave(&pctl->lock, flags); /* Unmask the IRQ */ @@ -641,28 +668,60 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d) spin_unlock_irqrestore(&pctl->lock, flags); } -static struct irq_chip sunxi_pinctrl_irq_chip = { +static void sunxi_pinctrl_irq_ack_unmask(struct irq_data *d) +{ + sunxi_pinctrl_irq_ack(d); + sunxi_pinctrl_irq_unmask(d); +} + +static struct irq_chip sunxi_pinctrl_edge_irq_chip = { + .irq_ack = sunxi_pinctrl_irq_ack, + .irq_mask = sunxi_pinctrl_irq_mask, + .irq_unmask = sunxi_pinctrl_irq_unmask, + .irq_request_resources = sunxi_pinctrl_irq_request_resources, + .irq_release_resources = sunxi_pinctrl_irq_release_resources, + .irq_set_type = sunxi_pinctrl_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + +static struct irq_chip sunxi_pinctrl_level_irq_chip = { + .irq_eoi = sunxi_pinctrl_irq_ack, .irq_mask = sunxi_pinctrl_irq_mask, - .irq_mask_ack = sunxi_pinctrl_irq_mask_ack, .irq_unmask = sunxi_pinctrl_irq_unmask, + /* Define irq_enable / disable to avoid spurious irqs for drivers + * using these to suppress irqs while they clear the irq source */ + .irq_enable = sunxi_pinctrl_irq_ack_unmask, + .irq_disable = sunxi_pinctrl_irq_mask, + .irq_request_resources = sunxi_pinctrl_irq_request_resources, + .irq_release_resources = sunxi_pinctrl_irq_release_resources, .irq_set_type = sunxi_pinctrl_irq_set_type, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_EOI_THREADED | + IRQCHIP_EOI_IF_HANDLED, }; static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) { struct irq_chip *chip = 
irq_get_chip(irq); struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); - const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); + unsigned long bank, reg, val; + + for (bank = 0; bank < pctl->desc->irq_banks; bank++) + if (irq == pctl->irq[bank]) + break; + + if (bank == pctl->desc->irq_banks) + return; - /* Clear all interrupts */ - writel(reg, pctl->membase + IRQ_STATUS_REG); + reg = sunxi_irq_status_reg_from_bank(bank); + val = readl(pctl->membase + reg); - if (reg) { + if (val) { int irqoffset; chained_irq_enter(chip, desc); - for_each_set_bit(irqoffset, ®, SUNXI_IRQ_NUMBER) { - int pin_irq = irq_find_mapping(pctl->domain, irqoffset); + for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) { + int pin_irq = irq_find_mapping(pctl->domain, + bank * IRQ_PER_BANK + irqoffset); generic_handle_irq(pin_irq); } chained_irq_exit(chip, desc); @@ -730,8 +789,11 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev) while (func->name) { /* Create interrupt mapping while we're at it */ - if (!strcmp(func->name, "irq")) - pctl->irq_array[func->irqnum] = pin->pin.number; + if (!strcmp(func->name, "irq")) { + int irqnum = func->irqnum + func->irqbank * IRQ_PER_BANK; + pctl->irq_array[irqnum] = pin->pin.number; + } + sunxi_pinctrl_add_function(pctl, func->name); func++; } @@ -801,6 +863,13 @@ int sunxi_pinctrl_init(struct platform_device *pdev, pctl->dev = &pdev->dev; pctl->desc = desc; + pctl->irq_array = devm_kcalloc(&pdev->dev, + IRQ_PER_BANK * pctl->desc->irq_banks, + sizeof(*pctl->irq_array), + GFP_KERNEL); + if (!pctl->irq_array) + return -ENOMEM; + ret = sunxi_pinctrl_build_state(pdev); if (ret) { dev_err(&pdev->dev, "dt probe failed: %d\n", ret); @@ -869,7 +938,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev, const struct sunxi_desc_pin *pin = pctl->desc->pins + i; ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev), - pin->pin.number, + pin->pin.number - pctl->desc->pin_base, pin->pin.number, 1); if (ret) goto gpiochip_error; @@ -885,30 +954,51 @@ int sunxi_pinctrl_init(struct platform_device *pdev, if (ret) goto gpiochip_error; - pctl->irq = irq_of_parse_and_map(node, 0); + pctl->irq = devm_kcalloc(&pdev->dev, + pctl->desc->irq_banks, + sizeof(*pctl->irq), + GFP_KERNEL); if (!pctl->irq) { - ret = -EINVAL; + ret = -ENOMEM; goto clk_error; } - pctl->domain = irq_domain_add_linear(node, SUNXI_IRQ_NUMBER, - &irq_domain_simple_ops, NULL); + for (i = 0; i < pctl->desc->irq_banks; i++) { + pctl->irq[i] = platform_get_irq(pdev, i); + if (pctl->irq[i] < 0) { + ret = pctl->irq[i]; + goto clk_error; + } + } + + pctl->domain = irq_domain_add_linear(node, + pctl->desc->irq_banks * IRQ_PER_BANK, + &irq_domain_simple_ops, + NULL); if (!pctl->domain) { dev_err(&pdev->dev, "Couldn't register IRQ domain\n"); ret = -ENOMEM; goto clk_error; } - for (i = 0; i < SUNXI_IRQ_NUMBER; i++) { + for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { int irqno = irq_create_mapping(pctl->domain, i); - irq_set_chip_and_handler(irqno, &sunxi_pinctrl_irq_chip, - handle_simple_irq); + irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip, + handle_edge_irq); irq_set_chip_data(irqno, pctl); }; - irq_set_chained_handler(pctl->irq, sunxi_pinctrl_irq_handler); - irq_set_handler_data(pctl->irq, pctl); + for (i = 0; i < pctl->desc->irq_banks; i++) { + /* Mask and clear all IRQs before registering a handler */ + writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i)); + writel(0xffffffff, + pctl->membase + sunxi_irq_status_reg_from_bank(i)); + + 
irq_set_chained_handler(pctl->irq[i], + sunxi_pinctrl_irq_handler); + irq_set_handler_data(pctl->irq[i], pctl); + } dev_info(&pdev->dev, "initialized sunXi PIO driver\n"); @@ -917,8 +1007,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev, clk_error: clk_disable_unprepare(clk); gpiochip_error: - if (gpiochip_remove(pctl->chip)) - dev_err(&pdev->dev, "failed to remove gpio chip\n"); + gpiochip_remove(pctl->chip); pinctrl_error: pinctrl_unregister(pctl->pctl_dev); return ret; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index 8169ba598876..4245b96c7996 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -53,7 +53,7 @@ #define PULL_PINS_BITS 2 #define PULL_PINS_MASK 0x03 -#define SUNXI_IRQ_NUMBER 32 +#define IRQ_PER_BANK 32 #define IRQ_CFG_REG 0x200 #define IRQ_CFG_IRQ_PER_REG 8 @@ -68,6 +68,8 @@ #define IRQ_STATUS_IRQ_BITS 1 #define IRQ_STATUS_IRQ_MASK ((1 << IRQ_STATUS_IRQ_BITS) - 1) +#define IRQ_MEM_SIZE 0x20 + #define IRQ_EDGE_RISING 0x00 #define IRQ_EDGE_FALLING 0x01 #define IRQ_LEVEL_HIGH 0x02 @@ -77,6 +79,7 @@ struct sunxi_desc_function { const char *name; u8 muxval; + u8 irqbank; u8 irqnum; }; @@ -89,6 +92,7 @@ struct sunxi_pinctrl_desc { const struct sunxi_desc_pin *pins; int npins; unsigned pin_base; + unsigned irq_banks; }; struct sunxi_pinctrl_function { @@ -113,8 +117,8 @@ struct sunxi_pinctrl { unsigned nfunctions; struct sunxi_pinctrl_group *groups; unsigned ngroups; - int irq; - int irq_array[SUNXI_IRQ_NUMBER]; + int *irq; + unsigned *irq_array; spinlock_t lock; struct pinctrl_dev *pctl_dev; }; @@ -139,6 +143,14 @@ struct sunxi_pinctrl { .irqnum = _irq, \ } +#define SUNXI_FUNCTION_IRQ_BANK(_val, _bank, _irq) \ + { \ + .name = "irq", \ + .muxval = _val, \ + .irqbank = _bank, \ + .irqnum = _irq, \ + } + /* * The sunXi PIO registers are organized as is: * 0x00 - 0x0c Muxing values. 
@@ -218,8 +230,10 @@ static inline u32 sunxi_pull_offset(u16 pin) static inline u32 sunxi_irq_cfg_reg(u16 irq) { - u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04; - return reg + IRQ_CFG_REG; + u8 bank = irq / IRQ_PER_BANK; + u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04; + + return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg; } static inline u32 sunxi_irq_cfg_offset(u16 irq) @@ -228,10 +242,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq) return irq_num * IRQ_CFG_IRQ_BITS; } +static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank) +{ + return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE; +} + static inline u32 sunxi_irq_ctrl_reg(u16 irq) { - u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04; - return reg + IRQ_CTRL_REG; + u8 bank = irq / IRQ_PER_BANK; + + return sunxi_irq_ctrl_reg_from_bank(bank); } static inline u32 sunxi_irq_ctrl_offset(u16 irq) @@ -240,10 +260,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq) return irq_num * IRQ_CTRL_IRQ_BITS; } +static inline u32 sunxi_irq_status_reg_from_bank(u8 bank) +{ + return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE; +} + static inline u32 sunxi_irq_status_reg(u16 irq) { - u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04; - return reg + IRQ_STATUS_REG; + u8 bank = irq / IRQ_PER_BANK; + + return sunxi_irq_status_reg_from_bank(bank); } static inline u32 sunxi_irq_status_offset(u16 irq) diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c index 2c61281bebd7..d055d63309e4 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -131,9 +131,9 @@ static int wmt_set_pinmux(struct wmt_pinctrl_data *data, unsigned func, return 0; } -static int wmt_pmx_enable(struct pinctrl_dev *pctldev, - unsigned func_selector, - unsigned group_selector) +static int wmt_pmx_set_mux(struct pinctrl_dev *pctldev, + unsigned func_selector, + unsigned group_selector) { struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev); u32 pinnum = data->pins[group_selector].number; @@ -141,17 +141,6 @@ static int wmt_pmx_enable(struct pinctrl_dev *pctldev, return wmt_set_pinmux(data, func_selector, pinnum); } -static void wmt_pmx_disable(struct pinctrl_dev *pctldev, - unsigned func_selector, - unsigned group_selector) -{ - struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev); - u32 pinnum = data->pins[group_selector].number; - - /* disable by setting GPIO_IN */ - wmt_set_pinmux(data, WMT_FSEL_GPIO_IN, pinnum); -} - static void wmt_pmx_gpio_disable_free(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned offset) @@ -179,8 +168,7 @@ static struct pinmux_ops wmt_pinmux_ops = { .get_functions_count = wmt_pmx_get_functions_count, .get_function_name = wmt_pmx_get_function_name, .get_function_groups = wmt_pmx_get_function_groups, - .enable = wmt_pmx_enable, - .disable = wmt_pmx_disable, + .set_mux = wmt_pmx_set_mux, .gpio_disable_free = wmt_pmx_gpio_disable_free, .gpio_set_direction = wmt_pmx_gpio_set_direction, }; @@ -627,8 +615,7 @@ int wmt_pinctrl_probe(struct platform_device *pdev, return 0; fail_range: - if (gpiochip_remove(&data->gpio_chip)) - dev_err(&pdev->dev, "failed to remove gpio chip\n"); + gpiochip_remove(&data->gpio_chip); fail_gpio: pinctrl_unregister(data->pctl_dev); return err; @@ -637,12 +624,8 @@ fail_gpio: int wmt_pinctrl_remove(struct platform_device *pdev) { struct wmt_pinctrl_data *data = platform_get_drvdata(pdev); - int err; - - err = gpiochip_remove(&data->gpio_chip); - if (err) - dev_err(&pdev->dev, "failed to remove gpio chip\n"); + 
gpiochip_remove(&data->gpio_chip); pinctrl_unregister(data->pctl_dev); return 0; diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 7f1a2e2711bd..d866db80b4fd 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -37,6 +37,8 @@ #define ISL_ALS_I2C_ADDR 0x44 #define TAOS_ALS_I2C_ADDR 0x29 +#define MAX_I2C_DEVICE_DEFERRALS 5 + static struct i2c_client *als; static struct i2c_client *tp; static struct i2c_client *ts; @@ -45,6 +47,8 @@ static const char *i2c_adapter_names[] = { "SMBus I801 adapter", "i915 gmbus vga", "i915 gmbus panel", + "i2c-designware-pci", + "i2c-designware-pci", }; /* Keep this enum consistent with i2c_adapter_names */ @@ -52,11 +56,21 @@ enum i2c_adapter_type { I2C_ADAPTER_SMBUS = 0, I2C_ADAPTER_VGADDC, I2C_ADAPTER_PANEL, + I2C_ADAPTER_DESIGNWARE_0, + I2C_ADAPTER_DESIGNWARE_1, +}; + +enum i2c_peripheral_state { + UNPROBED = 0, + PROBED, + TIMEDOUT, }; struct i2c_peripheral { int (*add)(enum i2c_adapter_type type); enum i2c_adapter_type type; + enum i2c_peripheral_state state; + int tries; }; #define MAX_I2C_PERIPHERALS 3 @@ -97,8 +111,6 @@ static struct mxt_platform_data atmel_224s_tp_platform_data = { .irqflags = IRQF_TRIGGER_FALLING, .t19_num_keys = ARRAY_SIZE(mxt_t19_keys), .t19_keymap = mxt_t19_keys, - .config = NULL, - .config_length = 0, }; static struct i2c_board_info atmel_224s_tp_device = { @@ -109,8 +121,6 @@ static struct i2c_board_info atmel_224s_tp_device = { static struct mxt_platform_data atmel_1664s_platform_data = { .irqflags = IRQF_TRIGGER_FALLING, - .config = NULL, - .config_length = 0, }; static struct i2c_board_info atmel_1664s_device = { @@ -162,8 +172,8 @@ static struct i2c_client *__add_probed_i2c_device( /* add the i2c device */ client = i2c_new_probed_device(adapter, info, addrs, NULL); if (!client) - pr_err("%s failed to register device %d-%02x\n", - __func__, bus, info->addr); + pr_notice("%s failed to register device %d-%02x\n", + __func__, bus, info->addr); else pr_debug("%s added i2c device %d-%02x\n", __func__, bus, info->addr); @@ -172,29 +182,43 @@ static struct i2c_client *__add_probed_i2c_device( return client; } +struct i2c_lookup { + const char *name; + int instance; + int n; +}; + static int __find_i2c_adap(struct device *dev, void *data) { - const char *name = data; + struct i2c_lookup *lookup = data; static const char *prefix = "i2c-"; struct i2c_adapter *adapter; + if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0) return 0; adapter = to_i2c_adapter(dev); - return (strncmp(adapter->name, name, strlen(name)) == 0); + if (strncmp(adapter->name, lookup->name, strlen(lookup->name)) == 0 && + lookup->n++ == lookup->instance) + return 1; + return 0; } static int find_i2c_adapter_num(enum i2c_adapter_type type) { struct device *dev = NULL; struct i2c_adapter *adapter; - const char *name = i2c_adapter_names[type]; + struct i2c_lookup lookup; + + memset(&lookup, 0, sizeof(lookup)); + lookup.name = i2c_adapter_names[type]; + lookup.instance = (type == I2C_ADAPTER_DESIGNWARE_1) ? 1 : 0; + /* find the adapter by name */ - dev = bus_find_device(&i2c_bus_type, NULL, (void *)name, - __find_i2c_adap); + dev = bus_find_device(&i2c_bus_type, NULL, &lookup, __find_i2c_adap); if (!dev) { /* Adapters may appear later. 
Deferred probing will retry */ pr_notice("%s: i2c adapter %s not found on system.\n", __func__, - name); + lookup.name); return -ENODEV; } adapter = to_i2c_adapter(dev); @@ -231,6 +255,7 @@ static struct i2c_client *add_i2c_device(const char *name, struct i2c_board_info *info) { const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END }; + return __add_probed_i2c_device(name, find_i2c_adapter_num(type), info, @@ -328,9 +353,36 @@ static int chromeos_laptop_probe(struct platform_device *pdev) if (i2c_dev->add == NULL) break; - /* Add the device. Set -EPROBE_DEFER on any failure */ - if (i2c_dev->add(i2c_dev->type)) + if (i2c_dev->state == TIMEDOUT || i2c_dev->state == PROBED) + continue; + + /* + * Check that the i2c adapter is present. + * -EPROBE_DEFER if missing as the adapter may appear much + * later. + */ + if (find_i2c_adapter_num(i2c_dev->type) == -ENODEV) { ret = -EPROBE_DEFER; + continue; + } + + /* Add the device. */ + if (i2c_dev->add(i2c_dev->type) == -EAGAIN) { + /* + * Set -EPROBE_DEFER a limited num of times + * if device is not successfully added. + */ + if (++i2c_dev->tries < MAX_I2C_DEVICE_DEFERRALS) { + ret = -EPROBE_DEFER; + } else { + /* Ran out of tries. */ + pr_notice("%s: Ran out of tries for device.\n", + __func__); + i2c_dev->state = TIMEDOUT; + } + } else { + i2c_dev->state = PROBED; + } } return ret; @@ -363,6 +415,27 @@ static struct chromeos_laptop chromebook_pixel = { }, }; +static struct chromeos_laptop hp_chromebook_14 = { + .i2c_peripherals = { + /* Touchpad. */ + { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, + }, +}; + +static struct chromeos_laptop dell_chromebook_11 = { + .i2c_peripherals = { + /* Touchpad. */ + { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, + }, +}; + +static struct chromeos_laptop toshiba_cb35 = { + .i2c_peripherals = { + /* Touchpad. */ + { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, + }, +}; + static struct chromeos_laptop acer_c7_chromebook = { .i2c_peripherals = { /* Touchpad. */ @@ -377,6 +450,17 @@ static struct chromeos_laptop acer_ac700 = { }, }; +static struct chromeos_laptop acer_c720 = { + .i2c_peripherals = { + /* Touchscreen. */ + { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, + /* Touchpad. */ + { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, + /* Light Sensor. */ + { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 }, + }, +}; + static struct chromeos_laptop hp_pavilion_14_chromebook = { .i2c_peripherals = { /* Touchpad. 
*/ @@ -420,6 +504,30 @@ static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = { _CBDD(chromebook_pixel), }, { + .ident = "Wolf", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"), + DMI_MATCH(DMI_PRODUCT_NAME, "Wolf"), + }, + _CBDD(dell_chromebook_11), + }, + { + .ident = "HP Chromebook 14", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"), + DMI_MATCH(DMI_PRODUCT_NAME, "Falco"), + }, + _CBDD(hp_chromebook_14), + }, + { + .ident = "Toshiba CB35", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"), + DMI_MATCH(DMI_PRODUCT_NAME, "Leon"), + }, + _CBDD(toshiba_cb35), + }, + { .ident = "Acer C7 Chromebook", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"), @@ -434,6 +542,13 @@ static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = { _CBDD(acer_ac700), }, { + .ident = "Acer C720", + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"), + }, + _CBDD(acer_c720), + }, + { .ident = "HP Pavilion 14 Chromebook", .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"), @@ -464,6 +579,7 @@ static struct platform_driver cros_platform_driver = { static int __init chromeos_laptop_init(void) { int ret; + if (!dmi_check_system(chromeos_laptop_dmi_table)) { pr_debug("%s unsupported system.\n", __func__); return -ENODEV; diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index e0e0e65cf442..34749200e4ab 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -16,23 +16,13 @@ static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = { { /* - * Today all Chromebooks/boxes ship with GOOGLE as vendor and + * Today all Chromebooks/boxes ship with Google_* as version and * coreboot as bios vendor. No other systems with this * combination are known to date. */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), - DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"), - }, - }, - { - /* - * The first Samsung Chromebox and Chromebook Series 5 550 use - * coreboot but with Samsung as the system vendor. - */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"), DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"), + DMI_MATCH(DMI_BIOS_VERSION, "Google_"), }, }, { diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index aa19a8a7066a..4dcfb7116a04 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -652,6 +652,25 @@ config TOSHIBA_BT_RFKILL If you have a modern Toshiba laptop with a Bluetooth and an RFKill switch (such as the Portege R500), say Y. +config TOSHIBA_HAPS + tristate "Toshiba HDD Active Protection Sensor" + depends on ACPI + ---help--- + This driver adds support for the built-in accelerometer + found on recent Toshiba laptops equiped with HID TOS620A + device. + + This driver receives ACPI notify events 0x80 when the sensor + detects a sudden move or a harsh vibration, as well as an + ACPI notify event 0x81 whenever the movement or vibration has + been stabilized. + + Also provides sysfs entries to get/set the desired protection + level and reseting the HDD protection interface. + + If you have a recent Toshiba laptop with a built-in accelerometer + device, say Y. 
+ config ACPI_CMPC tristate "CMPC Laptop Extras" depends on X86 && ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index c4ca428fd3db..f82232b1fc4d 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o +obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index bbf78b2d6d93..96a0b75c52c9 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -96,7 +96,7 @@ enum acer_wmi_event_ids { WMID_ACCEL_EVENT = 0x5, }; -static const struct key_entry acer_wmi_keymap[] = { +static const struct key_entry acer_wmi_keymap[] __initconst = { {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */ @@ -294,7 +294,7 @@ struct quirk_entry { static struct quirk_entry *quirks; -static void set_quirks(void) +static void __init set_quirks(void) { if (!interface) return; @@ -306,7 +306,7 @@ static void set_quirks(void) interface->capability |= ACER_CAP_BRIGHTNESS; } -static int dmi_matched(const struct dmi_system_id *dmi) +static int __init dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; @@ -337,7 +337,7 @@ static struct quirk_entry quirk_lenovo_ideapad_s205 = { }; /* The Aspire One has a dummy ACPI-WMI interface - disable it */ -static struct dmi_system_id acer_blacklist[] = { +static const struct dmi_system_id acer_blacklist[] __initconst = { { .ident = "Acer Aspire One (SSD)", .matches = { @@ -355,7 +355,7 @@ static struct dmi_system_id acer_blacklist[] = { {} }; -static struct dmi_system_id acer_quirks[] = { +static const struct dmi_system_id acer_quirks[] __initconst = { { .callback = dmi_matched, .ident = "Acer Aspire 1360", @@ -530,14 +530,15 @@ static struct dmi_system_id acer_quirks[] = { {} }; -static int video_set_backlight_video_vendor(const struct dmi_system_id *d) +static int __init +video_set_backlight_video_vendor(const struct dmi_system_id *d) { interface->capability &= ~ACER_CAP_BRIGHTNESS; pr_info("Brightness must be controlled by generic video driver\n"); return 0; } -static const struct dmi_system_id video_vendor_dmi_table[] = { +static const struct dmi_system_id video_vendor_dmi_table[] __initconst = { { .callback = video_set_backlight_video_vendor, .ident = "Acer TravelMate 4750", @@ -582,7 +583,7 @@ static const struct dmi_system_id video_vendor_dmi_table[] = { }; /* Find which quirks are needed for a particular vendor/ model pair */ -static void find_quirks(void) +static void __init find_quirks(void) { if (!force_series) { dmi_check_system(acer_quirks); @@ -749,7 +750,7 @@ static acpi_status AMW0_set_u32(u32 value, u32 cap) return wmab_execute(&args, NULL); } -static acpi_status AMW0_find_mailled(void) +static acpi_status __init AMW0_find_mailled(void) { struct wmab_args args; struct wmab_ret ret; @@ -781,16 +782,16 @@ static acpi_status AMW0_find_mailled(void) return AE_OK; } -static int AMW0_set_cap_acpi_check_device_found; +static int AMW0_set_cap_acpi_check_device_found __initdata; -static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle, +static acpi_status __init 
AMW0_set_cap_acpi_check_device_cb(acpi_handle handle, u32 level, void *context, void **retval) { AMW0_set_cap_acpi_check_device_found = 1; return AE_OK; } -static const struct acpi_device_id norfkill_ids[] = { +static const struct acpi_device_id norfkill_ids[] __initconst = { { "VPC2004", 0}, { "IBM0068", 0}, { "LEN0068", 0}, @@ -798,7 +799,7 @@ static const struct acpi_device_id norfkill_ids[] = { { "", 0}, }; -static int AMW0_set_cap_acpi_check_device(void) +static int __init AMW0_set_cap_acpi_check_device(void) { const struct acpi_device_id *id; @@ -808,7 +809,7 @@ static int AMW0_set_cap_acpi_check_device(void) return AMW0_set_cap_acpi_check_device_found; } -static acpi_status AMW0_set_capabilities(void) +static acpi_status __init AMW0_set_capabilities(void) { struct wmab_args args; struct wmab_ret ret; @@ -1184,7 +1185,7 @@ static acpi_status wmid_v2_set_u32(u32 value, u32 cap) return wmid3_set_device_status(value, device); } -static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy) +static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d) { struct hotkey_function_type_aa *type_aa; @@ -1209,7 +1210,7 @@ static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy) commun_fn_key_number = type_aa->commun_fn_key_number; } -static acpi_status WMID_set_capabilities(void) +static acpi_status __init WMID_set_capabilities(void) { struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; @@ -1658,7 +1659,7 @@ static ssize_t show_bool_threeg(struct device *dev, u32 result; \ acpi_status status; - pr_info("This threeg sysfs will be removed in 2012 - used by: %s\n", + pr_info("This threeg sysfs will be removed in 2014 - used by: %s\n", current->comm); status = get_u32(&result, ACER_CAP_THREEG); if (ACPI_SUCCESS(status)) @@ -1671,7 +1672,7 @@ static ssize_t set_bool_threeg(struct device *dev, { u32 tmp = simple_strtoul(buf, NULL, 10); acpi_status status = set_u32(tmp, ACER_CAP_THREEG); - pr_info("This threeg sysfs will be removed in 2012 - used by: %s\n", + pr_info("This threeg sysfs will be removed in 2014 - used by: %s\n", current->comm); if (ACPI_FAILURE(status)) return -EINVAL; @@ -1683,7 +1684,7 @@ static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, static ssize_t show_interface(struct device *dev, struct device_attribute *attr, char *buf) { - pr_info("This interface sysfs will be removed in 2012 - used by: %s\n", + pr_info("This interface sysfs will be removed in 2014 - used by: %s\n", current->comm); switch (interface->type) { case ACER_AMW0: @@ -1777,7 +1778,7 @@ static void acer_wmi_notify(u32 value, void *context) } } -static acpi_status +static acpi_status __init wmid3_set_lm_mode(struct lm_input_params *params, struct lm_return_value *return_value) { @@ -1811,7 +1812,7 @@ wmid3_set_lm_mode(struct lm_input_params *params, return status; } -static int acer_wmi_enable_ec_raw(void) +static int __init acer_wmi_enable_ec_raw(void) { struct lm_return_value return_value; acpi_status status; @@ -1834,7 +1835,7 @@ static int acer_wmi_enable_ec_raw(void) return status; } -static int acer_wmi_enable_lm(void) +static int __init acer_wmi_enable_lm(void) { struct lm_return_value return_value; acpi_status status; @@ -2043,6 +2044,7 @@ static int acer_platform_remove(struct platform_device *device) return 0; } +#ifdef CONFIG_PM_SLEEP static int acer_suspend(struct device *dev) { u32 value; @@ -2083,6 +2085,10 @@ static int acer_resume(struct device *dev) return 0; } +#else +#define acer_suspend NULL +#define 
acer_resume NULL +#endif static SIMPLE_DEV_PM_OPS(acer_pm, acer_suspend, acer_resume); @@ -2120,7 +2126,7 @@ static int remove_sysfs(struct platform_device *device) return 0; } -static int create_sysfs(void) +static int __init create_sysfs(void) { int retval = -ENOMEM; @@ -2149,7 +2155,7 @@ static void remove_debugfs(void) debugfs_remove(interface->debug.root); } -static int create_debugfs(void) +static int __init create_debugfs(void) { interface->debug.root = debugfs_create_dir("acer-wmi", NULL); if (!interface->debug.root) { diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index 297b6640213f..c5af23b64438 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -59,25 +59,33 @@ enum WMAX_CONTROL_STATES { struct quirk_entry { u8 num_zones; + u8 hdmi_mux; }; static struct quirk_entry *quirks; static struct quirk_entry quirk_unknown = { .num_zones = 2, + .hdmi_mux = 0, }; static struct quirk_entry quirk_x51_family = { .num_zones = 3, + .hdmi_mux = 0, }; -static int dmi_matched(const struct dmi_system_id *dmi) +static struct quirk_entry quirk_asm100 = { + .num_zones = 2, + .hdmi_mux = 1, +}; + +static int __init dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; } -static struct dmi_system_id alienware_quirks[] = { +static const struct dmi_system_id alienware_quirks[] __initconst = { { .callback = dmi_matched, .ident = "Alienware X51 R1", @@ -96,6 +104,15 @@ static struct dmi_system_id alienware_quirks[] = { }, .driver_data = &quirk_x51_family, }, + { + .callback = dmi_matched, + .ident = "Alienware ASM100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"), + }, + .driver_data = &quirk_asm100, + }, {} }; @@ -537,7 +554,8 @@ static struct attribute_group hdmi_attribute_group = { static void remove_hdmi(struct platform_device *dev) { - sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group); + if (quirks->hdmi_mux > 0) + sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group); } static int create_hdmi(struct platform_device *dev) @@ -583,7 +601,7 @@ static int __init alienware_wmi_init(void) if (ret) goto fail_platform_device2; - if (interface == WMAX) { + if (quirks->hdmi_mux > 0) { ret = create_hdmi(platform_device); if (ret) goto fail_prep_hdmi; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index ddf0eefd862c..3a4951f46065 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -70,17 +70,35 @@ static struct quirk_entry quirk_asus_x55u = { .no_display_toggle = true, }; -static struct quirk_entry quirk_asus_x401u = { +static struct quirk_entry quirk_asus_wapf4 = { .wapf = 4, }; +static struct quirk_entry quirk_asus_x200ca = { + .wapf = 2, +}; + static int dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; } -static struct dmi_system_id asus_quirks[] = { +static const struct dmi_system_id asus_quirks[] = { + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. U32U", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "U32U"), + }, + /* + * Note this machine has a Brazos APU, and most Brazos Asus + * machines need quirk_asus_x55u / wmi_backlight_power but + * here acpi-video seems to work fine for backlight control. + */ + .driver_data = &quirk_asus_wapf4, + }, { .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. 
X401U", @@ -97,7 +115,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X401A"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -106,7 +124,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X401A1"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -124,7 +142,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X501A"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -133,7 +151,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X501A1"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -142,7 +160,25 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X550CA"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X550CC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X550CC"), + }, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X550CL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X550CL"), + }, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -151,7 +187,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X55A"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -160,7 +196,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X55C"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -178,7 +214,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X55VD"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -187,7 +223,16 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X75A"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. 
X75VBP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X75VBP"), + }, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -196,7 +241,7 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "1015E"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, }, { .callback = dmi_matched, @@ -205,7 +250,16 @@ static struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "1015U"), }, - .driver_data = &quirk_asus_x401u, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X200CA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X200CA"), + }, + .driver_data = &quirk_asus_x200ca, }, {}, }; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 3c6ccedc82b6..21fc932da3a1 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -46,6 +46,7 @@ #include <linux/platform_device.h> #include <linux/thermal.h> #include <linux/acpi.h> +#include <linux/dmi.h> #include <acpi/video.h> #include "asus-wmi.h" @@ -554,7 +555,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus) goto error; } - if (wlan_led_presence(asus) && (asus->driver->quirks->wapf == 4)) { + if (wlan_led_presence(asus) && (asus->driver->quirks->wapf > 0)) { INIT_WORK(&asus->wlan_led_work, wlan_led_update); asus->wlan_led.name = "asus::wlan"; @@ -884,7 +885,7 @@ static int asus_new_rfkill(struct asus_wmi *asus, return -EINVAL; if ((dev_id == ASUS_WMI_DEVID_WLAN) && - (asus->driver->quirks->wapf == 4)) + (asus->driver->quirks->wapf > 0)) rfkill_set_led_trigger_name(*rfkill, "asus-wlan"); rfkill_init_sw_state(*rfkill, !result); @@ -1270,10 +1271,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus) int power; max = read_brightness_max(asus); - - if (max == -ENODEV) - max = 0; - else if (max < 0) + if (max < 0) return max; power = read_backlight_power(asus); @@ -1734,6 +1732,7 @@ static int asus_wmi_add(struct platform_device *pdev) struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver); struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv); struct asus_wmi *asus; + const char *chassis_type; acpi_status status; int err; u32 result; @@ -1770,6 +1769,11 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_rfkill; + /* Some Asus desktop boards export an acpi-video backlight interface, + stop this from showing up */ + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + if (chassis_type && !strcmp(chassis_type, "3")) + acpi_video_dmi_promote_vendor(); if (asus->driver->quirks->wmi_backlight_power) acpi_video_dmi_promote_vendor(); if (!acpi_video_backlight_support()) { diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 7297df2ebf50..26bfd7bb5c13 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -1028,7 +1028,7 @@ static int compal_probe(struct platform_device *pdev) return err; hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, - DRIVER_NAME, data, + "compal", data, compal_hwmon_groups); if (IS_ERR(hwmon_dev)) { err = PTR_ERR(hwmon_dev); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index fed4111ac31a..233d2ee598a6 100644 --- a/drivers/platform/x86/dell-laptop.c +++ 
b/drivers/platform/x86/dell-laptop.c @@ -70,7 +70,7 @@ static struct quirk_entry quirk_dell_vostro_v130 = { .touchpad_led = 1, }; -static int dmi_matched(const struct dmi_system_id *dmi) +static int __init dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; @@ -123,7 +123,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = { }; MODULE_DEVICE_TABLE(dmi, dell_device_table); -static struct dmi_system_id dell_quirks[] = { +static const struct dmi_system_id dell_quirks[] __initconst = { { .callback = dmi_matched, .ident = "Dell Vostro V130", @@ -780,7 +780,7 @@ static struct led_classdev touchpad_led = { .flags = LED_CORE_SUSPENDRESUME, }; -static int touchpad_led_init(struct device *dev) +static int __init touchpad_led_init(struct device *dev) { return led_classdev_register(dev, &touchpad_led); } diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 9b0c57cd1d4a..bd533c22be57 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -1053,20 +1053,20 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf) return sprintf(buf, "%d\n", get()); } -#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _set, _get) \ +#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _get, _set) \ static ssize_t show_##_name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ - return show_sys_hwmon(_set, buf); \ + return show_sys_hwmon(_get, buf); \ } \ static ssize_t store_##_name(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ - return store_sys_hwmon(_get, buf, count); \ + return store_sys_hwmon(_set, buf, count); \ } \ - static DEVICE_ATTR(_name, _mode, show_##_name, store_##_name); + static DEVICE_ATTR(_name, _mode, show_##_name, store_##_name) EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL); EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR, diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 6112933f6278..14fd2ecb06a1 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c @@ -145,7 +145,7 @@ static int dmi_matched(const struct dmi_system_id *dmi) return 1; } -static struct dmi_system_id asus_quirks[] = { +static const struct dmi_system_id asus_quirks[] = { { .callback = dmi_matched, .ident = "ASUSTeK Computer INC. 1000H", diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index e6f336270c21..87aa28c4280f 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -129,15 +129,14 @@ #define FUJLAPTOP_DBG_INFO 0x0004 #define FUJLAPTOP_DBG_TRACE 0x0008 -#define dbg_printk(a_dbg_level, format, arg...) \ +#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG +#define vdbg_printk(a_dbg_level, format, arg...) \ do { if (dbg_level & a_dbg_level) \ printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \ } while (0) -#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG -#define vdbg_printk(a_dbg_level, format, arg...) \ - dbg_printk(a_dbg_level, format, ## arg) #else -#define vdbg_printk(a_dbg_level, format, arg...) +#define vdbg_printk(a_dbg_level, format, arg...) 
\ + do { } while (0) #endif /* Device controlling the backlight and associated keys */ @@ -564,7 +563,7 @@ static struct platform_driver fujitsupf_driver = { } }; -static void dmi_check_cb_common(const struct dmi_system_id *id) +static void __init dmi_check_cb_common(const struct dmi_system_id *id) { pr_info("Identified laptop model '%s'\n", id->ident); if (use_alt_lcd_levels == -1) { @@ -578,7 +577,7 @@ static void dmi_check_cb_common(const struct dmi_system_id *id) } } -static int dmi_check_cb_s6410(const struct dmi_system_id *id) +static int __init dmi_check_cb_s6410(const struct dmi_system_id *id) { dmi_check_cb_common(id); fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ @@ -586,7 +585,7 @@ static int dmi_check_cb_s6410(const struct dmi_system_id *id) return 1; } -static int dmi_check_cb_s6420(const struct dmi_system_id *id) +static int __init dmi_check_cb_s6420(const struct dmi_system_id *id) { dmi_check_cb_common(id); fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */ @@ -594,7 +593,7 @@ static int dmi_check_cb_s6420(const struct dmi_system_id *id) return 1; } -static int dmi_check_cb_p8010(const struct dmi_system_id *id) +static int __init dmi_check_cb_p8010(const struct dmi_system_id *id) { dmi_check_cb_common(id); fujitsu->keycode1 = KEY_HELP; /* "Support" */ @@ -603,7 +602,7 @@ static int dmi_check_cb_p8010(const struct dmi_system_id *id) return 1; } -static struct dmi_system_id fujitsu_dmi_table[] = { +static const struct dmi_system_id fujitsu_dmi_table[] __initconst = { { .ident = "Fujitsu Siemens S6410", .matches = { diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c index c3784baceae3..53bdbb01bd3f 100644 --- a/drivers/platform/x86/fujitsu-tablet.c +++ b/drivers/platform/x86/fujitsu-tablet.c @@ -315,21 +315,21 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void fujitsu_dmi_common(const struct dmi_system_id *dmi) +static void __init fujitsu_dmi_common(const struct dmi_system_id *dmi) { pr_info("%s\n", dmi->ident); memcpy(fujitsu.config.keymap, dmi->driver_data, sizeof(fujitsu.config.keymap)); } -static int fujitsu_dmi_lifebook(const struct dmi_system_id *dmi) +static int __init fujitsu_dmi_lifebook(const struct dmi_system_id *dmi) { fujitsu_dmi_common(dmi); fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT; return 1; } -static int fujitsu_dmi_stylistic(const struct dmi_system_id *dmi) +static int __init fujitsu_dmi_stylistic(const struct dmi_system_id *dmi) { fujitsu_dmi_common(dmi); fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 484a8673b835..4c559640dcba 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -295,7 +295,7 @@ static int hp_wmi_tablet_state(void) return (state & 0x4) ? 
1 : 0; } -static int hp_wmi_bios_2009_later(void) +static int __init hp_wmi_bios_2009_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, @@ -704,7 +704,7 @@ static void cleanup_sysfs(struct platform_device *device) device_remove_file(&device->dev, &dev_attr_postcode); } -static int hp_wmi_rfkill_setup(struct platform_device *device) +static int __init hp_wmi_rfkill_setup(struct platform_device *device) { int err; int wireless = 0; @@ -806,7 +806,7 @@ register_wifi_error: return err; } -static int hp_wmi_rfkill2_setup(struct platform_device *device) +static int __init hp_wmi_rfkill2_setup(struct platform_device *device) { int err, i; struct bios_rfkill2_state state; diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 3dc934438c28..13e14ec1d3d7 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -74,7 +74,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev, /* HP-specific accelerometer driver ------------------------------------ */ /* For automatic insertion of the module */ -static struct acpi_device_id lis3lv02d_device_ids[] = { +static const struct acpi_device_id lis3lv02d_device_ids[] = { {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */ {"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */ {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */ @@ -192,7 +192,7 @@ DEFINE_CONV(xy_swap_yz_inverted, 2, -1, -3); }, \ .driver_data = &lis3lv02d_axis_##_axis \ } -static struct dmi_system_id lis3lv02d_dmi_ids[] = { +static const struct dmi_system_id lis3lv02d_dmi_ids[] = { /* product names are truncated to match all kinds of a same model */ AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted), AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted), diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index b4c495a62eec..02152de135b5 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -87,6 +87,7 @@ struct ideapad_private { struct backlight_device *blightdev; struct dentry *debug; unsigned long cfg; + bool has_hw_rfkill_switch; }; static bool no_bt_rfkill; @@ -439,7 +440,7 @@ static umode_t ideapad_is_visible(struct kobject *kobj, return supported ? 
attr->mode : 0; } -static struct attribute_group ideapad_attribute_group = { +static const struct attribute_group ideapad_attribute_group = { .is_visible = ideapad_is_visible, .attrs = ideapad_attributes }; @@ -454,7 +455,7 @@ struct ideapad_rfk_data { int type; }; -const struct ideapad_rfk_data ideapad_rfk_data[] = { +static const struct ideapad_rfk_data ideapad_rfk_data[] = { { "ideapad_wlan", CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN }, { "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH }, { "ideapad_3g", CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN }, @@ -473,12 +474,14 @@ static struct rfkill_ops ideapad_rfk_ops = { static void ideapad_sync_rfk_state(struct ideapad_private *priv) { - unsigned long hw_blocked; + unsigned long hw_blocked = 0; int i; - if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked)) - return; - hw_blocked = !hw_blocked; + if (priv->has_hw_rfkill_switch) { + if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked)) + return; + hw_blocked = !hw_blocked; + } for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) if (priv->rfk[i]) @@ -821,14 +824,17 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) } } -/* Blacklist for devices where the ideapad rfkill interface does not work */ -static struct dmi_system_id rfkill_blacklist[] = { - /* The Lenovo Yoga 2 11 always reports everything as blocked */ +/* + * Some ideapads don't have a hardware rfkill switch, reading VPCCMD_R_RF + * always results in 0 on these models, causing ideapad_laptop to wrongly + * report all radios as hardware-blocked. + */ +static const struct dmi_system_id no_hw_rfkill_list[] = { { - .ident = "Lenovo Yoga 2 11", + .ident = "Lenovo Yoga 2 11 / 13 / Pro", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"), }, }, {} @@ -856,6 +862,7 @@ static int ideapad_acpi_add(struct platform_device *pdev) priv->cfg = cfg; priv->adev = adev; priv->platform_device = pdev; + priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list); ret = ideapad_sysfs_init(priv); if (ret) @@ -869,11 +876,17 @@ static int ideapad_acpi_add(struct platform_device *pdev) if (ret) goto input_failed; - if (!dmi_check_system(rfkill_blacklist)) { - for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) - if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) - ideapad_register_rfkill(priv, i); - } + /* + * On some models without a hw-switch (the yoga 2 13 at least) + * VPCCMD_W_RF must be explicitly set to 1 for the wifi to work. 
+ */ + if (!priv->has_hw_rfkill_switch) + write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1); + + for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) + if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) + ideapad_register_rfkill(priv, i); + ideapad_sync_rfk_state(priv); ideapad_sync_touchpad_state(priv); diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 18dcb58ba965..c0242ed13d9e 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -269,7 +269,7 @@ struct ips_mcp_limits { /* Max temps are -10 degrees C to avoid PROCHOT# */ -struct ips_mcp_limits ips_sv_limits = { +static struct ips_mcp_limits ips_sv_limits = { .mcp_power_limit = 35000, .core_power_limit = 29000, .mch_power_limit = 20000, @@ -277,7 +277,7 @@ struct ips_mcp_limits ips_sv_limits = { .mch_temp_limit = 90 }; -struct ips_mcp_limits ips_lv_limits = { +static struct ips_mcp_limits ips_lv_limits = { .mcp_power_limit = 25000, .core_power_limit = 21000, .mch_power_limit = 13000, @@ -285,7 +285,7 @@ struct ips_mcp_limits ips_lv_limits = { .mch_temp_limit = 90 }; -struct ips_mcp_limits ips_ulv_limits = { +static struct ips_mcp_limits ips_ulv_limits = { .mcp_power_limit = 18000, .core_power_limit = 14000, .mch_power_limit = 11000, @@ -1478,7 +1478,7 @@ ips_link_to_i915_driver(void) } EXPORT_SYMBOL_GPL(ips_link_to_i915_driver); -static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = { +static const struct pci_device_id ips_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), }, { 0, } diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 76ca094ed012..66a4d3284aab 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -640,7 +640,7 @@ static void ipc_remove(struct pci_dev *pdev) intel_scu_devices_destroy(); } -static DEFINE_PCI_DEVICE_TABLE(pci_ids) = { +static const struct pci_device_id pci_ids[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT), (kernel_ulong_t)&intel_scu_ipc_lincroft_pdata, diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c index 5413f62d2e61..28d12bda3ac1 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c @@ -46,13 +46,7 @@ static int samsungq10_bl_set_intensity(struct backlight_device *bd) return 0; } -static int samsungq10_bl_get_intensity(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static const struct backlight_ops samsungq10_bl_ops = { - .get_brightness = samsungq10_bl_get_intensity, .update_status = samsungq10_bl_set_intensity, }; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 9c5a07417b2b..26ad9ff12ac5 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -2389,7 +2389,7 @@ static int sony_nc_lid_resume_setup(struct platform_device *pd, lid_ctl->attrs[LID_RESUME_S3].store = sony_nc_lid_resume_store; } for (i = 0; i < LID_RESUME_MAX && - lid_ctl->attrs[LID_RESUME_S3].attr.name; i++) { + lid_ctl->attrs[i].attr.name; i++) { result = device_create_file(&pd->dev, &lid_ctl->attrs[i]); if (result) goto liderror; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index d82f196e3cfe..3bbc6eb60de5 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3174,7 +3174,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) KEY_UNKNOWN, /* Extra keys in use since the X240 / T440 / T540 */ - 
KEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_COMPUTER, + KEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_FILE, }, }; @@ -6144,7 +6144,7 @@ static int brightness_set(unsigned int value) { int res; - if (value > bright_maxlvl || value < 0) + if (value > bright_maxlvl) return -EINVAL; vdbg_printk(TPACPI_DBG_BRGHT, @@ -6860,7 +6860,7 @@ static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol, return volume_alsa_set_mute(!ucontrol->value.integer.value[0]); } -static struct snd_kcontrol_new volume_alsa_control_vol = { +static struct snd_kcontrol_new volume_alsa_control_vol __initdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Console Playback Volume", .index = 0, @@ -6869,7 +6869,7 @@ static struct snd_kcontrol_new volume_alsa_control_vol = { .get = volume_alsa_vol_get, }; -static struct snd_kcontrol_new volume_alsa_control_mute = { +static struct snd_kcontrol_new volume_alsa_control_mute __initdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Console Playback Switch", .index = 0, diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 76441dcbe5ff..d0dce734b2ed 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -222,6 +222,12 @@ static const struct dmi_system_id toshiba_alt_keymap_dmi[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Satellite M840"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Qosmio X75-A"), + }, + }, {} }; @@ -229,6 +235,7 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = { { KE_KEY, 0x157, { KEY_MUTE } }, { KE_KEY, 0x102, { KEY_ZOOMOUT } }, { KE_KEY, 0x103, { KEY_ZOOMIN } }, + { KE_KEY, 0x12c, { KEY_KBDILLUMTOGGLE } }, { KE_KEY, 0x139, { KEY_ZOOMRESET } }, { KE_KEY, 0x13e, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0x13c, { KEY_BRIGHTNESSDOWN } }, @@ -872,7 +879,9 @@ static int lcd_proc_open(struct inode *inode, struct file *file) static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value) { - u32 hci_result; + u32 in[HCI_WORDS] = { HCI_SET, HCI_LCD_BRIGHTNESS, 0, 0, 0, 0 }; + u32 out[HCI_WORDS]; + acpi_status status; if (dev->tr_backlight_supported) { bool enable = !value; @@ -883,9 +892,20 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value) value--; } - value = value << HCI_LCD_BRIGHTNESS_SHIFT; - hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result); - return hci_result == HCI_SUCCESS ? 0 : -EIO; + in[2] = value << HCI_LCD_BRIGHTNESS_SHIFT; + status = hci_raw(dev, in, out); + if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) { + pr_err("ACPI call to set brightness failed"); + return -EIO; + } + /* Extra check for "incomplete" backlight method, where the AML code + * doesn't check for HCI_SET or HCI_GET and returns HCI_SUCCESS, + * the actual brightness, and in some cases the max brightness. + */ + if (out[2] > 0 || out[3] == 0xE000) + return -ENODEV; + + return out[0] == HCI_SUCCESS ? 
0 : -EIO; } static int set_lcd_status(struct backlight_device *bd) @@ -1235,10 +1255,15 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); - int mode = -1; - int time = -1; + int mode; + int time; + int ret; + - if (sscanf(buf, "%i", &mode) != 1 && (mode != 2 || mode != 1)) + ret = kstrtoint(buf, 0, &mode); + if (ret) + return ret; + if (mode != SCI_KBD_MODE_FNZ && mode != SCI_KBD_MODE_AUTO) return -EINVAL; /* Set the Keyboard Backlight Mode where: @@ -1246,11 +1271,12 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, * Auto - KBD backlight turns off automatically in given time * FN-Z - KBD backlight "toggles" when hotkey pressed */ - if (mode != -1 && toshiba->kbd_mode != mode) { + if (toshiba->kbd_mode != mode) { time = toshiba->kbd_time << HCI_MISC_SHIFT; time = time + toshiba->kbd_mode; - if (toshiba_kbd_illum_status_set(toshiba, time) < 0) - return -EIO; + ret = toshiba_kbd_illum_status_set(toshiba, time); + if (ret) + return ret; toshiba->kbd_mode = mode; } @@ -1837,9 +1863,16 @@ static int toshiba_acpi_resume(struct device *device) { struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); u32 result; + acpi_status status; + + if (dev->hotkey_dev) { + status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB", + NULL, NULL); + if (ACPI_FAILURE(status)) + pr_info("Unable to re-enable hotkeys\n"); - if (dev->hotkey_dev) hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &result); + } return 0; } diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c new file mode 100644 index 000000000000..65300b6a84b9 --- /dev/null +++ b/drivers/platform/x86/toshiba_haps.c @@ -0,0 +1,265 @@ +/* + * Toshiba HDD Active Protection Sensor (HAPS) driver + * + * Copyright (C) 2014 Azael Avalos <coproscefalo@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/acpi.h> + +MODULE_AUTHOR("Azael Avalos <coproscefalo@gmail.com>"); +MODULE_DESCRIPTION("Toshiba HDD Active Protection Sensor"); +MODULE_LICENSE("GPL"); + +struct toshiba_haps_dev { + struct acpi_device *acpi_dev; + + int protection_level; +}; + +static struct toshiba_haps_dev *toshiba_haps; + +/* HAPS functions */ +static int toshiba_haps_reset_protection(acpi_handle handle) +{ + acpi_status status; + + status = acpi_evaluate_object(handle, "RSSS", NULL, NULL); + if (ACPI_FAILURE(status)) { + pr_err("Unable to reset the HDD protection\n"); + return -EIO; + } + + return 0; +} + +static int toshiba_haps_protection_level(acpi_handle handle, int level) +{ + acpi_status status; + + status = acpi_execute_simple_method(handle, "PTLV", level); + if (ACPI_FAILURE(status)) { + pr_err("Error while setting the protection level\n"); + return -EIO; + } + + pr_info("HDD protection level set to: %d\n", level); + + return 0; +} + +/* sysfs files */ +static ssize_t protection_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct toshiba_haps_dev *haps = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", haps->protection_level); +} + +static ssize_t protection_level_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_haps_dev *haps = dev_get_drvdata(dev); + int level, ret; + + if (sscanf(buf, "%d", &level) != 1 || level < 0 || level > 3) + return -EINVAL; + + /* Set the sensor level. + * Acceptable levels are: + * 0 - Disabled | 1 - Low | 2 - Medium | 3 - High + */ + ret = toshiba_haps_protection_level(haps->acpi_dev->handle, level); + if (ret != 0) + return ret; + + haps->protection_level = level; + + return count; +} + +static ssize_t reset_protection_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_haps_dev *haps = dev_get_drvdata(dev); + int reset, ret; + + if (sscanf(buf, "%d", &reset) != 1 || reset != 1) + return -EINVAL; + + /* Reset the protection interface */ + ret = toshiba_haps_reset_protection(haps->acpi_dev->handle); + if (ret != 0) + return ret; + + return count; +} + +static DEVICE_ATTR(protection_level, S_IRUGO | S_IWUSR, + protection_level_show, protection_level_store); +static DEVICE_ATTR(reset_protection, S_IWUSR, NULL, reset_protection_store); + +static struct attribute *haps_attributes[] = { + &dev_attr_protection_level.attr, + &dev_attr_reset_protection.attr, + NULL, +}; + +static struct attribute_group haps_attr_group = { + .attrs = haps_attributes, +}; + +/* + * ACPI stuff + */ +static void toshiba_haps_notify(struct acpi_device *device, u32 event) +{ + pr_info("Received event: 0x%x", event); + + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), + event, 0); +} + +static int toshiba_haps_remove(struct acpi_device *device) +{ + sysfs_remove_group(&device->dev.kobj, &haps_attr_group); + + if (toshiba_haps) + toshiba_haps = NULL; + + return 0; +} + +/* Helper function */ +static int toshiba_haps_available(acpi_handle handle) +{ + acpi_status status; + u64 hdd_present; + + /* + * A non existent device as well as having (only) + * Solid State Drives can cause the call to fail. 
+ */ + status = acpi_evaluate_integer(handle, "_STA", NULL, + &hdd_present); + if (ACPI_FAILURE(status) || !hdd_present) { + pr_info("HDD protection not available or using SSD\n"); + return 0; + } + + return 1; +} + +static int toshiba_haps_add(struct acpi_device *acpi_dev) +{ + struct toshiba_haps_dev *haps; + int ret; + + if (toshiba_haps) + return -EBUSY; + + if (!toshiba_haps_available(acpi_dev->handle)) + return -ENODEV; + + pr_info("Toshiba HDD Active Protection Sensor device\n"); + + haps = kzalloc(sizeof(struct toshiba_haps_dev), GFP_KERNEL); + if (!haps) + return -ENOMEM; + + haps->acpi_dev = acpi_dev; + haps->protection_level = 2; + acpi_dev->driver_data = haps; + dev_set_drvdata(&acpi_dev->dev, haps); + + /* Set the protection level, currently at level 2 (Medium) */ + ret = toshiba_haps_protection_level(acpi_dev->handle, 2); + if (ret != 0) + return ret; + + ret = sysfs_create_group(&acpi_dev->dev.kobj, &haps_attr_group); + if (ret) + return ret; + + toshiba_haps = haps; + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int toshiba_haps_suspend(struct device *device) +{ + struct toshiba_haps_dev *haps; + int ret; + + haps = acpi_driver_data(to_acpi_device(device)); + + /* Deactivate the protection on suspend */ + ret = toshiba_haps_protection_level(haps->acpi_dev->handle, 0); + + return ret; +} + +static int toshiba_haps_resume(struct device *device) +{ + struct toshiba_haps_dev *haps; + int ret; + + haps = acpi_driver_data(to_acpi_device(device)); + + /* Set the stored protection level */ + ret = toshiba_haps_protection_level(haps->acpi_dev->handle, + haps->protection_level); + + /* Reset the protection on resume */ + ret = toshiba_haps_reset_protection(haps->acpi_dev->handle); + if (ret != 0) + return ret; + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(toshiba_haps_pm, + toshiba_haps_suspend, toshiba_haps_resume); + +static const struct acpi_device_id haps_device_ids[] = { + {"TOS620A", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, haps_device_ids); + +static struct acpi_driver toshiba_haps_driver = { + .name = "Toshiba HAPS", + .owner = THIS_MODULE, + .ids = haps_device_ids, + .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, + .ops = { + .add = toshiba_haps_add, + .remove = toshiba_haps_remove, + .notify = toshiba_haps_notify, + }, + .drv.pm = &toshiba_haps_pm, +}; + +module_acpi_driver(toshiba_haps_driver); diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 43d13295e63d..737e56d46f61 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -256,10 +256,6 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable) block = &wblock->gblock; handle = wblock->handle; - if (!block) - return AE_NOT_EXIST; - - snprintf(method, 5, "WE%02X", block->notify_id); status = acpi_execute_simple_method(handle, method, enable); diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index f2ac54df496f..ca41523bbebf 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -39,6 +39,12 @@ config POWER_RESET_GPIO If your board needs a GPIO high/low to power down, say Y and create a binding in your devicetree. +config POWER_RESET_HISI + bool "Hisilicon power-off driver" + depends on POWER_RESET && ARCH_HISI + help + Reboot support for Hisilicon boards. 
+ config POWER_RESET_MSM bool "Qualcomm MSM power-off driver" depends on POWER_RESET && ARCH_QCOM diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index 7379818ca69d..a42e70edd037 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o +obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c new file mode 100644 index 000000000000..0c91d0231d36 --- /dev/null +++ b/drivers/power/reset/hisi-reboot.c @@ -0,0 +1,67 @@ +/* + * Hisilicon SoC reset code + * + * Copyright (c) 2014 Hisilicon Ltd. + * Copyright (c) 2014 Linaro Ltd. + * + * Author: Haojian Zhuang <haojian.zhuang@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> + +#include <asm/proc-fns.h> +#include <asm/system_misc.h> + +static void __iomem *base; +static u32 reboot_offset; + +static void hisi_restart(enum reboot_mode mode, const char *cmd) +{ + writel_relaxed(0xdeadbeef, base + reboot_offset); + + while (1) + cpu_do_idle(); +} + +static int hisi_reboot_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + + base = of_iomap(np, 0); + if (!base) { + WARN(1, "failed to map base address"); + return -ENODEV; + } + + if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { + pr_err("failed to find reboot-offset property\n"); + return -EINVAL; + } + + arm_pm_restart = hisi_restart; + + return 0; +} + +static struct of_device_id hisi_reboot_of_match[] = { + { .compatible = "hisilicon,sysctrl" }, + {} +}; + +static struct platform_driver hisi_reboot_driver = { + .probe = hisi_reboot_probe, + .driver = { + .name = "hisi-reboot", + .of_match_table = hisi_reboot_of_match, + }, +}; +module_platform_driver(hisi_reboot_driver); diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index b1cda6ffdbcc..45e05b32f9b6 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -953,6 +953,7 @@ static const struct x86_cpu_id rapl_ids[] = { { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */ { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */ { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */ + { X86_VENDOR_INTEL, 6, 0x3f},/* Haswell */ { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */ /* TODO: Add more CPU IDs after testing */ {} @@ -1166,11 +1167,10 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) for (i = 0; i < RAPL_DOMAIN_MAX; i++) { /* use physical package id to read counters */ - if (!rapl_check_domain(cpu, i)) + if (!rapl_check_domain(cpu, i)) { rp->domain_map |= 1 << i; - else - pr_warn("RAPL domain %s detection failed\n", - rapl_domain_names[i]); + pr_info("Found RAPL domain %s\n", rapl_domain_names[i]); + } } rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); if (!rp->nr_domains) { diff --git a/drivers/ptp/ptp_pch.c 
b/drivers/ptp/ptp_pch.c index 90a106308c4f..255487272859 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -691,7 +691,7 @@ err_pci_en: return ret; } -static DEFINE_PCI_DEVICE_TABLE(pch_ieee1588_pcidev_id) = { +static const struct pci_device_id pch_ieee1588_pcidev_id[] = { { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_PCH_1588 diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 4ad7b89a4cb4..b800783800a3 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -43,7 +43,7 @@ config PWM_AB8500 config PWM_ATMEL tristate "Atmel PWM support" - depends on ARCH_AT91 + depends on ARCH_AT91 || AVR32 help Generic PWM framework driver for Atmel SoC. @@ -206,6 +206,13 @@ config PWM_RENESAS_TPU To compile this driver as a module, choose M here: the module will be called pwm-renesas-tpu. +config PWM_ROCKCHIP + tristate "Rockchip PWM support" + depends on ARCH_ROCKCHIP + help + Generic PWM framework driver for the PWM controller found on + Rockchip SoCs. + config PWM_SAMSUNG tristate "Samsung PWM support" depends on PLAT_SAMSUNG @@ -226,6 +233,16 @@ config PWM_SPEAR To compile this driver as a module, choose M here: the module will be called pwm-spear. +config PWM_STI + tristate "STiH4xx PWM support" + depends on ARCH_STI + depends on OF + help + Generic PWM framework driver for STiH4xx SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-sti. + config PWM_TEGRA tristate "NVIDIA Tegra PWM support" depends on ARCH_TEGRA diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 5c86a19d5d39..f8c577d41091 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -18,8 +18,10 @@ obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o obj-$(CONFIG_PWM_PXA) += pwm-pxa.o obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o +obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o +obj-$(CONFIG_PWM_STI) += pwm-sti.o obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 4b66bf09ee55..d2c35920ff08 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -606,6 +606,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) unsigned int best = 0; struct pwm_lookup *p; unsigned int match; + unsigned int period; + enum pwm_polarity polarity; /* look up via DT first */ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) @@ -653,6 +655,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) if (match > best) { chip = pwmchip_find_by_name(p->provider); index = p->index; + period = p->period; + polarity = p->polarity; if (match != 3) best = match; @@ -668,8 +672,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id) if (IS_ERR(pwm)) return pwm; - pwm_set_period(pwm, p->period); - pwm_set_polarity(pwm, p->polarity); + pwm_set_period(pwm, period); + pwm_set_polarity(pwm, polarity); return pwm; diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index d797c7b84c3f..5449d9150d40 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c @@ -262,6 +262,7 @@ static int imx_pwm_probe(struct platform_device *pdev) imx->chip.dev = &pdev->dev; imx->chip.base = -1; imx->chip.npwm = 1; + imx->chip.can_sleep = true; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); diff --git a/drivers/pwm/pwm-lpss.c 
b/drivers/pwm/pwm-lpss.c index 44ce6c6103ae..4df994f72d96 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -14,7 +14,6 @@ */ #include <linux/acpi.h> -#include <linux/clk.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> @@ -37,7 +36,6 @@ static int pci_drv, plat_drv; /* So we know which drivers registered */ struct pwm_lpss_chip { struct pwm_chip chip; void __iomem *regs; - struct clk *clk; unsigned long clk_rate; }; @@ -97,11 +95,6 @@ static int pwm_lpss_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct pwm_lpss_chip *lpwm = to_lpwm(chip); u32 ctrl; - int ret; - - ret = clk_prepare_enable(lpwm->clk); - if (ret) - return ret; ctrl = readl(lpwm->regs + PWM); writel(ctrl | PWM_ENABLE, lpwm->regs + PWM); @@ -116,8 +109,6 @@ static void pwm_lpss_disable(struct pwm_chip *chip, struct pwm_device *pwm) ctrl = readl(lpwm->regs + PWM); writel(ctrl & ~PWM_ENABLE, lpwm->regs + PWM); - - clk_disable_unprepare(lpwm->clk); } static const struct pwm_ops pwm_lpss_ops = { @@ -142,17 +133,7 @@ static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, if (IS_ERR(lpwm->regs)) return ERR_CAST(lpwm->regs); - if (info) { - lpwm->clk_rate = info->clk_rate; - } else { - lpwm->clk = devm_clk_get(dev, NULL); - if (IS_ERR(lpwm->clk)) { - dev_err(dev, "failed to get PWM clock\n"); - return ERR_CAST(lpwm->clk); - } - lpwm->clk_rate = clk_get_rate(lpwm->clk); - } - + lpwm->clk_rate = info->clk_rate; lpwm->chip.dev = dev; lpwm->chip.ops = &pwm_lpss_ops; lpwm->chip.base = -1; @@ -221,12 +202,19 @@ static struct pci_driver pwm_lpss_driver_pci = { static int pwm_lpss_probe_platform(struct platform_device *pdev) { + const struct pwm_lpss_boardinfo *info; + const struct acpi_device_id *id; struct pwm_lpss_chip *lpwm; struct resource *r; + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (!id) + return -ENODEV; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - lpwm = pwm_lpss_probe(&pdev->dev, r, NULL); + info = (struct pwm_lpss_boardinfo *)id->driver_data; + lpwm = pwm_lpss_probe(&pdev->dev, r, info); if (IS_ERR(lpwm)) return PTR_ERR(lpwm); @@ -242,7 +230,7 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev) } static const struct acpi_device_id pwm_lpss_acpi_match[] = { - { "80860F09", 0 }, + { "80860F09", (unsigned long)&byt_info }, { }, }; MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match); diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c new file mode 100644 index 000000000000..bdd8644c01cf --- /dev/null +++ b/drivers/pwm/pwm-rockchip.c @@ -0,0 +1,264 @@ +/* + * PWM driver for Rockchip SoCs + * + * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> + * Copyright (C) 2014 ROCKCHIP, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/time.h> + +#define PWM_CTRL_TIMER_EN (1 << 0) +#define PWM_CTRL_OUTPUT_EN (1 << 3) + +#define PWM_ENABLE (1 << 0) +#define PWM_CONTINUOUS (1 << 1) +#define PWM_DUTY_POSITIVE (1 << 3) +#define PWM_INACTIVE_NEGATIVE (0 << 4) +#define PWM_OUTPUT_LEFT (0 << 5) +#define PWM_LP_DISABLE (0 << 8) + +struct rockchip_pwm_chip { + struct pwm_chip chip; + struct clk *clk; + const struct rockchip_pwm_data *data; + void __iomem *base; +}; + +struct rockchip_pwm_regs { + unsigned long duty; + unsigned long period; + unsigned long cntr; + unsigned long ctrl; +}; + +struct rockchip_pwm_data { + struct rockchip_pwm_regs regs; + unsigned int prescaler; + + void (*set_enable)(struct pwm_chip *chip, bool enable); +}; + +static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) +{ + return container_of(c, struct rockchip_pwm_chip, chip); +} + +static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable) +{ + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN; + u32 val; + + val = readl_relaxed(pc->base + pc->data->regs.ctrl); + + if (enable) + val |= enable_conf; + else + val &= ~enable_conf; + + writel_relaxed(val, pc->base + pc->data->regs.ctrl); +} + +static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip, bool enable) +{ + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | + PWM_CONTINUOUS | PWM_DUTY_POSITIVE | + PWM_INACTIVE_NEGATIVE; + u32 val; + + val = readl_relaxed(pc->base + pc->data->regs.ctrl); + + if (enable) + val |= enable_conf; + else + val &= ~enable_conf; + + writel_relaxed(val, pc->base + pc->data->regs.ctrl); +} + +static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + unsigned long period, duty; + u64 clk_rate, div; + int ret; + + clk_rate = clk_get_rate(pc->clk); + + /* + * Since period and duty cycle registers have a width of 32 + * bits, every possible input period can be obtained using the + * default prescaler value for all practical clock rate values. 
+ */ + div = clk_rate * period_ns; + do_div(div, pc->data->prescaler * NSEC_PER_SEC); + period = div; + + div = clk_rate * duty_ns; + do_div(div, pc->data->prescaler * NSEC_PER_SEC); + duty = div; + + ret = clk_enable(pc->clk); + if (ret) + return ret; + + writel(period, pc->base + pc->data->regs.period); + writel(duty, pc->base + pc->data->regs.duty); + writel(0, pc->base + pc->data->regs.cntr); + + clk_disable(pc->clk); + + return 0; +} + +static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + int ret; + + ret = clk_enable(pc->clk); + if (ret) + return ret; + + pc->data->set_enable(chip, true); + + return 0; +} + +static void rockchip_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + + pc->data->set_enable(chip, false); + + clk_disable(pc->clk); +} + +static const struct pwm_ops rockchip_pwm_ops = { + .config = rockchip_pwm_config, + .enable = rockchip_pwm_enable, + .disable = rockchip_pwm_disable, + .owner = THIS_MODULE, +}; + +static const struct rockchip_pwm_data pwm_data_v1 = { + .regs = { + .duty = 0x04, + .period = 0x08, + .cntr = 0x00, + .ctrl = 0x0c, + }, + .prescaler = 2, + .set_enable = rockchip_pwm_set_enable_v1, +}; + +static const struct rockchip_pwm_data pwm_data_v2 = { + .regs = { + .duty = 0x08, + .period = 0x04, + .cntr = 0x00, + .ctrl = 0x0c, + }, + .prescaler = 1, + .set_enable = rockchip_pwm_set_enable_v2, +}; + +static const struct rockchip_pwm_data pwm_data_vop = { + .regs = { + .duty = 0x08, + .period = 0x04, + .cntr = 0x0c, + .ctrl = 0x00, + }, + .prescaler = 1, + .set_enable = rockchip_pwm_set_enable_v2, +}; + +static const struct of_device_id rockchip_pwm_dt_ids[] = { + { .compatible = "rockchip,rk2928-pwm", .data = &pwm_data_v1}, + { .compatible = "rockchip,rk3288-pwm", .data = &pwm_data_v2}, + { .compatible = "rockchip,vop-pwm", .data = &pwm_data_vop}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids); + +static int rockchip_pwm_probe(struct platform_device *pdev) +{ + const struct of_device_id *id; + struct rockchip_pwm_chip *pc; + struct resource *r; + int ret; + + id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev); + if (!id) + return -EINVAL; + + pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pc->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(pc->base)) + return PTR_ERR(pc->base); + + pc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pc->clk)) + return PTR_ERR(pc->clk); + + ret = clk_prepare(pc->clk); + if (ret) + return ret; + + platform_set_drvdata(pdev, pc); + + pc->data = id->data; + pc->chip.dev = &pdev->dev; + pc->chip.ops = &rockchip_pwm_ops; + pc->chip.base = -1; + pc->chip.npwm = 1; + + ret = pwmchip_add(&pc->chip); + if (ret < 0) { + clk_unprepare(pc->clk); + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + } + + return ret; +} + +static int rockchip_pwm_remove(struct platform_device *pdev) +{ + struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev); + + clk_unprepare(pc->clk); + + return pwmchip_remove(&pc->chip); +} + +static struct platform_driver rockchip_pwm_driver = { + .driver = { + .name = "rockchip-pwm", + .of_match_table = rockchip_pwm_dt_ids, + }, + .probe = rockchip_pwm_probe, + .remove = rockchip_pwm_remove, +}; +module_platform_driver(rockchip_pwm_driver); + +MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); 
+MODULE_DESCRIPTION("Rockchip SoC PWM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c new file mode 100644 index 000000000000..b95115cdaea7 --- /dev/null +++ b/drivers/pwm/pwm-sti.c @@ -0,0 +1,418 @@ +/* + * PWM device driver for ST SoCs. + * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * + * Copyright (C) 2013-2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/math64.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/time.h> + +#define STI_DS_REG(ch) (4 * (ch)) /* Channel's Duty Cycle register */ +#define STI_PWMCR 0x50 /* Control/Config register */ +#define STI_INTEN 0x54 /* Interrupt Enable/Disable register */ +#define PWM_PRESCALE_LOW_MASK 0x0f +#define PWM_PRESCALE_HIGH_MASK 0xf0 + +/* Regfield IDs */ +enum { + PWMCLK_PRESCALE_LOW, + PWMCLK_PRESCALE_HIGH, + PWM_EN, + PWM_INT_EN, + + /* Keep last */ + MAX_REGFIELDS +}; + +struct sti_pwm_compat_data { + const struct reg_field *reg_fields; + unsigned int num_chan; + unsigned int max_pwm_cnt; + unsigned int max_prescale; +}; + +struct sti_pwm_chip { + struct device *dev; + struct clk *clk; + unsigned long clk_rate; + struct regmap *regmap; + struct sti_pwm_compat_data *cdata; + struct regmap_field *prescale_low; + struct regmap_field *prescale_high; + struct regmap_field *pwm_en; + struct regmap_field *pwm_int_en; + struct pwm_chip chip; + struct pwm_device *cur; + unsigned int en_count; + struct mutex sti_pwm_lock; /* To sync between enable/disable calls */ + void __iomem *mmio; +}; + +static const struct reg_field sti_pwm_regfields[MAX_REGFIELDS] = { + [PWMCLK_PRESCALE_LOW] = REG_FIELD(STI_PWMCR, 0, 3), + [PWMCLK_PRESCALE_HIGH] = REG_FIELD(STI_PWMCR, 11, 14), + [PWM_EN] = REG_FIELD(STI_PWMCR, 9, 9), + [PWM_INT_EN] = REG_FIELD(STI_INTEN, 0, 0), +}; + +static inline struct sti_pwm_chip *to_sti_pwmchip(struct pwm_chip *chip) +{ + return container_of(chip, struct sti_pwm_chip, chip); +} + +/* + * Calculate the prescaler value corresponding to the period. + */ +static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period, + unsigned int *prescale) +{ + struct sti_pwm_compat_data *cdata = pc->cdata; + unsigned long val; + unsigned int ps; + + /* + * prescale = ((period_ns * clk_rate) / (10^9 * (max_pwm_count + 1)) - 1 + */ + val = NSEC_PER_SEC / pc->clk_rate; + val *= cdata->max_pwm_cnt + 1; + + if (period % val) { + return -EINVAL; + } else { + ps = period / val - 1; + if (ps > cdata->max_prescale) + return -EINVAL; + } + *prescale = ps; + + return 0; +} + +/* Calculate the number of PWM devices configured with a period. */ +static unsigned int sti_pwm_count_configured(struct pwm_chip *chip) +{ + struct pwm_device *pwm; + unsigned int ncfg = 0; + unsigned int i; + + for (i = 0; i < chip->npwm; i++) { + pwm = &chip->pwms[i]; + if (test_bit(PWMF_REQUESTED, &pwm->flags)) { + if (pwm_get_period(pwm)) + ncfg++; + } + } + + return ncfg; +} + +/* + * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles. 
+ * The only way to change the period (apart from changing the PWM input clock) + * is to change the PWM clock prescaler. + * The prescaler is of 8 bits, so 256 prescaler values and hence + * 256 possible period values are supported (for a particular clock rate). + * The requested period will be applied only if it matches one of these + * 256 values. + */ +static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct sti_pwm_chip *pc = to_sti_pwmchip(chip); + struct sti_pwm_compat_data *cdata = pc->cdata; + struct pwm_device *cur = pc->cur; + struct device *dev = pc->dev; + unsigned int prescale = 0, pwmvalx; + int ret; + unsigned int ncfg; + bool period_same = false; + + ncfg = sti_pwm_count_configured(chip); + if (ncfg) + period_same = (period_ns == pwm_get_period(cur)); + + /* Allow configuration changes if one of the + * following conditions satisfy. + * 1. No channels have been configured. + * 2. Only one channel has been configured and the new request + * is for the same channel. + * 3. Only one channel has been configured and the new request is + * for a new channel and period of the new channel is same as + * the current configured period. + * 4. More than one channels are configured and period of the new + * requestis the same as the current period. + */ + if (!ncfg || + ((ncfg == 1) && (pwm->hwpwm == cur->hwpwm)) || + ((ncfg == 1) && (pwm->hwpwm != cur->hwpwm) && period_same) || + ((ncfg > 1) && period_same)) { + /* Enable clock before writing to PWM registers. */ + ret = clk_enable(pc->clk); + if (ret) + return ret; + + if (!period_same) { + ret = sti_pwm_get_prescale(pc, period_ns, &prescale); + if (ret) + goto clk_dis; + + ret = + regmap_field_write(pc->prescale_low, + prescale & PWM_PRESCALE_LOW_MASK); + if (ret) + goto clk_dis; + + ret = + regmap_field_write(pc->prescale_high, + (prescale & PWM_PRESCALE_HIGH_MASK) >> 4); + if (ret) + goto clk_dis; + } + + /* + * When PWMVal == 0, PWM pulse = 1 local clock cycle. + * When PWMVal == max_pwm_count, + * PWM pulse = (max_pwm_count + 1) local cycles, + * that is continuous pulse: signal never goes low. + */ + pwmvalx = cdata->max_pwm_cnt * duty_ns / period_ns; + + ret = regmap_write(pc->regmap, STI_DS_REG(pwm->hwpwm), pwmvalx); + if (ret) + goto clk_dis; + + ret = regmap_field_write(pc->pwm_int_en, 0); + + pc->cur = pwm; + + dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n", + prescale, period_ns, duty_ns, pwmvalx); + } else { + return -EINVAL; + } + +clk_dis: + clk_disable(pc->clk); + return ret; +} + +static int sti_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct sti_pwm_chip *pc = to_sti_pwmchip(chip); + struct device *dev = pc->dev; + int ret = 0; + + /* + * Since we have a common enable for all PWM channels, + * do not enable if already enabled. 
+ */ + mutex_lock(&pc->sti_pwm_lock); + if (!pc->en_count) { + ret = clk_enable(pc->clk); + if (ret) + goto out; + + ret = regmap_field_write(pc->pwm_en, 1); + if (ret) { + dev_err(dev, "failed to enable PWM device:%d\n", + pwm->hwpwm); + goto out; + } + } + pc->en_count++; +out: + mutex_unlock(&pc->sti_pwm_lock); + return ret; +} + +static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct sti_pwm_chip *pc = to_sti_pwmchip(chip); + + mutex_lock(&pc->sti_pwm_lock); + if (--pc->en_count) { + mutex_unlock(&pc->sti_pwm_lock); + return; + } + regmap_field_write(pc->pwm_en, 0); + + clk_disable(pc->clk); + mutex_unlock(&pc->sti_pwm_lock); +} + +static const struct pwm_ops sti_pwm_ops = { + .config = sti_pwm_config, + .enable = sti_pwm_enable, + .disable = sti_pwm_disable, + .owner = THIS_MODULE, +}; + +static int sti_pwm_probe_dt(struct sti_pwm_chip *pc) +{ + struct device *dev = pc->dev; + const struct reg_field *reg_fields; + struct device_node *np = dev->of_node; + struct sti_pwm_compat_data *cdata = pc->cdata; + u32 num_chan; + + of_property_read_u32(np, "st,pwm-num-chan", &num_chan); + if (num_chan) + cdata->num_chan = num_chan; + + reg_fields = cdata->reg_fields; + + pc->prescale_low = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWMCLK_PRESCALE_LOW]); + if (IS_ERR(pc->prescale_low)) + return PTR_ERR(pc->prescale_low); + + pc->prescale_high = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWMCLK_PRESCALE_HIGH]); + if (IS_ERR(pc->prescale_high)) + return PTR_ERR(pc->prescale_high); + + pc->pwm_en = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWM_EN]); + if (IS_ERR(pc->pwm_en)) + return PTR_ERR(pc->pwm_en); + + pc->pwm_int_en = devm_regmap_field_alloc(dev, pc->regmap, + reg_fields[PWM_INT_EN]); + if (IS_ERR(pc->pwm_int_en)) + return PTR_ERR(pc->pwm_int_en); + + return 0; +} + +static const struct regmap_config sti_pwm_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static int sti_pwm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sti_pwm_compat_data *cdata; + struct sti_pwm_chip *pc; + struct resource *res; + int ret; + + pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + cdata = devm_kzalloc(dev, sizeof(*cdata), GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + pc->mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(pc->mmio)) + return PTR_ERR(pc->mmio); + + pc->regmap = devm_regmap_init_mmio(dev, pc->mmio, + &sti_pwm_regmap_config); + if (IS_ERR(pc->regmap)) + return PTR_ERR(pc->regmap); + + /* + * Setup PWM data with default values: some values could be replaced + * with specific ones provided from Device Tree. 
+ */ + cdata->reg_fields = &sti_pwm_regfields[0]; + cdata->max_prescale = 0xff; + cdata->max_pwm_cnt = 255; + cdata->num_chan = 1; + + pc->cdata = cdata; + pc->dev = dev; + pc->en_count = 0; + mutex_init(&pc->sti_pwm_lock); + + ret = sti_pwm_probe_dt(pc); + if (ret) + return ret; + + pc->clk = of_clk_get_by_name(dev->of_node, "pwm"); + if (IS_ERR(pc->clk)) { + dev_err(dev, "failed to get PWM clock\n"); + return PTR_ERR(pc->clk); + } + + pc->clk_rate = clk_get_rate(pc->clk); + if (!pc->clk_rate) { + dev_err(dev, "failed to get clock rate\n"); + return -EINVAL; + } + + ret = clk_prepare(pc->clk); + if (ret) { + dev_err(dev, "failed to prepare clock\n"); + return ret; + } + + pc->chip.dev = dev; + pc->chip.ops = &sti_pwm_ops; + pc->chip.base = -1; + pc->chip.npwm = pc->cdata->num_chan; + pc->chip.can_sleep = true; + + ret = pwmchip_add(&pc->chip); + if (ret < 0) { + clk_unprepare(pc->clk); + return ret; + } + + platform_set_drvdata(pdev, pc); + + return 0; +} + +static int sti_pwm_remove(struct platform_device *pdev) +{ + struct sti_pwm_chip *pc = platform_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < pc->cdata->num_chan; i++) + pwm_disable(&pc->chip.pwms[i]); + + clk_unprepare(pc->clk); + + return pwmchip_remove(&pc->chip); +} + +static const struct of_device_id sti_pwm_of_match[] = { + { .compatible = "st,sti-pwm", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sti_pwm_of_match); + +static struct platform_driver sti_pwm_driver = { + .driver = { + .name = "sti-pwm", + .of_match_table = sti_pwm_of_match, + }, + .probe = sti_pwm_probe, + .remove = sti_pwm_remove, +}; +module_platform_driver(sti_pwm_driver); + +MODULE_AUTHOR("Ajit Pal Singh <ajitpal.singh@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics ST PWM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c index 3b119bc2c3c6..67481dc6da3f 100644 --- a/drivers/pwm/pwm-tipwmss.c +++ b/drivers/pwm/pwm-tipwmss.c @@ -62,10 +62,8 @@ static int pwmss_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); - if (!info) { - dev_err(&pdev->dev, "failed to allocate memory\n"); + if (!info) return -ENOMEM; - } mutex_init(&info->pwmss_lock); diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 2ca1a0b3ad57..8bcfecd66281 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -2493,7 +2493,7 @@ err_exit: return err; } -static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = { +static const struct pci_device_id tsi721_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) }, { 0, } /* terminate list */ }; diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index 0305675270ee..a7b42680a06a 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h @@ -644,27 +644,26 @@ enum tsi721_smsg_int_flag { #ifdef CONFIG_RAPIDIO_DMA_ENGINE -#define TSI721_BDMA_BD_RING_SZ 128 #define TSI721_BDMA_MAX_BCOUNT (TSI721_DMAD_BCOUNT1 + 1) struct tsi721_tx_desc { struct dma_async_tx_descriptor txd; - struct tsi721_dma_desc *hw_desc; u16 destid; /* low 64-bits of 66-bit RIO address */ u64 rio_addr; /* upper 2-bits of 66-bit RIO address */ u8 rio_addr_u; - u32 bcount; - bool interrupt; + enum dma_rtype rtype; struct list_head desc_node; - struct list_head tx_list; + struct scatterlist *sg; + unsigned int sg_len; + enum dma_status status; }; struct tsi721_bdma_chan { int id; void __iomem *regs; - int 
bd_num; /* number of buffer descriptors */ + int bd_num; /* number of HW buffer descriptors */ void *bd_base; /* start of DMA descriptors */ dma_addr_t bd_phys; void *sts_base; /* start of DMA BD status FIFO */ @@ -680,7 +679,6 @@ struct tsi721_bdma_chan { struct list_head active_list; struct list_head queue; struct list_head free_list; - dma_cookie_t completed_cookie; struct tasklet_struct tasklet; bool active; }; diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 44341dc5b148..f64c5decb747 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -1,7 +1,7 @@ /* * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge * - * Copyright 2011 Integrated Device Technology, Inc. + * Copyright (c) 2011-2014 Integrated Device Technology, Inc. * Alexandre Bounine <alexandre.bounine@idt.com> * * This program is free software; you can redistribute it and/or modify it @@ -14,9 +14,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * The full GNU General Public License is included in this distribution in the + * file called COPYING. */ #include <linux/io.h> @@ -32,9 +31,22 @@ #include <linux/interrupt.h> #include <linux/kfifo.h> #include <linux/delay.h> +#include "../../dma/dmaengine.h" #include "tsi721.h" +#define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */ + +#ifdef CONFIG_PCI_MSI +static irqreturn_t tsi721_bdma_msix(int irq, void *ptr); +#endif +static int tsi721_submit_sg(struct tsi721_tx_desc *desc); + +static unsigned int dma_desc_per_channel = 128; +module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(dma_desc_per_channel, + "Number of DMA descriptors per channel (default: 128)"); + static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) { return container_of(chan, struct tsi721_bdma_chan, dchan); @@ -59,7 +71,7 @@ struct tsi721_tx_desc *tsi721_dma_first_active( struct tsi721_tx_desc, desc_node); } -static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) +static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) { struct tsi721_dma_desc *bd_ptr; struct device *dev = bdma_chan->dchan.device->dev; @@ -67,17 +79,23 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) dma_addr_t bd_phys; dma_addr_t sts_phys; int sts_size; - int bd_num = bdma_chan->bd_num; +#ifdef CONFIG_PCI_MSI + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); +#endif dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id); - /* Allocate space for DMA descriptors */ + /* + * Allocate space for DMA descriptors + * (add an extra element for link descriptor) + */ bd_ptr = dma_zalloc_coherent(dev, - bd_num * sizeof(struct tsi721_dma_desc), + (bd_num + 1) * sizeof(struct tsi721_dma_desc), &bd_phys, GFP_KERNEL); if (!bd_ptr) return -ENOMEM; + bdma_chan->bd_num = bd_num; bdma_chan->bd_phys = bd_phys; bdma_chan->bd_base = bd_ptr; @@ -85,8 +103,8 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) bd_ptr, (unsigned long long)bd_phys); /* Allocate space for descriptor status FIFO */ - sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? - bd_num : TSI721_DMA_MINSTSSZ; + sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? 
+ (bd_num + 1) : TSI721_DMA_MINSTSSZ; sts_size = roundup_pow_of_two(sts_size); sts_ptr = dma_zalloc_coherent(dev, sts_size * sizeof(struct tsi721_dma_sts), @@ -94,7 +112,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) if (!sts_ptr) { /* Free space allocated for DMA descriptors */ dma_free_coherent(dev, - bd_num * sizeof(struct tsi721_dma_desc), + (bd_num + 1) * sizeof(struct tsi721_dma_desc), bd_ptr, bd_phys); bdma_chan->bd_base = NULL; return -ENOMEM; @@ -108,11 +126,11 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) "desc status FIFO @ %p (phys = %llx) size=0x%x\n", sts_ptr, (unsigned long long)sts_phys, sts_size); - /* Initialize DMA descriptors ring */ - bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); - bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & + /* Initialize DMA descriptors ring using added link descriptor */ + bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29); + bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys & TSI721_DMAC_DPTRL_MASK); - bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); + bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32); /* Setup DMA descriptor pointers */ iowrite32(((u64)bd_phys >> 32), @@ -134,6 +152,55 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) ioread32(bdma_chan->regs + TSI721_DMAC_INT); +#ifdef CONFIG_PCI_MSI + /* Request interrupt service if we are in MSI-X mode */ + if (priv->flags & TSI721_USING_MSIX) { + int rc, idx; + + idx = TSI721_VECT_DMA0_DONE + bdma_chan->id; + + rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, + priv->msix[idx].irq_name, (void *)bdma_chan); + + if (rc) { + dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n", + bdma_chan->id); + goto err_out; + } + + idx = TSI721_VECT_DMA0_INT + bdma_chan->id; + + rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, + priv->msix[idx].irq_name, (void *)bdma_chan); + + if (rc) { + dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n", + bdma_chan->id); + free_irq( + priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector, + (void *)bdma_chan); + } + +err_out: + if (rc) { + /* Free space allocated for DMA descriptors */ + dma_free_coherent(dev, + (bd_num + 1) * sizeof(struct tsi721_dma_desc), + bd_ptr, bd_phys); + bdma_chan->bd_base = NULL; + + /* Free space allocated for status descriptors */ + dma_free_coherent(dev, + sts_size * sizeof(struct tsi721_dma_sts), + sts_ptr, sts_phys); + bdma_chan->sts_base = NULL; + + return -EIO; + } + } +#endif /* CONFIG_PCI_MSI */ + /* Toggle DMA channel initialization */ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); ioread32(bdma_chan->regs + TSI721_DMAC_CTL); @@ -147,6 +214,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) { u32 ch_stat; +#ifdef CONFIG_PCI_MSI + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); +#endif if (bdma_chan->bd_base == NULL) return 0; @@ -159,9 +229,18 @@ static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) /* Put DMA channel into init state */ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + free_irq(priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector, (void *)bdma_chan); + free_irq(priv->msix[TSI721_VECT_DMA0_INT + + bdma_chan->id].vector, (void *)bdma_chan); + } +#endif /* CONFIG_PCI_MSI */ + /* Free space allocated for DMA descriptors */ 
dma_free_coherent(bdma_chan->dchan.device->dev, - bdma_chan->bd_num * sizeof(struct tsi721_dma_desc), + (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc), bdma_chan->bd_base, bdma_chan->bd_phys); bdma_chan->bd_base = NULL; @@ -243,8 +322,8 @@ static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) } dev_dbg(bdma_chan->dchan.device->dev, - "tx_chan: %p, chan: %d, regs: %p\n", - bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs); + "%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id, + bdma_chan->wr_count_next); iowrite32(bdma_chan->wr_count_next, bdma_chan->regs + TSI721_DMAC_DWRCNT); @@ -253,72 +332,19 @@ static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) bdma_chan->wr_count = bdma_chan->wr_count_next; } -static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan, - struct tsi721_tx_desc *desc) -{ - dev_dbg(bdma_chan->dchan.device->dev, - "Put desc: %p into free list\n", desc); - - if (desc) { - spin_lock_bh(&bdma_chan->lock); - list_splice_init(&desc->tx_list, &bdma_chan->free_list); - list_add(&desc->desc_node, &bdma_chan->free_list); - bdma_chan->wr_count_next = bdma_chan->wr_count; - spin_unlock_bh(&bdma_chan->lock); - } -} - -static -struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan) -{ - struct tsi721_tx_desc *tx_desc, *_tx_desc; - struct tsi721_tx_desc *ret = NULL; - int i; - - spin_lock_bh(&bdma_chan->lock); - list_for_each_entry_safe(tx_desc, _tx_desc, - &bdma_chan->free_list, desc_node) { - if (async_tx_test_ack(&tx_desc->txd)) { - list_del(&tx_desc->desc_node); - ret = tx_desc; - break; - } - dev_dbg(bdma_chan->dchan.device->dev, - "desc %p not ACKed\n", tx_desc); - } - - if (ret == NULL) { - dev_dbg(bdma_chan->dchan.device->dev, - "%s: unable to obtain tx descriptor\n", __func__); - goto err_out; - } - - i = bdma_chan->wr_count_next % bdma_chan->bd_num; - if (i == bdma_chan->bd_num - 1) { - i = 0; - bdma_chan->wr_count_next++; /* skip link descriptor */ - } - - bdma_chan->wr_count_next++; - tx_desc->txd.phys = bdma_chan->bd_phys + - i * sizeof(struct tsi721_dma_desc); - tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i]; -err_out: - spin_unlock_bh(&bdma_chan->lock); - - return ret; -} - static int -tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg, - enum dma_rtype rtype, u32 sys_size) +tsi721_desc_fill_init(struct tsi721_tx_desc *desc, + struct tsi721_dma_desc *bd_ptr, + struct scatterlist *sg, u32 sys_size) { - struct tsi721_dma_desc *bd_ptr = desc->hw_desc; u64 rio_addr; + if (bd_ptr == NULL) + return -EINVAL; + /* Initialize DMA descriptor */ bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) | - (rtype << 19) | desc->destid); + (desc->rtype << 19) | desc->destid); bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | (sys_size << 26)); rio_addr = (desc->rio_addr >> 2) | @@ -335,51 +361,32 @@ tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg, } static int -tsi721_desc_fill_end(struct tsi721_tx_desc *desc) +tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt) { - struct tsi721_dma_desc *bd_ptr = desc->hw_desc; + if (bd_ptr == NULL) + return -EINVAL; /* Update DMA descriptor */ - if (desc->interrupt) + if (interrupt) bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); - bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1); + bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1); return 0; } - -static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan, - struct tsi721_tx_desc *desc) +static 
void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan, + struct tsi721_tx_desc *desc) { struct dma_async_tx_descriptor *txd = &desc->txd; dma_async_tx_callback callback = txd->callback; void *param = txd->callback_param; - list_splice_init(&desc->tx_list, &bdma_chan->free_list); list_move(&desc->desc_node, &bdma_chan->free_list); - bdma_chan->completed_cookie = txd->cookie; if (callback) callback(param); } -static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan) -{ - struct tsi721_tx_desc *desc, *_d; - LIST_HEAD(list); - - BUG_ON(!tsi721_dma_is_idle(bdma_chan)); - - if (!list_empty(&bdma_chan->queue)) - tsi721_start_dma(bdma_chan); - - list_splice_init(&bdma_chan->active_list, &list); - list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); - - list_for_each_entry_safe(desc, _d, &list, desc_node) - tsi721_dma_chain_complete(bdma_chan, desc); -} - static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) { u32 srd_ptr; @@ -403,20 +410,159 @@ static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) bdma_chan->sts_rdptr = srd_ptr; } +/* Must be called with the channel spinlock held */ +static int tsi721_submit_sg(struct tsi721_tx_desc *desc) +{ + struct dma_chan *dchan = desc->txd.chan; + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + u32 sys_size; + u64 rio_addr; + dma_addr_t next_addr; + u32 bcount; + struct scatterlist *sg; + unsigned int i; + int err = 0; + struct tsi721_dma_desc *bd_ptr = NULL; + u32 idx, rd_idx; + u32 add_count = 0; + + if (!tsi721_dma_is_idle(bdma_chan)) { + dev_err(bdma_chan->dchan.device->dev, + "BUG: Attempt to use non-idle channel\n"); + return -EIO; + } + + /* + * Fill DMA channel's hardware buffer descriptors. + * (NOTE: RapidIO destination address is limited to 64 bits for now) + */ + rio_addr = desc->rio_addr; + next_addr = -1; + bcount = 0; + sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size; + + rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT); + rd_idx %= (bdma_chan->bd_num + 1); + + idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1); + if (idx == bdma_chan->bd_num) { + /* wrap around link descriptor */ + idx = 0; + add_count++; + } + + dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n", + __func__, rd_idx, idx); + + for_each_sg(desc->sg, sg, desc->sg_len, i) { + + dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n", + i, desc->sg_len, + (unsigned long long)sg_dma_address(sg), sg_dma_len(sg)); + + if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { + dev_err(dchan->device->dev, + "%s: SG entry %d is too large\n", __func__, i); + err = -EINVAL; + break; + } + + /* + * If this sg entry forms contiguous block with previous one, + * try to merge it into existing DMA descriptor + */ + if (next_addr == sg_dma_address(sg) && + bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) { + /* Adjust byte count of the descriptor */ + bcount += sg_dma_len(sg); + goto entry_done; + } else if (next_addr != -1) { + /* Finalize descriptor using total byte count value */ + tsi721_desc_fill_end(bd_ptr, bcount, 0); + dev_dbg(dchan->device->dev, + "%s: prev desc final len: %d\n", + __func__, bcount); + } + + desc->rio_addr = rio_addr; + + if (i && idx == rd_idx) { + dev_dbg(dchan->device->dev, + "%s: HW descriptor ring is full @ %d\n", + __func__, i); + desc->sg = sg; + desc->sg_len -= i; + break; + } + + bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx]; + err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); + if (err) { + dev_err(dchan->device->dev, + "Failed to build 
desc: err=%d\n", err); + break; + } + + dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n", + bd_ptr, desc->destid, desc->rio_addr); + + next_addr = sg_dma_address(sg); + bcount = sg_dma_len(sg); + + add_count++; + if (++idx == bdma_chan->bd_num) { + /* wrap around link descriptor */ + idx = 0; + add_count++; + } + +entry_done: + if (sg_is_last(sg)) { + tsi721_desc_fill_end(bd_ptr, bcount, 0); + dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n", + __func__, bcount); + desc->sg_len = 0; + } else { + rio_addr += sg_dma_len(sg); + next_addr += sg_dma_len(sg); + } + } + + if (!err) + bdma_chan->wr_count_next += add_count; + + return err; +} + static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan) { - if (list_empty(&bdma_chan->active_list) || - list_is_singular(&bdma_chan->active_list)) { - dev_dbg(bdma_chan->dchan.device->dev, - "%s: Active_list empty\n", __func__); - tsi721_dma_complete_all(bdma_chan); - } else { - dev_dbg(bdma_chan->dchan.device->dev, - "%s: Active_list NOT empty\n", __func__); - tsi721_dma_chain_complete(bdma_chan, - tsi721_dma_first_active(bdma_chan)); - tsi721_start_dma(bdma_chan); + struct tsi721_tx_desc *desc; + int err; + + dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__); + + /* + * If there are any new transactions in the queue add them + * into the processing list + */ + if (!list_empty(&bdma_chan->queue)) + list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); + + /* Start new transaction (if available) */ + if (!list_empty(&bdma_chan->active_list)) { + desc = tsi721_dma_first_active(bdma_chan); + err = tsi721_submit_sg(desc); + if (!err) + tsi721_start_dma(bdma_chan); + else { + tsi721_dma_tx_err(bdma_chan, desc); + dev_dbg(bdma_chan->dchan.device->dev, + "ERR: tsi721_submit_sg failed with err=%d\n", + err); + } } + + dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__); } static void tsi721_dma_tasklet(unsigned long data) @@ -444,8 +590,29 @@ static void tsi721_dma_tasklet(unsigned long data) } if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { + struct tsi721_tx_desc *desc; + tsi721_clr_stat(bdma_chan); spin_lock(&bdma_chan->lock); + desc = tsi721_dma_first_active(bdma_chan); + + if (desc->sg_len == 0) { + dma_async_tx_callback callback = NULL; + void *param = NULL; + + desc->status = DMA_COMPLETE; + dma_cookie_complete(&desc->txd); + if (desc->txd.flags & DMA_PREP_INTERRUPT) { + callback = desc->txd.callback; + param = desc->txd.callback_param; + } + list_move(&desc->desc_node, &bdma_chan->free_list); + spin_unlock(&bdma_chan->lock); + if (callback) + callback(param); + spin_lock(&bdma_chan->lock); + } + tsi721_advance_work(bdma_chan); spin_unlock(&bdma_chan->lock); } @@ -460,21 +627,24 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); dma_cookie_t cookie; - spin_lock_bh(&bdma_chan->lock); + /* Check if the descriptor is detached from any lists */ + if (!list_empty(&desc->desc_node)) { + dev_err(bdma_chan->dchan.device->dev, + "%s: wrong state of descriptor %p\n", __func__, txd); + return -EIO; + } - cookie = txd->chan->cookie; - if (++cookie < 0) - cookie = 1; - txd->chan->cookie = cookie; - txd->cookie = cookie; + spin_lock_bh(&bdma_chan->lock); - if (list_empty(&bdma_chan->active_list)) { - list_add_tail(&desc->desc_node, &bdma_chan->active_list); - tsi721_start_dma(bdma_chan); - } else { - list_add_tail(&desc->desc_node, &bdma_chan->queue); + if (!bdma_chan->active) { + 
spin_unlock_bh(&bdma_chan->lock); + return -ENODEV; } + cookie = dma_cookie_assign(txd); + desc->status = DMA_IN_PROGRESS; + list_add_tail(&desc->desc_node, &bdma_chan->queue); + spin_unlock_bh(&bdma_chan->lock); return cookie; } @@ -482,115 +652,52 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) static int tsi721_alloc_chan_resources(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); -#ifdef CONFIG_PCI_MSI - struct tsi721_device *priv = to_tsi721(dchan->device); -#endif struct tsi721_tx_desc *desc = NULL; - LIST_HEAD(tmp_list); int i; - int rc; + + dev_dbg(dchan->device->dev, "%s: for channel %d\n", + __func__, bdma_chan->id); if (bdma_chan->bd_base) - return bdma_chan->bd_num - 1; + return TSI721_DMA_TX_QUEUE_SZ; /* Initialize BDMA channel */ - if (tsi721_bdma_ch_init(bdma_chan)) { + if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { dev_err(dchan->device->dev, "Unable to initialize data DMA" " channel %d, aborting\n", bdma_chan->id); - return -ENOMEM; + return -ENODEV; } - /* Alocate matching number of logical descriptors */ - desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc), + /* Allocate queue of transaction descriptors */ + desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc), GFP_KERNEL); if (!desc) { dev_err(dchan->device->dev, "Failed to allocate logical descriptors\n"); - rc = -ENOMEM; - goto err_out; + tsi721_bdma_ch_free(bdma_chan); + return -ENOMEM; } bdma_chan->tx_desc = desc; - for (i = 0; i < bdma_chan->bd_num - 1; i++) { + for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) { dma_async_tx_descriptor_init(&desc[i].txd, dchan); desc[i].txd.tx_submit = tsi721_tx_submit; desc[i].txd.flags = DMA_CTRL_ACK; - INIT_LIST_HEAD(&desc[i].tx_list); - list_add_tail(&desc[i].desc_node, &tmp_list); + list_add(&desc[i].desc_node, &bdma_chan->free_list); } - spin_lock_bh(&bdma_chan->lock); - list_splice(&tmp_list, &bdma_chan->free_list); - bdma_chan->completed_cookie = dchan->cookie = 1; - spin_unlock_bh(&bdma_chan->lock); - -#ifdef CONFIG_PCI_MSI - if (priv->flags & TSI721_USING_MSIX) { - /* Request interrupt service if we are in MSI-X mode */ - rc = request_irq( - priv->msix[TSI721_VECT_DMA0_DONE + - bdma_chan->id].vector, - tsi721_bdma_msix, 0, - priv->msix[TSI721_VECT_DMA0_DONE + - bdma_chan->id].irq_name, - (void *)bdma_chan); - - if (rc) { - dev_dbg(dchan->device->dev, - "Unable to allocate MSI-X interrupt for " - "BDMA%d-DONE\n", bdma_chan->id); - goto err_out; - } - - rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT + - bdma_chan->id].vector, - tsi721_bdma_msix, 0, - priv->msix[TSI721_VECT_DMA0_INT + - bdma_chan->id].irq_name, - (void *)bdma_chan); - - if (rc) { - dev_dbg(dchan->device->dev, - "Unable to allocate MSI-X interrupt for " - "BDMA%d-INT\n", bdma_chan->id); - free_irq( - priv->msix[TSI721_VECT_DMA0_DONE + - bdma_chan->id].vector, - (void *)bdma_chan); - rc = -EIO; - goto err_out; - } - } -#endif /* CONFIG_PCI_MSI */ + dma_cookie_init(dchan); bdma_chan->active = true; tsi721_bdma_interrupt_enable(bdma_chan, 1); - return bdma_chan->bd_num - 1; - -err_out: - kfree(desc); - tsi721_bdma_ch_free(bdma_chan); - return rc; + return TSI721_DMA_TX_QUEUE_SZ; } -static void tsi721_free_chan_resources(struct dma_chan *dchan) +static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan) { - struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - struct tsi721_device *priv = to_tsi721(dchan->device); - LIST_HEAD(list); - - dev_dbg(dchan->device->dev, "%s: Entry\n", 
__func__); - - if (bdma_chan->bd_base == NULL) - return; - - BUG_ON(!list_empty(&bdma_chan->active_list)); - BUG_ON(!list_empty(&bdma_chan->queue)); - - tsi721_bdma_interrupt_enable(bdma_chan, 0); - bdma_chan->active = false; + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { @@ -601,64 +708,48 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan) } else #endif synchronize_irq(priv->pdev->irq); +} - tasklet_kill(&bdma_chan->tasklet); +static void tsi721_free_chan_resources(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - spin_lock_bh(&bdma_chan->lock); - list_splice_init(&bdma_chan->free_list, &list); - spin_unlock_bh(&bdma_chan->lock); + dev_dbg(dchan->device->dev, "%s: for channel %d\n", + __func__, bdma_chan->id); -#ifdef CONFIG_PCI_MSI - if (priv->flags & TSI721_USING_MSIX) { - free_irq(priv->msix[TSI721_VECT_DMA0_DONE + - bdma_chan->id].vector, (void *)bdma_chan); - free_irq(priv->msix[TSI721_VECT_DMA0_INT + - bdma_chan->id].vector, (void *)bdma_chan); - } -#endif /* CONFIG_PCI_MSI */ + if (bdma_chan->bd_base == NULL) + return; - tsi721_bdma_ch_free(bdma_chan); + BUG_ON(!list_empty(&bdma_chan->active_list)); + BUG_ON(!list_empty(&bdma_chan->queue)); + + tsi721_bdma_interrupt_enable(bdma_chan, 0); + bdma_chan->active = false; + tsi721_sync_dma_irq(bdma_chan); + tasklet_kill(&bdma_chan->tasklet); + INIT_LIST_HEAD(&bdma_chan->free_list); kfree(bdma_chan->tx_desc); + tsi721_bdma_ch_free(bdma_chan); } static enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - dma_cookie_t last_used; - dma_cookie_t last_completed; - int ret; - - spin_lock_bh(&bdma_chan->lock); - last_completed = bdma_chan->completed_cookie; - last_used = dchan->cookie; - spin_unlock_bh(&bdma_chan->lock); - - ret = dma_async_is_complete(cookie, last_completed, last_used); - - dma_set_tx_state(txstate, last_completed, last_used, 0); - - dev_dbg(dchan->device->dev, - "%s: exit, ret: %d, last_completed: %d, last_used: %d\n", - __func__, ret, last_completed, last_used); - - return ret; + return dma_cookie_status(dchan, cookie, txstate); } static void tsi721_issue_pending(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); + dev_dbg(dchan->device->dev, "%s: Enter\n", __func__); - if (tsi721_dma_is_idle(bdma_chan)) { + if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) { spin_lock_bh(&bdma_chan->lock); tsi721_advance_work(bdma_chan); spin_unlock_bh(&bdma_chan->lock); - } else - dev_dbg(dchan->device->dev, - "%s: DMA channel still busy\n", __func__); + } } static @@ -668,21 +759,19 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, void *tinfo) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - struct tsi721_tx_desc *desc = NULL; - struct tsi721_tx_desc *first = NULL; - struct scatterlist *sg; + struct tsi721_tx_desc *desc, *_d; struct rio_dma_ext *rext = tinfo; - u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */ - unsigned int i; - u32 sys_size = dma_to_mport(dchan->device)->sys_size; enum dma_rtype rtype; - dma_addr_t next_addr = -1; + struct dma_async_tx_descriptor *txd = NULL; if (!sgl || !sg_len) { dev_err(dchan->device->dev, "%s: No SG list\n", __func__); return NULL; } + dev_dbg(dchan->device->dev, "%s: %s\n", __func__, + 
(dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); + if (dir == DMA_DEV_TO_MEM) rtype = NREAD; else if (dir == DMA_MEM_TO_DEV) { @@ -704,97 +793,26 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, return NULL; } - for_each_sg(sgl, sg, sg_len, i) { - int err; - - if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { - dev_err(dchan->device->dev, - "%s: SG entry %d is too large\n", __func__, i); - goto err_desc_put; - } - - /* - * If this sg entry forms contiguous block with previous one, - * try to merge it into existing DMA descriptor - */ - if (desc) { - if (next_addr == sg_dma_address(sg) && - desc->bcount + sg_dma_len(sg) <= - TSI721_BDMA_MAX_BCOUNT) { - /* Adjust byte count of the descriptor */ - desc->bcount += sg_dma_len(sg); - goto entry_done; - } - - /* - * Finalize this descriptor using total - * byte count value. - */ - tsi721_desc_fill_end(desc); - dev_dbg(dchan->device->dev, "%s: desc final len: %d\n", - __func__, desc->bcount); - } - - /* - * Obtain and initialize a new descriptor - */ - desc = tsi721_desc_get(bdma_chan); - if (!desc) { - dev_err(dchan->device->dev, - "%s: Failed to get new descriptor for SG %d\n", - __func__, i); - goto err_desc_put; - } - - desc->destid = rext->destid; - desc->rio_addr = rio_addr; - desc->rio_addr_u = 0; - desc->bcount = sg_dma_len(sg); - - dev_dbg(dchan->device->dev, - "sg%d desc: 0x%llx, addr: 0x%llx len: %d\n", - i, (u64)desc->txd.phys, - (unsigned long long)sg_dma_address(sg), - sg_dma_len(sg)); - - dev_dbg(dchan->device->dev, - "bd_ptr = %p did=%d raddr=0x%llx\n", - desc->hw_desc, desc->destid, desc->rio_addr); - - err = tsi721_desc_fill_init(desc, sg, rtype, sys_size); - if (err) { - dev_err(dchan->device->dev, - "Failed to build desc: %d\n", err); - goto err_desc_put; - } - - next_addr = sg_dma_address(sg); - - if (!first) - first = desc; - else - list_add_tail(&desc->desc_node, &first->tx_list); + spin_lock_bh(&bdma_chan->lock); -entry_done: - if (sg_is_last(sg)) { - desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; - tsi721_desc_fill_end(desc); - dev_dbg(dchan->device->dev, "%s: desc final len: %d\n", - __func__, desc->bcount); - } else { - rio_addr += sg_dma_len(sg); - next_addr += sg_dma_len(sg); + list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) { + if (async_tx_test_ack(&desc->txd)) { + list_del_init(&desc->desc_node); + desc->destid = rext->destid; + desc->rio_addr = rext->rio_addr; + desc->rio_addr_u = 0; + desc->rtype = rtype; + desc->sg_len = sg_len; + desc->sg = sgl; + txd = &desc->txd; + txd->flags = flags; + break; } } - first->txd.cookie = -EBUSY; - desc->txd.flags = flags; - - return &first->txd; + spin_unlock_bh(&bdma_chan->lock); -err_desc_put: - tsi721_desc_put(bdma_chan, first); - return NULL; + return txd; } static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, @@ -802,23 +820,34 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_tx_desc *desc, *_d; + u32 dmac_int; LIST_HEAD(list); dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; + return -ENOSYS; spin_lock_bh(&bdma_chan->lock); - /* make sure to stop the transfer */ - iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL); + bdma_chan->active = false; + + if (!tsi721_dma_is_idle(bdma_chan)) { + /* make sure to stop the transfer */ + iowrite32(TSI721_DMAC_CTL_SUSP, + bdma_chan->regs + TSI721_DMAC_CTL); + + /* Wait until DMA channel 
stops */ + do { + dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); + } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); + } list_splice_init(&bdma_chan->active_list, &list); list_splice_init(&bdma_chan->queue, &list); list_for_each_entry_safe(desc, _d, &list, desc_node) - tsi721_dma_chain_complete(bdma_chan, desc); + tsi721_dma_tx_err(bdma_chan, desc); spin_unlock_bh(&bdma_chan->lock); @@ -828,22 +857,18 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, int tsi721_register_dma(struct tsi721_device *priv) { int i; - int nr_channels = TSI721_DMA_MAXCH; + int nr_channels = 0; int err; struct rio_mport *mport = priv->mport; - mport->dma.dev = &priv->pdev->dev; - mport->dma.chancnt = nr_channels; - INIT_LIST_HEAD(&mport->dma.channels); - for (i = 0; i < nr_channels; i++) { + for (i = 0; i < TSI721_DMA_MAXCH; i++) { struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; if (i == TSI721_DMACH_MAINT) continue; - bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ; bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); bdma_chan->dchan.device = &mport->dma; @@ -862,12 +887,15 @@ int tsi721_register_dma(struct tsi721_device *priv) (unsigned long)bdma_chan); list_add_tail(&bdma_chan->dchan.device_node, &mport->dma.channels); + nr_channels++; } + mport->dma.chancnt = nr_channels; dma_cap_zero(mport->dma.cap_mask); dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); + mport->dma.dev = &priv->pdev->dev; mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; mport->dma.device_free_chan_resources = tsi721_free_chan_resources; mport->dma.device_tx_status = tsi721_tx_status; diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index a54ba0494dd3..d7b87c64b7cd 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -1509,30 +1509,39 @@ EXPORT_SYMBOL_GPL(rio_route_clr_table); static bool rio_chan_filter(struct dma_chan *chan, void *arg) { - struct rio_dev *rdev = arg; + struct rio_mport *mport = arg; /* Check that DMA device belongs to the right MPORT */ - return (rdev->net->hport == - container_of(chan->device, struct rio_mport, dma)); + return mport == container_of(chan->device, struct rio_mport, dma); } /** - * rio_request_dma - request RapidIO capable DMA channel that supports - * specified target RapidIO device. - * @rdev: RIO device control structure + * rio_request_mport_dma - request RapidIO capable DMA channel associated + * with specified local RapidIO mport device. + * @mport: RIO mport to perform DMA data transfers * * Returns pointer to allocated DMA channel or NULL if failed. */ -struct dma_chan *rio_request_dma(struct rio_dev *rdev) +struct dma_chan *rio_request_mport_dma(struct rio_mport *mport) { dma_cap_mask_t mask; - struct dma_chan *dchan; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - dchan = dma_request_channel(mask, rio_chan_filter, rdev); + return dma_request_channel(mask, rio_chan_filter, mport); +} +EXPORT_SYMBOL_GPL(rio_request_mport_dma); - return dchan; +/** + * rio_request_dma - request RapidIO capable DMA channel that supports + * specified target RapidIO device. + * @rdev: RIO device associated with DMA transfer + * + * Returns pointer to allocated DMA channel or NULL if failed. 
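
These rio.c hunks split the old rdev-based request path into mport-level helpers, rio_request_mport_dma() above and rio_dma_prep_xfer() just below, so a caller no longer needs a struct rio_dev to reach a RapidIO DMA channel. A minimal consumer-side sketch under that assumption, using the standard dmaengine submit/issue flow; example_mport_write() and example_xfer_done() are illustrative names, RDW_DEFAULT is assumed as the write type, and error handling is trimmed:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

static void example_xfer_done(void *param)
{
	complete(param);
}

/* Write sg_len scatterlist entries to rio_addr on the device 'destid'. */
static int example_mport_write(struct rio_mport *mport, u16 destid,
			       struct scatterlist *sg, unsigned int sg_len,
			       u64 rio_addr)
{
	struct rio_dma_data data = {
		.sg = sg,
		.sg_len = sg_len,
		.rio_addr = rio_addr,
		.rio_addr_u = 0,
		.wr_type = RDW_DEFAULT,
	};
	struct dma_async_tx_descriptor *txd;
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_chan *dchan;
	int ret = 0;

	dchan = rio_request_mport_dma(mport);
	if (!dchan)
		return -ENODEV;

	/* DMA_CTRL_ACK marks the descriptor reusable by the driver once done */
	txd = rio_dma_prep_xfer(dchan, destid, &data, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd) {
		ret = -EIO;
		goto out;
	}

	txd->callback = example_xfer_done;
	txd->callback_param = &done;
	dmaengine_submit(txd);
	dma_async_issue_pending(dchan);
	wait_for_completion(&done);
out:
	rio_release_dma(dchan);
	return ret;
}
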
+ */ +struct dma_chan *rio_request_dma(struct rio_dev *rdev) +{ + return rio_request_mport_dma(rdev->net->hport); } EXPORT_SYMBOL_GPL(rio_request_dma); @@ -1547,10 +1556,10 @@ void rio_release_dma(struct dma_chan *dchan) EXPORT_SYMBOL_GPL(rio_release_dma); /** - * rio_dma_prep_slave_sg - RapidIO specific wrapper + * rio_dma_prep_xfer - RapidIO specific wrapper * for device_prep_slave_sg callback defined by DMAENGINE. - * @rdev: RIO device control structure * @dchan: DMA channel to configure + * @destid: target RapidIO device destination ID * @data: RIO specific data descriptor * @direction: DMA data transfer direction (TO or FROM the device) * @flags: dmaengine defined flags @@ -1560,11 +1569,10 @@ EXPORT_SYMBOL_GPL(rio_release_dma); * target RIO device. * Returns pointer to DMA transaction descriptor or NULL if failed. */ -struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, - struct dma_chan *dchan, struct rio_dma_data *data, +struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, + u16 destid, struct rio_dma_data *data, enum dma_transfer_direction direction, unsigned long flags) { - struct dma_async_tx_descriptor *txd = NULL; struct rio_dma_ext rio_ext; if (dchan->device->device_prep_slave_sg == NULL) { @@ -1572,15 +1580,35 @@ struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, return NULL; } - rio_ext.destid = rdev->destid; + rio_ext.destid = destid; rio_ext.rio_addr_u = data->rio_addr_u; rio_ext.rio_addr = data->rio_addr; rio_ext.wr_type = data->wr_type; - txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, - direction, flags, &rio_ext); + return dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, + direction, flags, &rio_ext); +} +EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); - return txd; +/** + * rio_dma_prep_slave_sg - RapidIO specific wrapper + * for device_prep_slave_sg callback defined by DMAENGINE. + * @rdev: RIO device control structure + * @dchan: DMA channel to configure + * @data: RIO specific data descriptor + * @direction: DMA data transfer direction (TO or FROM the device) + * @flags: dmaengine defined flags + * + * Initializes RapidIO capable DMA channel for the specified data transfer. + * Uses DMA channel private extension to pass information related to remote + * target RIO device. + * Returns pointer to DMA transaction descriptor or NULL if failed. + */ +struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, + struct dma_chan *dchan, struct rio_dma_data *data, + enum dma_transfer_direction direction, unsigned long flags) +{ + return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags); } EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 337634ad0562..6d77dcd7dcf6 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c @@ -319,7 +319,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev, struct regulator_config *config) { struct device_node *nproot, *np; - nproot = of_node_get(pdev->dev.parent->of_node); + nproot = pdev->dev.parent->of_node; if (!nproot) return -ENODEV; nproot = of_get_child_by_name(nproot, "regulators"); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 2dc8289e5dba..55d7b7b0f2e0 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -199,13 +199,14 @@ config REGULATOR_DA9210 interface. 
config REGULATOR_DA9211 - tristate "Dialog Semiconductor DA9211/DA9212 regulator" + tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214 regulator" depends on I2C select REGMAP_I2C help - Say y here to support for the Dialog Semiconductor DA9211/DA9212. - The DA9211/DA9212 is a multi-phase synchronous step down - converter 12A DC-DC Buck controlled through an I2C + Say y here to support for the Dialog Semiconductor DA9211/DA9212 + /DA9213/DA9214. + The DA9211/DA9212/DA9213/DA9214 is a multi-phase synchronous + step down converter 12A or 16A DC-DC Buck controlled through an I2C interface. config REGULATOR_DBX500_PRCMU @@ -240,6 +241,23 @@ config REGULATOR_GPIO and the platform has to provide a mapping of GPIO-states to target volts/amps. +config REGULATOR_HI6421 + tristate "HiSilicon Hi6421 PMIC voltage regulator support" + depends on MFD_HI6421_PMIC && OF + help + This driver provides support for the voltage regulators on the + HiSilicon Hi6421 PMU / Codec IC. + Hi6421 is a multi-function device which, on regulator part, provides + 21 general purpose LDOs, 3 dedicated LDOs, and 5 BUCKs. All + of them come with support to either ECO (idle) or sleep mode. + +config REGULATOR_ISL9305 + tristate "Intersil ISL9305 regulator" + depends on I2C + select REGMAP_I2C + help + This driver supports ISL9305 voltage regulator chip. + config REGULATOR_ISL6271A tristate "Intersil ISL6271A Power regulator" depends on I2C @@ -387,6 +405,15 @@ config REGULATOR_MAX77693 and one current regulator 'CHARGER'. This is suitable for Exynos-4x12 chips. +config REGULATOR_MAX77802 + tristate "Maxim 77802 regulator" + depends on MFD_MAX77686 + help + This driver controls a Maxim 77802 regulator + via I2C bus. The provided regulator is suitable for + Exynos5420/Exynos5800 SoCs to control various voltages. + It includes support for control of voltage and ramp speed. + config REGULATOR_MC13XXX_CORE tristate @@ -449,6 +476,25 @@ config REGULATOR_PFUZE100 Say y here to support the regulators found on the Freescale PFUZE100/PFUZE200 PMIC. +config REGULATOR_PWM + tristate "PWM voltage regulator" + depends on PWM + help + This driver supports PWM controlled voltage regulators. PWM + duty cycle can increase or decrease the voltage. + +config REGULATOR_QCOM_RPM + tristate "Qualcomm RPM regulator driver" + depends on MFD_QCOM_RPM + help + If you say yes to this option, support will be included for the + regulators exposed by the Resource Power Manager found in Qualcomm + 8660, 8960 and 8064 based devices. + + Say M here if you want to include support for the regulators on the + Qualcomm RPM as a module. The module will be named + "qcom_rpm-regulator". + config REGULATOR_RC5T583 tristate "RICOH RC5T583 Power regulators" depends on MFD_RC5T583 @@ -459,6 +505,22 @@ config REGULATOR_RC5T583 through regulator interface. The device supports multiple DCDC/LDO outputs which can be controlled by i2c communication. +config REGULATOR_RK808 + tristate "Rockchip RK808 Power regulators" + depends on MFD_RK808 + help + Select this option to enable the power regulator of ROCKCHIP + PMIC RK808. + This driver supports the control of different power rails of device + through regulator interface. The device supports multiple DCDC/LDO + outputs which can be controlled by i2c communication. + +config REGULATOR_RN5T618 + tristate "Ricoh RN5T618 voltage regulators" + depends on MFD_RN5T618 + help + Say y here to support the regulators found on Ricoh RN5T618 PMIC. 
+ config REGULATOR_S2MPA01 tristate "Samsung S2MPA01 voltage regulator" depends on MFD_SEC_CORE @@ -483,11 +545,16 @@ config REGULATOR_S5M8767 via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and supports DVS mode with 8bits of output voltage control. -config REGULATOR_ST_PWM - tristate "STMicroelectronics PWM voltage regulator" - depends on ARCH_STI +config REGULATOR_SKY81452 + tristate "Skyworks Solutions SKY81452 voltage regulator" + depends on SKY81452 help - This driver supports ST's PWM controlled voltage regulators. + This driver supports Skyworks SKY81452 voltage output regulator + via I2C bus. SKY81452 has one voltage linear regulator can be + programmed from 4.5V to 20V. + + This driver can also be built as a module. If so, the module + will be called sky81452-regulator. config REGULATOR_TI_ABB tristate "TI Adaptive Body Bias on-chip LDO" diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index aa4a6aa7b558..1029ed39c512 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -32,7 +32,9 @@ obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o +obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o +obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o @@ -52,20 +54,25 @@ obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o +obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o +obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o +obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o obj-$(CONFIG_REGULATOR_PBIAS) += pbias-regulator.o obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o +obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o +obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o -obj-$(CONFIG_REGULATOR_ST_PWM) += st-pwm.o +obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c index b47283f91e2d..8459b0b648cd 100644 --- a/drivers/regulator/as3711-regulator.c +++ b/drivers/regulator/as3711-regulator.c @@ -22,12 +22,10 @@ struct as3711_regulator_info { struct regulator_desc desc; - unsigned int max_uV; }; struct as3711_regulator { struct as3711_regulator_info *reg_info; - struct regulator_dev *rdev; }; /* @@ -132,39 +130,37 @@ static const struct regulator_linear_range as3711_dldo_ranges[] = { REGULATOR_LINEAR_RANGE(1750000, 0x20, 0x3f, 50000), }; -#define 
AS3711_REG(_id, _en_reg, _en_bit, _vmask, _vshift, _min_uV, _max_uV, _sfx) \ - [AS3711_REGULATOR_ ## _id] = { \ - .desc = { \ - .name = "as3711-regulator-" # _id, \ - .id = AS3711_REGULATOR_ ## _id, \ - .n_voltages = (_vmask + 1), \ - .ops = &as3711_ ## _sfx ## _ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .vsel_reg = AS3711_ ## _id ## _VOLTAGE, \ - .vsel_mask = _vmask << _vshift, \ - .enable_reg = AS3711_ ## _en_reg, \ - .enable_mask = BIT(_en_bit), \ - .min_uV = _min_uV, \ - .linear_ranges = as3711_ ## _sfx ## _ranges, \ - .n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges), \ - }, \ - .max_uV = _max_uV, \ +#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _sfx) \ + [AS3711_REGULATOR_ ## _id] = { \ + .desc = { \ + .name = "as3711-regulator-" # _id, \ + .id = AS3711_REGULATOR_ ## _id, \ + .n_voltages = (_vmask + 1), \ + .ops = &as3711_ ## _sfx ## _ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .vsel_reg = AS3711_ ## _id ## _VOLTAGE, \ + .vsel_mask = _vmask, \ + .enable_reg = AS3711_ ## _en_reg, \ + .enable_mask = BIT(_en_bit), \ + .linear_ranges = as3711_ ## _sfx ## _ranges, \ + .n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges), \ + }, \ } static struct as3711_regulator_info as3711_reg_info[] = { - AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, 0, 612500, 3350000, sd), - AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, 0, 612500, 3350000, sd), - AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, 0, 612500, 3350000, sd), - AS3711_REG(SD_4, SD_CONTROL, 3, 0x7f, 0, 612500, 3350000, sd), - AS3711_REG(LDO_1, LDO_1_VOLTAGE, 7, 0x1f, 0, 1200000, 3300000, aldo), - AS3711_REG(LDO_2, LDO_2_VOLTAGE, 7, 0x1f, 0, 1200000, 3300000, aldo), - AS3711_REG(LDO_3, LDO_3_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo), - AS3711_REG(LDO_4, LDO_4_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo), - AS3711_REG(LDO_5, LDO_5_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo), - AS3711_REG(LDO_6, LDO_6_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo), - AS3711_REG(LDO_7, LDO_7_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo), - AS3711_REG(LDO_8, LDO_8_VOLTAGE, 7, 0x3f, 0, 900000, 3300000, dldo), + AS3711_REG(SD_1, SD_CONTROL, 0, 0x7f, sd), + AS3711_REG(SD_2, SD_CONTROL, 1, 0x7f, sd), + AS3711_REG(SD_3, SD_CONTROL, 2, 0x7f, sd), + AS3711_REG(SD_4, SD_CONTROL, 3, 0x7f, sd), + AS3711_REG(LDO_1, LDO_1_VOLTAGE, 7, 0x1f, aldo), + AS3711_REG(LDO_2, LDO_2_VOLTAGE, 7, 0x1f, aldo), + AS3711_REG(LDO_3, LDO_3_VOLTAGE, 7, 0x3f, dldo), + AS3711_REG(LDO_4, LDO_4_VOLTAGE, 7, 0x3f, dldo), + AS3711_REG(LDO_5, LDO_5_VOLTAGE, 7, 0x3f, dldo), + AS3711_REG(LDO_6, LDO_6_VOLTAGE, 7, 0x3f, dldo), + AS3711_REG(LDO_7, LDO_7_VOLTAGE, 7, 0x3f, dldo), + AS3711_REG(LDO_8, LDO_8_VOLTAGE, 7, 0x3f, dldo), /* StepUp output voltage depends on supplying regulator */ }; @@ -263,7 +259,6 @@ static int as3711_regulator_probe(struct platform_device *pdev) ri->desc.name); return PTR_ERR(rdev); } - reg->rdev = rdev; } platform_set_drvdata(pdev, regs); return 0; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 004aadb7bcc1..2e1010a34ddc 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -245,7 +245,7 @@ static int axp20x_regulator_probe(struct platform_device *pdev) for (i = 0; i < AXP20X_REG_ID_MAX; i++) { init_data = axp20x_matches[i].init_data; - config.dev = &pdev->dev; + config.dev = pdev->dev.parent; config.init_data = init_data; config.regmap = axp20x->regmap; config.of_node = axp20x_matches[i].of_node; diff --git a/drivers/regulator/bcm590xx-regulator.c 
b/drivers/regulator/bcm590xx-regulator.c index 5d1fd6f3d10a..fe6ac69549a6 100644 --- a/drivers/regulator/bcm590xx-regulator.c +++ b/drivers/regulator/bcm590xx-regulator.c @@ -202,7 +202,6 @@ static struct bcm590xx_info bcm590xx_regs[] = { struct bcm590xx_reg { struct regulator_desc *desc; struct bcm590xx *mfd; - struct bcm590xx_info **info; }; static int bcm590xx_get_vsel_register(int id) @@ -389,11 +388,6 @@ static int bcm590xx_probe(struct platform_device *pdev) if (!pmu->desc) return -ENOMEM; - pmu->info = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS * - sizeof(struct bcm590xx_info *), GFP_KERNEL); - if (!pmu->info) - return -ENOMEM; - info = bcm590xx_regs; for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) { @@ -403,8 +397,6 @@ static int bcm590xx_probe(struct platform_device *pdev) reg_data = NULL; /* Register the regulators */ - pmu->info[i] = info; - pmu->desc[i].name = info->name; pmu->desc[i].supply_name = info->vin_name; pmu->desc[i].id = i; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index a3c3785901f5..cd87c0c37034 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -102,7 +102,7 @@ static int _regulator_disable(struct regulator_dev *rdev); static int _regulator_get_voltage(struct regulator_dev *rdev); static int _regulator_get_current_limit(struct regulator_dev *rdev); static unsigned int _regulator_get_mode(struct regulator_dev *rdev); -static void _notifier_call_chain(struct regulator_dev *rdev, +static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV); @@ -839,7 +839,7 @@ static void print_constraints(struct regulator_dev *rdev) static int machine_constraints_voltage(struct regulator_dev *rdev, struct regulation_constraints *constraints) { - struct regulator_ops *ops = rdev->desc->ops; + const struct regulator_ops *ops = rdev->desc->ops; int ret; /* do we need to apply the constraint voltage */ @@ -938,7 +938,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, static int machine_constraints_current(struct regulator_dev *rdev, struct regulation_constraints *constraints) { - struct regulator_ops *ops = rdev->desc->ops; + const struct regulator_ops *ops = rdev->desc->ops; int ret; if (!constraints->min_uA && !constraints->max_uA) @@ -982,7 +982,7 @@ static int set_machine_constraints(struct regulator_dev *rdev, const struct regulation_constraints *constraints) { int ret = 0; - struct regulator_ops *ops = rdev->desc->ops; + const struct regulator_ops *ops = rdev->desc->ops; if (constraints) rdev->constraints = kmemdup(constraints, sizeof(*constraints), @@ -1759,6 +1759,45 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) return 0; } +/** + * _regulator_enable_delay - a delay helper function + * @delay: time to delay in microseconds + * + * Delay for the requested amount of time as per the guidelines in: + * + * Documentation/timers/timers-howto.txt + * + * The assumption here is that regulators will never be enabled in + * atomic context and therefore sleeping functions can be used. + */ +static void _regulator_enable_delay(unsigned int delay) +{ + unsigned int ms = delay / 1000; + unsigned int us = delay % 1000; + + if (ms > 0) { + /* + * For small enough values, handle super-millisecond + * delays in the usleep_range() call below. 
+ */ + if (ms < 20) + us += ms * 1000; + else + msleep(ms); + } + + /* + * Give the scheduler some room to coalesce with any other + * wakeup sources. For delays shorter than 10 us, don't even + * bother setting up high-resolution timers and just busy- + * loop. + */ + if (us >= 10) + usleep_range(us, us + 100); + else + udelay(us); +} + static int _regulator_do_enable(struct regulator_dev *rdev) { int ret, delay; @@ -1774,6 +1813,31 @@ static int _regulator_do_enable(struct regulator_dev *rdev) trace_regulator_enable(rdev_get_name(rdev)); + if (rdev->desc->off_on_delay) { + /* if needed, keep a distance of off_on_delay from last time + * this regulator was disabled. + */ + unsigned long start_jiffy = jiffies; + unsigned long intended, max_delay, remaining; + + max_delay = usecs_to_jiffies(rdev->desc->off_on_delay); + intended = rdev->last_off_jiffy + max_delay; + + if (time_before(start_jiffy, intended)) { + /* calc remaining jiffies to deal with one-time + * timer wrapping. + * in case of multiple timer wrapping, either it can be + * detected by out-of-range remaining, or it cannot be + * detected and we gets a panelty of + * _regulator_enable_delay(). + */ + remaining = intended - start_jiffy; + if (remaining <= max_delay) + _regulator_enable_delay( + jiffies_to_usecs(remaining)); + } + } + if (rdev->ena_pin) { ret = regulator_ena_gpio_ctrl(rdev, true); if (ret < 0) @@ -1792,40 +1856,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev) * together. */ trace_regulator_enable_delay(rdev_get_name(rdev)); - /* - * Delay for the requested amount of time as per the guidelines in: - * - * Documentation/timers/timers-howto.txt - * - * The assumption here is that regulators will never be enabled in - * atomic context and therefore sleeping functions can be used. - */ - if (delay) { - unsigned int ms = delay / 1000; - unsigned int us = delay % 1000; - - if (ms > 0) { - /* - * For small enough values, handle super-millisecond - * delays in the usleep_range() call below. - */ - if (ms < 20) - us += ms * 1000; - else - msleep(ms); - } - - /* - * Give the scheduler some room to coalesce with any other - * wakeup sources. For delays shorter than 10 us, don't even - * bother setting up high-resolution timers and just busy- - * loop. - */ - if (us >= 10) - usleep_range(us, us + 100); - else - udelay(us); - } + _regulator_enable_delay(delay); trace_regulator_enable_complete(rdev_get_name(rdev)); @@ -1919,6 +1950,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev) return ret; } + /* cares about last_off_jiffy only if off_on_delay is required by + * device. 
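
With _regulator_enable_delay() and the last_off_jiffy bookkeeping above, a driver declares a required off-to-on gap simply by setting off_on_delay (in microseconds) in its regulator_desc, as the hi6421 driver added later in this diff does. A minimal descriptor sketch under that assumption; the ops, name and register values are placeholders, not a real device:

#include <linux/module.h>
#include <linux/regulator/driver.h>

/* would hold the usual regmap helpers (regulator_is_enabled_regmap, ...) */
static const struct regulator_ops example_ldo_ops;

static const struct regulator_desc example_ldo_desc = {
	.name		= "example-ldo",
	.ops		= &example_ldo_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.enable_reg	= 0x24,		/* placeholder register/bit */
	.enable_mask	= 0x10,
	.enable_time	= 350,		/* us for the output to ramp up,
					 * applied via _regulator_enable_delay() */
	.off_on_delay	= 20000,	/* us that must separate a disable from
					 * the next enable; the core enforces it
					 * using last_off_jiffy */
};
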
+ */ + if (rdev->desc->off_on_delay) + rdev->last_off_jiffy = jiffies; + trace_regulator_disable_complete(rdev_get_name(rdev)); return 0; @@ -2208,9 +2245,9 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages); */ int regulator_list_voltage(struct regulator *regulator, unsigned selector) { - struct regulator_dev *rdev = regulator->rdev; - struct regulator_ops *ops = rdev->desc->ops; - int ret; + struct regulator_dev *rdev = regulator->rdev; + const struct regulator_ops *ops = rdev->desc->ops; + int ret; if (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1 && !selector) return rdev->desc->fixed_uV; @@ -2270,8 +2307,8 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator, unsigned *vsel_reg, unsigned *vsel_mask) { - struct regulator_dev *rdev = regulator->rdev; - struct regulator_ops *ops = rdev->desc->ops; + struct regulator_dev *rdev = regulator->rdev; + const struct regulator_ops *ops = rdev->desc->ops; if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap) return -EOPNOTSUPP; @@ -2297,8 +2334,8 @@ EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register); int regulator_list_hardware_vsel(struct regulator *regulator, unsigned selector) { - struct regulator_dev *rdev = regulator->rdev; - struct regulator_ops *ops = rdev->desc->ops; + struct regulator_dev *rdev = regulator->rdev; + const struct regulator_ops *ops = rdev->desc->ops; if (selector >= rdev->desc->n_voltages) return -EINVAL; @@ -2369,6 +2406,55 @@ int regulator_is_supported_voltage(struct regulator *regulator, } EXPORT_SYMBOL_GPL(regulator_is_supported_voltage); +static int _regulator_call_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, + unsigned *selector) +{ + struct pre_voltage_change_data data; + int ret; + + data.old_uV = _regulator_get_voltage(rdev); + data.min_uV = min_uV; + data.max_uV = max_uV; + ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE, + &data); + if (ret & NOTIFY_STOP_MASK) + return -EINVAL; + + ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, selector); + if (ret >= 0) + return ret; + + _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE, + (void *)data.old_uV); + + return ret; +} + +static int _regulator_call_set_voltage_sel(struct regulator_dev *rdev, + int uV, unsigned selector) +{ + struct pre_voltage_change_data data; + int ret; + + data.old_uV = _regulator_get_voltage(rdev); + data.min_uV = uV; + data.max_uV = uV; + ret = _notifier_call_chain(rdev, REGULATOR_EVENT_PRE_VOLTAGE_CHANGE, + &data); + if (ret & NOTIFY_STOP_MASK) + return -EINVAL; + + ret = rdev->desc->ops->set_voltage_sel(rdev, selector); + if (ret >= 0) + return ret; + + _notifier_call_chain(rdev, REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE, + (void *)data.old_uV); + + return ret; +} + static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { @@ -2396,8 +2482,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, } if (rdev->desc->ops->set_voltage) { - ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, - &selector); + ret = _regulator_call_set_voltage(rdev, min_uV, max_uV, + &selector); if (ret >= 0) { if (rdev->desc->ops->list_voltage) @@ -2432,8 +2518,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, if (old_selector == selector) ret = 0; else - ret = rdev->desc->ops->set_voltage_sel( - rdev, ret); + ret = _regulator_call_set_voltage_sel( + rdev, best_val, selector); } else { ret = -EINVAL; } @@ -2572,8 +2658,8 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage); int regulator_set_voltage_time(struct 
regulator *regulator, int old_uV, int new_uV) { - struct regulator_dev *rdev = regulator->rdev; - struct regulator_ops *ops = rdev->desc->ops; + struct regulator_dev *rdev = regulator->rdev; + const struct regulator_ops *ops = rdev->desc->ops; int old_sel = -1; int new_sel = -1; int voltage; @@ -3079,11 +3165,11 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier); /* notify regulator consumers and downstream regulator consumers. * Note mutex must be held by caller. */ -static void _notifier_call_chain(struct regulator_dev *rdev, +static int _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { /* call rdev chain first */ - blocking_notifier_call_chain(&rdev->notifier, event, data); + return blocking_notifier_call_chain(&rdev->notifier, event, data); } /** @@ -3336,9 +3422,9 @@ EXPORT_SYMBOL_GPL(regulator_mode_to_status); */ static int add_regulator_attributes(struct regulator_dev *rdev) { - struct device *dev = &rdev->dev; - struct regulator_ops *ops = rdev->desc->ops; - int status = 0; + struct device *dev = &rdev->dev; + const struct regulator_ops *ops = rdev->desc->ops; + int status = 0; /* some attributes need specific methods to be displayed */ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || @@ -3516,12 +3602,17 @@ regulator_register(const struct regulator_desc *regulator_desc, return ERR_PTR(-EINVAL); } - init_data = config->init_data; - rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL); if (rdev == NULL) return ERR_PTR(-ENOMEM); + init_data = regulator_of_get_init_data(dev, regulator_desc, + &rdev->dev.of_node); + if (!init_data) { + init_data = config->init_data; + rdev->dev.of_node = of_node_get(config->of_node); + } + mutex_lock(®ulator_list_mutex); mutex_init(&rdev->mutex); @@ -3548,7 +3639,6 @@ regulator_register(const struct regulator_desc *regulator_desc, /* register with sysfs */ rdev->dev.class = ®ulator_class; - rdev->dev.of_node = of_node_get(config->of_node); rdev->dev.parent = dev; dev_set_name(&rdev->dev, "regulator.%d", atomic_inc_return(®ulator_no) - 1); @@ -3905,7 +3995,7 @@ core_initcall(regulator_init); static int __init regulator_init_complete(void) { struct regulator_dev *rdev; - struct regulator_ops *ops; + const struct regulator_ops *ops; struct regulation_constraints *c; int enabled, ret; diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index fdb6ea8ae7e6..00033625a09c 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c @@ -422,9 +422,9 @@ static int da9052_regulator_probe(struct platform_device *pdev) config.init_data = pdata->regulators[pdev->id]; } else { #ifdef CONFIG_OF - struct device_node *nproot, *np; + struct device_node *nproot = da9052->dev->of_node; + struct device_node *np; - nproot = of_node_get(da9052->dev->of_node); if (!nproot) return -ENODEV; diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index 1482adafa1ad..c78d2106d6cb 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -1,5 +1,5 @@ /* - * da9211-regulator.c - Regulator device driver for DA9211 + * da9211-regulator.c - Regulator device driver for DA9211/DA9213 * Copyright (C) 2014 Dialog Semiconductor Ltd. 
* * This library is free software; you can redistribute it and/or @@ -24,9 +24,14 @@ #include <linux/regmap.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/regulator/of_regulator.h> #include <linux/regulator/da9211.h> #include "da9211-regulator.h" +/* DEVICE IDs */ +#define DA9211_DEVICE_ID 0x22 +#define DA9213_DEVICE_ID 0x23 + #define DA9211_BUCK_MODE_SLEEP 1 #define DA9211_BUCK_MODE_SYNC 2 #define DA9211_BUCK_MODE_AUTO 3 @@ -42,6 +47,7 @@ struct da9211 { struct regulator_dev *rdev[DA9211_MAX_REGULATORS]; int num_regulator; int chip_irq; + int chip_id; }; static const struct regmap_range_cfg da9211_regmap_range[] = { @@ -52,14 +58,14 @@ static const struct regmap_range_cfg da9211_regmap_range[] = { .window_start = 0, .window_len = 256, .range_min = 0, - .range_max = 2*256, + .range_max = 5*128, }, }; static const struct regmap_config da9211_regmap_config = { .reg_bits = 8, .val_bits = 8, - .max_register = 2 * 256, + .max_register = 5 * 128, .ranges = da9211_regmap_range, .num_ranges = ARRAY_SIZE(da9211_regmap_range), }; @@ -69,11 +75,20 @@ static const struct regmap_config da9211_regmap_config = { #define DA9211_MAX_MV 1570 #define DA9211_STEP_MV 10 -/* Current limits for buck (uA) indices corresponds with register values */ +/* Current limits for DA9211 buck (uA) indices + * corresponds with register values + */ static const int da9211_current_limits[] = { 2000000, 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000, 4800000, 5000000 }; +/* Current limits for DA9213 buck (uA) indices + * corresponds with register values + */ +static const int da9213_current_limits[] = { + 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, + 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000 +}; static unsigned int da9211_buck_get_mode(struct regulator_dev *rdev) { @@ -129,12 +144,26 @@ static int da9211_set_current_limit(struct regulator_dev *rdev, int min, { int id = rdev_get_id(rdev); struct da9211 *chip = rdev_get_drvdata(rdev); - int i; + int i, max_size; + const int *current_limits; + + switch (chip->chip_id) { + case DA9211: + current_limits = da9211_current_limits; + max_size = ARRAY_SIZE(da9211_current_limits)-1; + break; + case DA9213: + current_limits = da9213_current_limits; + max_size = ARRAY_SIZE(da9213_current_limits)-1; + break; + default: + return -EINVAL; + } /* search for closest to maximum */ - for (i = ARRAY_SIZE(da9211_current_limits)-1; i >= 0; i--) { - if (min <= da9211_current_limits[i] && - max >= da9211_current_limits[i]) { + for (i = max_size; i >= 0; i--) { + if (min <= current_limits[i] && + max >= current_limits[i]) { return regmap_update_bits(chip->regmap, DA9211_REG_BUCK_ILIM, (0x0F << id*4), (i << id*4)); @@ -150,14 +179,28 @@ static int da9211_get_current_limit(struct regulator_dev *rdev) struct da9211 *chip = rdev_get_drvdata(rdev); unsigned int data; int ret; + const int *current_limits; + + switch (chip->chip_id) { + case DA9211: + current_limits = da9211_current_limits; + break; + case DA9213: + current_limits = da9213_current_limits; + break; + default: + return -EINVAL; + } ret = regmap_read(chip->regmap, DA9211_REG_BUCK_ILIM, &data); if (ret < 0) return ret; - /* select one of 16 values: 0000 (2000mA) to 1111 (5000mA) */ + /* select one of 16 values: 0000 (2000mA or 3000mA) + * to 1111 (5000mA or 6000mA). 
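
With the per-chip tables above, the driver walks its current-limit table from the highest entry down and programs the largest limit that fits the requested window, so the same consumer request resolves to different register indices on DA9211 and DA9213. A consumer-side sketch; the "buck" supply name and example_set_buck_limit() are hypothetical, only the standard regulator consumer API is used:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_set_buck_limit(struct device *dev)
{
	struct regulator *reg;
	int ret;

	reg = devm_regulator_get(dev, "buck");	/* hypothetical consumer id */
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/*
	 * Ask for 3.4 A .. 3.8 A: da9211_set_current_limit() picks 3.8 A,
	 * i.e. index 9 in da9211_current_limits[] or index 4 in
	 * da9213_current_limits[], depending on the detected chip_id.
	 */
	ret = regulator_set_current_limit(reg, 3400000, 3800000);
	if (ret)
		return ret;

	return regulator_get_current_limit(reg);	/* reports 3800000 */
}
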
+ */ data = (data >> id*4) & 0x0F; - return da9211_current_limits[data]; + return current_limits[data]; } static struct regulator_ops da9211_buck_ops = { @@ -194,6 +237,59 @@ static struct regulator_desc da9211_regulators[] = { DA9211_BUCK(BUCKB), }; +#ifdef CONFIG_OF +static struct of_regulator_match da9211_matches[] = { + [DA9211_ID_BUCKA] = { .name = "BUCKA" }, + [DA9211_ID_BUCKB] = { .name = "BUCKB" }, + }; + +static struct da9211_pdata *da9211_parse_regulators_dt( + struct device *dev) +{ + struct da9211_pdata *pdata; + struct device_node *node; + int i, num, n; + + node = of_get_child_by_name(dev->of_node, "regulators"); + if (!node) { + dev_err(dev, "regulators node not found\n"); + return ERR_PTR(-ENODEV); + } + + num = of_regulator_match(dev, node, da9211_matches, + ARRAY_SIZE(da9211_matches)); + of_node_put(node); + if (num < 0) { + dev_err(dev, "Failed to match regulators\n"); + return ERR_PTR(-EINVAL); + } + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->num_buck = num; + + n = 0; + for (i = 0; i < ARRAY_SIZE(da9211_matches); i++) { + if (!da9211_matches[i].init_data) + continue; + + pdata->init_data[n] = da9211_matches[i].init_data; + + n++; + } + + return pdata; +} +#else +static struct da9211_pdata *da9211_parse_regulators_dt( + struct device *dev) +{ + return ERR_PTR(-ENODEV); +} +#endif + static irqreturn_t da9211_irq_handler(int irq, void *data) { struct da9211 *chip = data; @@ -264,13 +360,11 @@ static int da9211_regulator_init(struct da9211 *chip) } for (i = 0; i < chip->num_regulator; i++) { - if (chip->pdata) - config.init_data = - &(chip->pdata->init_data[i]); - + config.init_data = chip->pdata->init_data[i]; config.dev = chip->dev; config.driver_data = chip; config.regmap = chip->regmap; + config.of_node = chip->dev->of_node; chip->rdev[i] = devm_regulator_register(chip->dev, &da9211_regulators[i], &config); @@ -282,7 +376,7 @@ static int da9211_regulator_init(struct da9211 *chip) if (chip->chip_irq != 0) { ret = regmap_update_bits(chip->regmap, - DA9211_REG_MASK_B, DA9211_M_OV_CURR_A << i, 1); + DA9211_REG_MASK_B, DA9211_M_OV_CURR_A << i, 0); if (ret < 0) { dev_err(chip->dev, "Failed to update mask reg: %d\n", ret); @@ -293,6 +387,7 @@ static int da9211_regulator_init(struct da9211 *chip) return 0; } + /* * I2C driver interface functions */ @@ -301,14 +396,17 @@ static int da9211_i2c_probe(struct i2c_client *i2c, { struct da9211 *chip; int error, ret; + unsigned int data; chip = devm_kzalloc(&i2c->dev, sizeof(struct da9211), GFP_KERNEL); + if (!chip) + return -ENOMEM; chip->dev = &i2c->dev; chip->regmap = devm_regmap_init_i2c(i2c, &da9211_regmap_config); if (IS_ERR(chip->regmap)) { error = PTR_ERR(chip->regmap); - dev_err(&i2c->dev, "Failed to allocate register map: %d\n", + dev_err(chip->dev, "Failed to allocate register map: %d\n", error); return error; } @@ -316,11 +414,33 @@ static int da9211_i2c_probe(struct i2c_client *i2c, i2c_set_clientdata(i2c, chip); chip->pdata = i2c->dev.platform_data; - if (!chip->pdata) { - dev_err(&i2c->dev, "No platform init data supplied\n"); + + ret = regmap_read(chip->regmap, DA9211_REG_DEVICE_ID, &data); + if (ret < 0) { + dev_err(chip->dev, "Failed to read DEVICE_ID reg: %d\n", ret); + return ret; + } + + switch (data) { + case DA9211_DEVICE_ID: + chip->chip_id = DA9211; + break; + case DA9213_DEVICE_ID: + chip->chip_id = DA9213; + break; + default: + dev_err(chip->dev, "Unsupported device id = 0x%x.\n", data); return -ENODEV; } + if (!chip->pdata) + chip->pdata = 
da9211_parse_regulators_dt(chip->dev); + + if (IS_ERR(chip->pdata)) { + dev_err(chip->dev, "No regulators defined for the platform\n"); + return PTR_ERR(chip->pdata); + } + chip->chip_irq = i2c->irq; if (chip->chip_irq != 0) { @@ -340,22 +460,32 @@ static int da9211_i2c_probe(struct i2c_client *i2c, ret = da9211_regulator_init(chip); if (ret < 0) - dev_err(&i2c->dev, "Failed to initialize regulator: %d\n", ret); + dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret); return ret; } static const struct i2c_device_id da9211_i2c_id[] = { - {"da9211", 0}, + {"da9211", DA9211}, + {"da9213", DA9213}, {}, }; - MODULE_DEVICE_TABLE(i2c, da9211_i2c_id); +#ifdef CONFIG_OF +static const struct of_device_id da9211_dt_ids[] = { + { .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] }, + { .compatible = "dlg,da9213", .data = &da9211_i2c_id[1] }, + {}, +}; +MODULE_DEVICE_TABLE(of, da9211_dt_ids); +#endif + static struct i2c_driver da9211_regulator_driver = { .driver = { .name = "da9211", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(da9211_dt_ids), }, .probe = da9211_i2c_probe, .id_table = da9211_i2c_id, @@ -364,5 +494,5 @@ static struct i2c_driver da9211_regulator_driver = { module_i2c_driver(da9211_regulator_driver); MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>"); -MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211"); +MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h index 88b1769e8058..93fa9df2721c 100644 --- a/drivers/regulator/da9211-regulator.h +++ b/drivers/regulator/da9211-regulator.h @@ -1,5 +1,5 @@ /* - * da9211-regulator.h - Regulator definitions for DA9211 + * da9211-regulator.h - Regulator definitions for DA9211/DA9213 * Copyright (C) 2014 Dialog Semiconductor Ltd. 
* * This library is free software; you can redistribute it and/or @@ -53,12 +53,15 @@ /* BUCK Phase Selection*/ #define DA9211_REG_CONFIG_E 0x147 +/* Device ID */ +#define DA9211_REG_DEVICE_ID 0x201 + /* * Registers bits */ /* DA9211_REG_PAGE_CON (addr=0x00) */ #define DA9211_REG_PAGE_SHIFT 1 -#define DA9211_REG_PAGE_MASK 0x02 +#define DA9211_REG_PAGE_MASK 0x06 /* On I2C registers 0x00 - 0xFF */ #define DA9211_REG_PAGE0 0 /* On I2C registers 0x100 - 0x1FF */ diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index 714fd9a89aa1..f8e4257aef92 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -18,6 +18,8 @@ #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> +#include <linux/of_device.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regmap.h> @@ -50,6 +52,11 @@ #define FAN53555_NVOLTAGES 64 /* Numbers of voltages */ +enum fan53555_vendor { + FAN53555_VENDOR_FAIRCHILD = 0, + FAN53555_VENDOR_SILERGY, +}; + /* IC Type */ enum { FAN53555_CHIP_ID_00 = 0, @@ -60,7 +67,12 @@ enum { FAN53555_CHIP_ID_05, }; +enum { + SILERGY_SYR82X = 8, +}; + struct fan53555_device_info { + enum fan53555_vendor vendor; struct regmap *regmap; struct device *dev; struct regulator_desc desc; @@ -135,6 +147,38 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_NORMAL; } +static int slew_rates[] = { + 64000, + 32000, + 16000, + 8000, + 4000, + 2000, + 1000, + 500, +}; + +static int fan53555_set_ramp(struct regulator_dev *rdev, int ramp) +{ + struct fan53555_device_info *di = rdev_get_drvdata(rdev); + int regval = -1, i; + + for (i = 0; i < ARRAY_SIZE(slew_rates); i++) { + if (ramp <= slew_rates[i]) + regval = i; + else + break; + } + + if (regval < 0) { + dev_err(di->dev, "unsupported ramp value %d\n", ramp); + return -EINVAL; + } + + return regmap_update_bits(di->regmap, FAN53555_CONTROL, + CTL_SLEW_MASK, regval << CTL_SLEW_SHIFT); +} + static struct regulator_ops fan53555_regulator_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -146,8 +190,50 @@ static struct regulator_ops fan53555_regulator_ops = { .is_enabled = regulator_is_enabled_regmap, .set_mode = fan53555_set_mode, .get_mode = fan53555_get_mode, + .set_ramp_delay = fan53555_set_ramp, }; +static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) +{ + /* Init voltage range and step */ + switch (di->chip_id) { + case FAN53555_CHIP_ID_00: + case FAN53555_CHIP_ID_01: + case FAN53555_CHIP_ID_03: + case FAN53555_CHIP_ID_05: + di->vsel_min = 600000; + di->vsel_step = 10000; + break; + case FAN53555_CHIP_ID_04: + di->vsel_min = 603000; + di->vsel_step = 12826; + break; + default: + dev_err(di->dev, + "Chip ID %d not supported!\n", di->chip_id); + return -EINVAL; + } + + return 0; +} + +static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di) +{ + /* Init voltage range and step */ + switch (di->chip_id) { + case SILERGY_SYR82X: + di->vsel_min = 712500; + di->vsel_step = 12500; + break; + default: + dev_err(di->dev, + "Chip ID %d not supported!\n", di->chip_id); + return -EINVAL; + } + + return 0; +} + /* For 00,01,03,05 options: * VOUT = 0.60V + NSELx * 10mV, from 0.60 to 1.23V. 
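
These VOUT formulas are what fan53555_voltages_setup_fairchild() and fan53555_voltages_setup_silergy() encode as vsel_min/vsel_step, and the core then maps a requested voltage to a selector with regulator_map_voltage_linear(). A worked example of that mapping as an illustrative helper (example_uV_to_sel() is not part of the driver):

#include <linux/errno.h>
#include <linux/kernel.h>

static int example_uV_to_sel(int uV, int vsel_min, int vsel_step)
{
	/*
	 * -00/-01/-03/-05 parts: vsel_min = 600000, vsel_step = 10000,
	 *   so 1.10 V -> (1100000 - 600000) / 10000 = selector 50.
	 * Silergy SYR82x: vsel_min = 712500, vsel_step = 12500,
	 *   so 1.00 V -> (1000000 - 712500) / 12500 = selector 23.
	 * A real caller would also reject selectors >= FAN53555_NVOLTAGES.
	 */
	if (uV < vsel_min)
		return -EINVAL;
	return DIV_ROUND_UP(uV - vsel_min, vsel_step);
}
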
* For 04 option: @@ -156,7 +242,7 @@ static struct regulator_ops fan53555_regulator_ops = { static int fan53555_device_setup(struct fan53555_device_info *di, struct fan53555_platform_data *pdata) { - unsigned int reg, data, mask; + int ret = 0; /* Setup voltage control register */ switch (pdata->sleep_vsel_id) { @@ -172,33 +258,20 @@ static int fan53555_device_setup(struct fan53555_device_info *di, dev_err(di->dev, "Invalid VSEL ID!\n"); return -EINVAL; } - /* Init voltage range and step */ - switch (di->chip_id) { - case FAN53555_CHIP_ID_00: - case FAN53555_CHIP_ID_01: - case FAN53555_CHIP_ID_03: - case FAN53555_CHIP_ID_05: - di->vsel_min = 600000; - di->vsel_step = 10000; + + switch (di->vendor) { + case FAN53555_VENDOR_FAIRCHILD: + ret = fan53555_voltages_setup_fairchild(di); break; - case FAN53555_CHIP_ID_04: - di->vsel_min = 603000; - di->vsel_step = 12826; + case FAN53555_VENDOR_SILERGY: + ret = fan53555_voltages_setup_silergy(di); break; default: - dev_err(di->dev, - "Chip ID[%d]\n not supported!\n", di->chip_id); + dev_err(di->dev, "vendor %d not supported!\n", di->vendor); return -EINVAL; } - /* Init slew rate */ - if (pdata->slew_rate & 0x7) - di->slew_rate = pdata->slew_rate; - else - di->slew_rate = FAN53555_SLEW_RATE_64MV; - reg = FAN53555_CONTROL; - data = di->slew_rate << CTL_SLEW_SHIFT; - mask = CTL_SLEW_MASK; - return regmap_update_bits(di->regmap, reg, mask, data); + + return ret; } static int fan53555_regulator_register(struct fan53555_device_info *di, @@ -207,6 +280,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, struct regulator_desc *rdesc = &di->desc; rdesc->name = "fan53555-reg"; + rdesc->supply_name = "vin"; rdesc->ops = &fan53555_regulator_ops; rdesc->type = REGULATOR_VOLTAGE; rdesc->n_voltages = FAN53555_NVOLTAGES; @@ -227,9 +301,46 @@ static struct regmap_config fan53555_regmap_config = { .val_bits = 8, }; +static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev, + struct device_node *np) +{ + struct fan53555_platform_data *pdata; + int ret; + u32 tmp; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + pdata->regulator = of_get_regulator_init_data(dev, np); + + ret = of_property_read_u32(np, "fcs,suspend-voltage-selector", + &tmp); + if (!ret) + pdata->sleep_vsel_id = tmp; + + return pdata; +} + +static const struct of_device_id fan53555_dt_ids[] = { + { + .compatible = "fcs,fan53555", + .data = (void *)FAN53555_VENDOR_FAIRCHILD + }, { + .compatible = "silergy,syr827", + .data = (void *)FAN53555_VENDOR_SILERGY, + }, { + .compatible = "silergy,syr828", + .data = (void *)FAN53555_VENDOR_SILERGY, + }, + { } +}; +MODULE_DEVICE_TABLE(of, fan53555_dt_ids); + static int fan53555_regulator_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct device_node *np = client->dev.of_node; struct fan53555_device_info *di; struct fan53555_platform_data *pdata; struct regulator_config config = { }; @@ -237,6 +348,9 @@ static int fan53555_regulator_probe(struct i2c_client *client, int ret; pdata = dev_get_platdata(&client->dev); + if (!pdata) + pdata = fan53555_parse_dt(&client->dev, np); + if (!pdata || !pdata->regulator) { dev_err(&client->dev, "Platform data not found!\n"); return -ENODEV; @@ -247,13 +361,35 @@ static int fan53555_regulator_probe(struct i2c_client *client, if (!di) return -ENOMEM; + di->regulator = pdata->regulator; + if (client->dev.of_node) { + const struct of_device_id *match; + + match = of_match_device(of_match_ptr(fan53555_dt_ids), + 
&client->dev); + if (!match) + return -ENODEV; + + di->vendor = (unsigned long) match->data; + } else { + /* if no ramp constraint set, get the pdata ramp_delay */ + if (!di->regulator->constraints.ramp_delay) { + int slew_idx = (pdata->slew_rate & 0x7) + ? pdata->slew_rate : 0; + + di->regulator->constraints.ramp_delay + = slew_rates[slew_idx]; + } + + di->vendor = id->driver_data; + } + di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config); if (IS_ERR(di->regmap)) { dev_err(&client->dev, "Failed to allocate regmap!\n"); return PTR_ERR(di->regmap); } di->dev = &client->dev; - di->regulator = pdata->regulator; i2c_set_clientdata(client, di); /* Get chip ID */ ret = regmap_read(di->regmap, FAN53555_ID1, &val); @@ -282,6 +418,8 @@ static int fan53555_regulator_probe(struct i2c_client *client, config.init_data = di->regulator; config.regmap = di->regmap; config.driver_data = di; + config.of_node = np; + ret = fan53555_regulator_register(di, &config); if (ret < 0) dev_err(&client->dev, "Failed to register regulator!\n"); @@ -290,13 +428,20 @@ static int fan53555_regulator_probe(struct i2c_client *client, } static const struct i2c_device_id fan53555_id[] = { - {"fan53555", -1}, + { + .name = "fan53555", + .driver_data = FAN53555_VENDOR_FAIRCHILD + }, { + .name = "syr82x", + .driver_data = FAN53555_VENDOR_SILERGY + }, { }, }; static struct i2c_driver fan53555_regulator_driver = { .driver = { .name = "fan53555-regulator", + .of_match_table = of_match_ptr(fan53555_dt_ids), }, .probe = fan53555_regulator_probe, .id_table = fan53555_id, diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c new file mode 100644 index 000000000000..156d0d1a55f1 --- /dev/null +++ b/drivers/regulator/hi6421-regulator.c @@ -0,0 +1,634 @@ +/* + * Device driver for regulators in Hi6421 IC + * + * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. + * http://www.hisilicon.com + * Copyright (c) <2013-2014> Linaro Ltd. + * http://www.linaro.org + * + * Author: Guodong Xu <guodong.xu@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> +#include <linux/mfd/hi6421-pmic.h> + +/* + * struct hi6421_regulator_pdata - Hi6421 regulator data of platform device + * @lock: mutex to serialize regulator enable + */ +struct hi6421_regulator_pdata { + struct mutex lock; +}; + +/* + * struct hi6421_regulator_info - hi6421 regulator information + * @desc: regulator description + * @mode_mask: ECO mode bitmask of LDOs; for BUCKs, this masks sleep + * @eco_microamp: eco mode load upper limit (in uA), valid for LDOs only + */ +struct hi6421_regulator_info { + struct regulator_desc desc; + u8 mode_mask; + u32 eco_microamp; +}; + +/* HI6421 regulators */ +enum hi6421_regulator_id { + HI6421_LDO0, + HI6421_LDO1, + HI6421_LDO2, + HI6421_LDO3, + HI6421_LDO4, + HI6421_LDO5, + HI6421_LDO6, + HI6421_LDO7, + HI6421_LDO8, + HI6421_LDO9, + HI6421_LDO10, + HI6421_LDO11, + HI6421_LDO12, + HI6421_LDO13, + HI6421_LDO14, + HI6421_LDO15, + HI6421_LDO16, + HI6421_LDO17, + HI6421_LDO18, + HI6421_LDO19, + HI6421_LDO20, + HI6421_LDOAUDIO, + HI6421_BUCK0, + HI6421_BUCK1, + HI6421_BUCK2, + HI6421_BUCK3, + HI6421_BUCK4, + HI6421_BUCK5, + HI6421_NUM_REGULATORS, +}; + +#define HI6421_REGULATOR_OF_MATCH(_name, id) \ +{ \ + .name = #_name, \ + .driver_data = (void *) HI6421_##id, \ +} + +static struct of_regulator_match hi6421_regulator_match[] = { + HI6421_REGULATOR_OF_MATCH(hi6421_vout0, LDO0), + HI6421_REGULATOR_OF_MATCH(hi6421_vout1, LDO1), + HI6421_REGULATOR_OF_MATCH(hi6421_vout2, LDO2), + HI6421_REGULATOR_OF_MATCH(hi6421_vout3, LDO3), + HI6421_REGULATOR_OF_MATCH(hi6421_vout4, LDO4), + HI6421_REGULATOR_OF_MATCH(hi6421_vout5, LDO5), + HI6421_REGULATOR_OF_MATCH(hi6421_vout6, LDO6), + HI6421_REGULATOR_OF_MATCH(hi6421_vout7, LDO7), + HI6421_REGULATOR_OF_MATCH(hi6421_vout8, LDO8), + HI6421_REGULATOR_OF_MATCH(hi6421_vout9, LDO9), + HI6421_REGULATOR_OF_MATCH(hi6421_vout10, LDO10), + HI6421_REGULATOR_OF_MATCH(hi6421_vout11, LDO11), + HI6421_REGULATOR_OF_MATCH(hi6421_vout12, LDO12), + HI6421_REGULATOR_OF_MATCH(hi6421_vout13, LDO13), + HI6421_REGULATOR_OF_MATCH(hi6421_vout14, LDO14), + HI6421_REGULATOR_OF_MATCH(hi6421_vout15, LDO15), + HI6421_REGULATOR_OF_MATCH(hi6421_vout16, LDO16), + HI6421_REGULATOR_OF_MATCH(hi6421_vout17, LDO17), + HI6421_REGULATOR_OF_MATCH(hi6421_vout18, LDO18), + HI6421_REGULATOR_OF_MATCH(hi6421_vout19, LDO19), + HI6421_REGULATOR_OF_MATCH(hi6421_vout20, LDO20), + HI6421_REGULATOR_OF_MATCH(hi6421_vout_audio, LDOAUDIO), + HI6421_REGULATOR_OF_MATCH(hi6421_buck0, BUCK0), + HI6421_REGULATOR_OF_MATCH(hi6421_buck1, BUCK1), + HI6421_REGULATOR_OF_MATCH(hi6421_buck2, BUCK2), + HI6421_REGULATOR_OF_MATCH(hi6421_buck3, BUCK3), + HI6421_REGULATOR_OF_MATCH(hi6421_buck4, BUCK4), + HI6421_REGULATOR_OF_MATCH(hi6421_buck5, BUCK5), +}; + +/* LDO 0, 4~7, 9~14, 16~20 have same voltage table. */ +static const unsigned int ldo_0_voltages[] = { + 1500000, 1800000, 2400000, 2500000, + 2600000, 2700000, 2850000, 3000000, +}; + +/* LDO 8, 15 have same voltage table. */ +static const unsigned int ldo_8_voltages[] = { + 1500000, 1800000, 2400000, 2600000, + 2700000, 2850000, 3000000, 3300000, +}; + +/* Ranges are sorted in ascending order. 
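
The two LDOAUDIO ranges declared just below are consumed by regulator_list_voltage_linear_range(); spelled out, the selector-to-voltage mapping they describe looks like the following illustrative helper (not part of the driver):

#include <linux/errno.h>

static int example_ldo_audio_sel_to_uV(unsigned int sel)
{
	if (sel <= 3)				/* 2.80, 2.85, 2.90, 2.95 V */
		return 2800000 + sel * 50000;
	if (sel <= 7)				/* 3.00, 3.10, 3.20, 3.30 V */
		return 3000000 + (sel - 4) * 100000;
	return -EINVAL;
}
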
*/ +static const struct regulator_linear_range ldo_audio_volt_range[] = { + REGULATOR_LINEAR_RANGE(2800000, 0, 3, 50000), + REGULATOR_LINEAR_RANGE(3000000, 4, 7, 100000), +}; + +static const unsigned int buck_3_voltages[] = { + 950000, 1050000, 1100000, 1117000, + 1134000, 1150000, 1167000, 1200000, +}; + +static const unsigned int buck_4_voltages[] = { + 1150000, 1200000, 1250000, 1350000, + 1700000, 1800000, 1900000, 2000000, +}; + +static const unsigned int buck_5_voltages[] = { + 1150000, 1200000, 1250000, 1350000, + 1600000, 1700000, 1800000, 1900000, +}; + +static const struct regulator_ops hi6421_ldo_ops; +static const struct regulator_ops hi6421_ldo_linear_ops; +static const struct regulator_ops hi6421_ldo_linear_range_ops; +static const struct regulator_ops hi6421_buck012_ops; +static const struct regulator_ops hi6421_buck345_ops; + +#define HI6421_LDO_ENABLE_TIME (350) +/* + * _id - LDO id name string + * v_table - voltage table + * vreg - voltage select register + * vmask - voltage select mask + * ereg - enable register + * emask - enable mask + * odelay - off/on delay time in uS + * ecomask - eco mode mask + * ecoamp - eco mode load uppler limit in uA + */ +#define HI6421_LDO(_id, v_table, vreg, vmask, ereg, emask, \ + odelay, ecomask, ecoamp) \ + [HI6421_##_id] = { \ + .desc = { \ + .name = #_id, \ + .ops = &hi6421_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = HI6421_##_id, \ + .owner = THIS_MODULE, \ + .n_voltages = ARRAY_SIZE(v_table), \ + .volt_table = v_table, \ + .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \ + .vsel_mask = vmask, \ + .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \ + .enable_mask = emask, \ + .enable_time = HI6421_LDO_ENABLE_TIME, \ + .off_on_delay = odelay, \ + }, \ + .mode_mask = ecomask, \ + .eco_microamp = ecoamp, \ + } + +/* HI6421 LDO1~3 are linear voltage regulators at fixed uV_step + * + * _id - LDO id name string + * _min_uV - minimum voltage supported in uV + * n_volt - number of votages available + * vstep - voltage increase in each linear step in uV + * vreg - voltage select register + * vmask - voltage select mask + * ereg - enable register + * emask - enable mask + * odelay - off/on delay time in uS + * ecomask - eco mode mask + * ecoamp - eco mode load uppler limit in uA + */ +#define HI6421_LDO_LINEAR(_id, _min_uV, n_volt, vstep, vreg, vmask, \ + ereg, emask, odelay, ecomask, ecoamp) \ + [HI6421_##_id] = { \ + .desc = { \ + .name = #_id, \ + .ops = &hi6421_ldo_linear_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = HI6421_##_id, \ + .owner = THIS_MODULE, \ + .min_uV = _min_uV, \ + .n_voltages = n_volt, \ + .uV_step = vstep, \ + .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \ + .vsel_mask = vmask, \ + .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \ + .enable_mask = emask, \ + .enable_time = HI6421_LDO_ENABLE_TIME, \ + .off_on_delay = odelay, \ + }, \ + .mode_mask = ecomask, \ + .eco_microamp = ecoamp, \ + } + +/* HI6421 LDOAUDIO is a linear voltage regulator with two 4-step ranges + * + * _id - LDO id name string + * n_volt - number of votages available + * volt_ranges - array of regulator_linear_range + * vstep - voltage increase in each linear step in uV + * vreg - voltage select register + * vmask - voltage select mask + * ereg - enable register + * emask - enable mask + * odelay - off/on delay time in uS + * ecomask - eco mode mask + * ecoamp - eco mode load uppler limit in uA + */ +#define HI6421_LDO_LINEAR_RANGE(_id, n_volt, volt_ranges, vreg, vmask, \ + ereg, emask, odelay, ecomask, ecoamp) \ + [HI6421_##_id] = { \ + .desc = { \ + .name = 
#_id, \ + .ops = &hi6421_ldo_linear_range_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = HI6421_##_id, \ + .owner = THIS_MODULE, \ + .n_voltages = n_volt, \ + .linear_ranges = volt_ranges, \ + .n_linear_ranges = ARRAY_SIZE(volt_ranges), \ + .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \ + .vsel_mask = vmask, \ + .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \ + .enable_mask = emask, \ + .enable_time = HI6421_LDO_ENABLE_TIME, \ + .off_on_delay = odelay, \ + }, \ + .mode_mask = ecomask, \ + .eco_microamp = ecoamp, \ + } + +/* HI6421 BUCK0/1/2 are linear voltage regulators at fixed uV_step + * + * _id - BUCK0/1/2 id name string + * vreg - voltage select register + * vmask - voltage select mask + * ereg - enable register + * emask - enable mask + * sleepmask - mask of sleep mode + * etime - enable time + * odelay - off/on delay time in uS + */ +#define HI6421_BUCK012(_id, vreg, vmask, ereg, emask, sleepmask, \ + etime, odelay) \ + [HI6421_##_id] = { \ + .desc = { \ + .name = #_id, \ + .ops = &hi6421_buck012_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = HI6421_##_id, \ + .owner = THIS_MODULE, \ + .min_uV = 700000, \ + .n_voltages = 128, \ + .uV_step = 7086, \ + .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \ + .vsel_mask = vmask, \ + .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \ + .enable_mask = emask, \ + .enable_time = etime, \ + .off_on_delay = odelay, \ + }, \ + .mode_mask = sleepmask, \ + } + +/* HI6421 BUCK3/4/5 share similar configurations as LDOs, with exception + * that it supports SLEEP mode, so has different .ops. + * + * _id - LDO id name string + * v_table - voltage table + * vreg - voltage select register + * vmask - voltage select mask + * ereg - enable register + * emask - enable mask + * odelay - off/on delay time in uS + * sleepmask - mask of sleep mode + */ +#define HI6421_BUCK345(_id, v_table, vreg, vmask, ereg, emask, \ + odelay, sleepmask) \ + [HI6421_##_id] = { \ + .desc = { \ + .name = #_id, \ + .ops = &hi6421_buck345_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = HI6421_##_id, \ + .owner = THIS_MODULE, \ + .n_voltages = ARRAY_SIZE(v_table), \ + .volt_table = v_table, \ + .vsel_reg = HI6421_REG_TO_BUS_ADDR(vreg), \ + .vsel_mask = vmask, \ + .enable_reg = HI6421_REG_TO_BUS_ADDR(ereg), \ + .enable_mask = emask, \ + .enable_time = HI6421_LDO_ENABLE_TIME, \ + .off_on_delay = odelay, \ + }, \ + .mode_mask = sleepmask, \ + } + +/* HI6421 regulator information */ +static struct hi6421_regulator_info + hi6421_regulator_info[HI6421_NUM_REGULATORS] = { + HI6421_LDO(LDO0, ldo_0_voltages, 0x20, 0x07, 0x20, 0x10, + 10000, 0x20, 8000), + HI6421_LDO_LINEAR(LDO1, 1700000, 4, 100000, 0x21, 0x03, 0x21, 0x10, + 10000, 0x20, 5000), + HI6421_LDO_LINEAR(LDO2, 1050000, 8, 50000, 0x22, 0x07, 0x22, 0x10, + 20000, 0x20, 8000), + HI6421_LDO_LINEAR(LDO3, 1050000, 8, 50000, 0x23, 0x07, 0x23, 0x10, + 20000, 0x20, 8000), + HI6421_LDO(LDO4, ldo_0_voltages, 0x24, 0x07, 0x24, 0x10, + 20000, 0x20, 8000), + HI6421_LDO(LDO5, ldo_0_voltages, 0x25, 0x07, 0x25, 0x10, + 20000, 0x20, 8000), + HI6421_LDO(LDO6, ldo_0_voltages, 0x26, 0x07, 0x26, 0x10, + 20000, 0x20, 8000), + HI6421_LDO(LDO7, ldo_0_voltages, 0x27, 0x07, 0x27, 0x10, + 20000, 0x20, 5000), + HI6421_LDO(LDO8, ldo_8_voltages, 0x28, 0x07, 0x28, 0x10, + 20000, 0x20, 8000), + HI6421_LDO(LDO9, ldo_0_voltages, 0x29, 0x07, 0x29, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO10, ldo_0_voltages, 0x2a, 0x07, 0x2a, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO11, ldo_0_voltages, 0x2b, 0x07, 0x2b, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO12, ldo_0_voltages, 0x2c, 0x07, 0x2c, 
0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO13, ldo_0_voltages, 0x2d, 0x07, 0x2d, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO14, ldo_0_voltages, 0x2e, 0x07, 0x2e, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO15, ldo_8_voltages, 0x2f, 0x07, 0x2f, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO16, ldo_0_voltages, 0x30, 0x07, 0x30, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO17, ldo_0_voltages, 0x31, 0x07, 0x31, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO18, ldo_0_voltages, 0x32, 0x07, 0x32, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO19, ldo_0_voltages, 0x33, 0x07, 0x33, 0x10, + 40000, 0x20, 8000), + HI6421_LDO(LDO20, ldo_0_voltages, 0x34, 0x07, 0x34, 0x10, + 40000, 0x20, 8000), + HI6421_LDO_LINEAR_RANGE(LDOAUDIO, 8, ldo_audio_volt_range, 0x36, + 0x70, 0x36, 0x01, 40000, 0x02, 5000), + HI6421_BUCK012(BUCK0, 0x0d, 0x7f, 0x0c, 0x01, 0x10, 400, 20000), + HI6421_BUCK012(BUCK1, 0x0f, 0x7f, 0x0e, 0x01, 0x10, 400, 20000), + HI6421_BUCK012(BUCK2, 0x11, 0x7f, 0x10, 0x01, 0x10, 350, 100), + HI6421_BUCK345(BUCK3, buck_3_voltages, 0x13, 0x07, 0x12, 0x01, + 20000, 0x10), + HI6421_BUCK345(BUCK4, buck_4_voltages, 0x15, 0x07, 0x14, 0x01, + 20000, 0x10), + HI6421_BUCK345(BUCK5, buck_5_voltages, 0x17, 0x07, 0x16, 0x01, + 20000, 0x10), +}; + +static int hi6421_regulator_enable(struct regulator_dev *rdev) +{ + struct hi6421_regulator_pdata *pdata; + + pdata = dev_get_drvdata(rdev->dev.parent); + /* hi6421 spec requires regulator enablement must be serialized: + * - Because when BUCK, LDO switching from off to on, it will have + * a huge instantaneous current; so you can not turn on two or + * more LDO or BUCKs simultaneously, or it may burn the chip. + */ + mutex_lock(&pdata->lock); + + /* call regulator regmap helper */ + regulator_enable_regmap(rdev); + + mutex_unlock(&pdata->lock); + return 0; +} + +static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) +{ + struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + u32 reg_val; + + regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val); + if (reg_val & info->mode_mask) + return REGULATOR_MODE_IDLE; + + return REGULATOR_MODE_NORMAL; +} + +static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) +{ + struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + u32 reg_val; + + regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val); + if (reg_val & info->mode_mask) + return REGULATOR_MODE_STANDBY; + + return REGULATOR_MODE_NORMAL; +} + +static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + u32 new_mode; + + switch (mode) { + case REGULATOR_MODE_NORMAL: + new_mode = 0; + break; + case REGULATOR_MODE_IDLE: + new_mode = info->mode_mask; + break; + default: + return -EINVAL; + } + + /* set mode */ + regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + info->mode_mask, new_mode); + + return 0; +} + +static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + u32 new_mode; + + switch (mode) { + case REGULATOR_MODE_NORMAL: + new_mode = 0; + break; + case REGULATOR_MODE_STANDBY: + new_mode = info->mode_mask; + break; + default: + return -EINVAL; + } + + /* set mode */ + regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + info->mode_mask, new_mode); + + return 0; +} + +unsigned int hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev, + int input_uV, int output_uV, int load_uA) +{ + struct 
hi6421_regulator_info *info = rdev_get_drvdata(rdev); + + if (load_uA > info->eco_microamp) + return REGULATOR_MODE_NORMAL; + + return REGULATOR_MODE_IDLE; +} + +static const struct regulator_ops hi6421_ldo_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = hi6421_regulator_enable, + .disable = regulator_disable_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_mode = hi6421_regulator_ldo_get_mode, + .set_mode = hi6421_regulator_ldo_set_mode, + .get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode, +}; + +static const struct regulator_ops hi6421_ldo_linear_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = hi6421_regulator_enable, + .disable = regulator_disable_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_mode = hi6421_regulator_ldo_get_mode, + .set_mode = hi6421_regulator_ldo_set_mode, + .get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode, +}; + +static const struct regulator_ops hi6421_ldo_linear_range_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = hi6421_regulator_enable, + .disable = regulator_disable_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_mode = hi6421_regulator_ldo_get_mode, + .set_mode = hi6421_regulator_ldo_set_mode, + .get_optimum_mode = hi6421_regulator_ldo_get_optimum_mode, +}; + +static const struct regulator_ops hi6421_buck012_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = hi6421_regulator_enable, + .disable = regulator_disable_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_mode = hi6421_regulator_buck_get_mode, + .set_mode = hi6421_regulator_buck_set_mode, +}; + +static const struct regulator_ops hi6421_buck345_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = hi6421_regulator_enable, + .disable = regulator_disable_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_mode = hi6421_regulator_buck_get_mode, + .set_mode = hi6421_regulator_buck_set_mode, +}; + +static int hi6421_regulator_register(struct platform_device *pdev, + struct regmap *rmap, + struct regulator_init_data *init_data, + int id, struct device_node *np) +{ + struct hi6421_regulator_info *info = NULL; + struct regulator_config config = { }; + struct regulator_dev *rdev; + + /* assign per-regulator data */ + info = &hi6421_regulator_info[id]; + + config.dev = &pdev->dev; + config.init_data = init_data; + config.driver_data = info; + config.regmap = rmap; + config.of_node = np; + + /* register regulator with framework */ + rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, "failed to register regulator %s\n", + info->desc.name); + return PTR_ERR(rdev); + } + + return 0; +} + +static int 
hi6421_regulator_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np; + struct hi6421_pmic *pmic; + struct hi6421_regulator_pdata *pdata; + int i, ret = 0; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + mutex_init(&pdata->lock); + platform_set_drvdata(pdev, pdata); + + np = of_get_child_by_name(dev->parent->of_node, "regulators"); + if (!np) + return -ENODEV; + + ret = of_regulator_match(dev, np, + hi6421_regulator_match, + ARRAY_SIZE(hi6421_regulator_match)); + of_node_put(np); + if (ret < 0) { + dev_err(dev, "Error parsing regulator init data: %d\n", ret); + return ret; + } + + pmic = dev_get_drvdata(dev->parent); + + for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) { + ret = hi6421_regulator_register(pdev, pmic->regmap, + hi6421_regulator_match[i].init_data, i, + hi6421_regulator_match[i].of_node); + if (ret) + return ret; + } + + return 0; +} + +static struct platform_driver hi6421_regulator_driver = { + .driver = { + .name = "hi6421-regulator", + .owner = THIS_MODULE, + }, + .probe = hi6421_regulator_probe, +}; +module_platform_driver(hi6421_regulator_driver); + +MODULE_AUTHOR("Guodong Xu <guodong.xu@linaro.org>"); +MODULE_DESCRIPTION("Hi6421 regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h index 84bbda10c396..80ba2a35a04b 100644 --- a/drivers/regulator/internal.h +++ b/drivers/regulator/internal.h @@ -35,4 +35,18 @@ struct regulator { struct dentry *debugfs; }; +#ifdef CONFIG_OF +struct regulator_init_data *regulator_of_get_init_data(struct device *dev, + const struct regulator_desc *desc, + struct device_node **node); +#else +static inline struct regulator_init_data * +regulator_of_get_init_data(struct device *dev, + const struct regulator_desc *desc, + struct device_node **node) +{ + return NULL; +} +#endif + #endif diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c new file mode 100644 index 000000000000..92fefd98da58 --- /dev/null +++ b/drivers/regulator/isl9305.c @@ -0,0 +1,207 @@ +/* + * isl9305 - Intersil ISL9305 DCDC regulator + * + * Copyright 2014 Linaro Ltd + * + * Author: Mark Brown <broonie@kernel.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/of.h> +#include <linux/platform_data/isl9305.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> +#include <linux/slab.h> + +/* + * Registers + */ +#define ISL9305_DCD1OUT 0x0 +#define ISL9305_DCD2OUT 0x1 +#define ISL9305_LDO1OUT 0x2 +#define ISL9305_LDO2OUT 0x3 +#define ISL9305_DCD_PARAMETER 0x4 +#define ISL9305_SYSTEM_PARAMETER 0x5 +#define ISL9305_DCD_SRCTL 0x6 + +#define ISL9305_MAX_REG ISL9305_DCD_SRCTL + +/* + * DCD_PARAMETER + */ +#define ISL9305_DCD_PHASE 0x40 +#define ISL9305_DCD2_ULTRA 0x20 +#define ISL9305_DCD1_ULTRA 0x10 +#define ISL9305_DCD2_BLD 0x08 +#define ISL9305_DCD1_BLD 0x04 +#define ISL9305_DCD2_MODE 0x02 +#define ISL9305_DCD1_MODE 0x01 + +/* + * SYSTEM_PARAMETER + */ +#define ISL9305_I2C_EN 0x40 +#define ISL9305_DCDPOR_MASK 0x30 +#define ISL9305_LDO2_EN 0x08 +#define ISL9305_LDO1_EN 0x04 +#define ISL9305_DCD2_EN 0x02 +#define ISL9305_DCD1_EN 0x01 + +/* + * DCD_SRCTL + */ +#define ISL9305_DCD2SR_MASK 0xc0 +#define ISL9305_DCD1SR_MASK 0x07 + +static const struct regulator_ops isl9305_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_desc isl9305_regulators[] = { + [ISL9305_DCD1] = { + .name = "DCD1", + .of_match = of_match_ptr("dcd1"), + .regulators_node = of_match_ptr("regulators"), + .n_voltages = 0x70, + .min_uV = 825000, + .uV_step = 25000, + .vsel_reg = ISL9305_DCD1OUT, + .vsel_mask = 0x7f, + .enable_reg = ISL9305_SYSTEM_PARAMETER, + .enable_mask = ISL9305_DCD1_EN, + .supply_name = "VINDCD1", + .ops = &isl9305_ops, + }, + [ISL9305_DCD2] = { + .name = "DCD2", + .of_match = of_match_ptr("dcd2"), + .regulators_node = of_match_ptr("regulators"), + .n_voltages = 0x70, + .min_uV = 825000, + .uV_step = 25000, + .vsel_reg = ISL9305_DCD2OUT, + .vsel_mask = 0x7f, + .enable_reg = ISL9305_SYSTEM_PARAMETER, + .enable_mask = ISL9305_DCD2_EN, + .supply_name = "VINDCD2", + .ops = &isl9305_ops, + }, + [ISL9305_LDO1] = { + .name = "LDO1", + .of_match = of_match_ptr("ldo1"), + .regulators_node = of_match_ptr("regulators"), + .n_voltages = 0x37, + .min_uV = 900000, + .uV_step = 50000, + .vsel_reg = ISL9305_LDO1OUT, + .vsel_mask = 0x3f, + .enable_reg = ISL9305_SYSTEM_PARAMETER, + .enable_mask = ISL9305_LDO1_EN, + .supply_name = "VINLDO1", + .ops = &isl9305_ops, + }, + [ISL9305_LDO2] = { + .name = "LDO2", + .of_match = of_match_ptr("ldo2"), + .regulators_node = of_match_ptr("regulators"), + .n_voltages = 0x37, + .min_uV = 900000, + .uV_step = 50000, + .vsel_reg = ISL9305_LDO2OUT, + .vsel_mask = 0x3f, + .enable_reg = ISL9305_SYSTEM_PARAMETER, + .enable_mask = ISL9305_LDO2_EN, + .supply_name = "VINLDO2", + .ops = &isl9305_ops, + }, +}; + +static const struct regmap_config isl9305_regmap = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = ISL9305_MAX_REG, + .cache_type = REGCACHE_RBTREE, +}; + +static int isl9305_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct regulator_config config = { }; + struct isl9305_pdata *pdata = i2c->dev.platform_data; + struct regulator_dev *rdev; + struct regmap *regmap; + int i, ret; + + regmap = devm_regmap_init_i2c(i2c, &isl9305_regmap); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + 
dev_err(&i2c->dev, "Failed to create regmap: %d\n", ret); + return ret; + } + + config.dev = &i2c->dev; + + for (i = 0; i < ARRAY_SIZE(isl9305_regulators); i++) { + if (pdata) + config.init_data = pdata->init_data[i]; + else + config.init_data = NULL; + + rdev = devm_regulator_register(&i2c->dev, + &isl9305_regulators[i], + &config); + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(&i2c->dev, "Failed to register %s: %d\n", + isl9305_regulators[i].name, ret); + return ret; + } + } + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id isl9305_dt_ids[] = { + { .compatible = "isl,isl9305" }, + { .compatible = "isl,isl9305h" }, + {}, +}; +#endif + +static const struct i2c_device_id isl9305_i2c_id[] = { + { "isl9305", }, + { "isl9305h", }, + { } +}; +MODULE_DEVICE_TABLE(i2c, isl9305_i2c_id); + +static struct i2c_driver isl9305_regulator_driver = { + .driver = { + .name = "isl9305", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(isl9305_dt_ids), + }, + .probe = isl9305_i2c_probe, + .id_table = isl9305_i2c_id, +}; + +module_i2c_driver(isl9305_regulator_driver); + +MODULE_AUTHOR("Mark Brown"); +MODULE_DESCRIPTION("Intersil ISL9305 DCDC regulator"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c index c756955bfcc5..0ce8e4e0fa73 100644 --- a/drivers/regulator/ltc3589.c +++ b/drivers/regulator/ltc3589.c @@ -372,6 +372,7 @@ static bool ltc3589_volatile_reg(struct device *dev, unsigned int reg) switch (reg) { case LTC3589_IRQSTAT: case LTC3589_PGSTAT: + case LTC3589_VCCR: return true; } return false; diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index d23d0577754b..86db310d5304 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c @@ -24,6 +24,8 @@ #include <linux/regulator/driver.h> #include <linux/slab.h> #include <linux/regulator/max1586.h> +#include <linux/of_device.h> +#include <linux/regulator/of_regulator.h> #define MAX1586_V3_MAX_VSEL 31 #define MAX1586_V6_MAX_VSEL 3 @@ -157,13 +159,87 @@ static struct regulator_desc max1586_reg[] = { }, }; +static int of_get_max1586_platform_data(struct device *dev, + struct max1586_platform_data *pdata) +{ + struct max1586_subdev_data *sub; + struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)]; + struct device_node *np = dev->of_node; + int i, matched; + + if (of_property_read_u32(np, "v3-gain", + &pdata->v3_gain) < 0) { + dev_err(dev, "%s has no 'v3-gain' property\n", np->full_name); + return -EINVAL; + } + + np = of_get_child_by_name(np, "regulators"); + if (!np) { + dev_err(dev, "missing 'regulators' subnode in DT\n"); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(rmatch); i++) + rmatch[i].name = max1586_reg[i].name; + + matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch)); + of_node_put(np); + /* + * If matched is 0, ie. neither Output_V3 nor Output_V6 have been found, + * return 0, which signals the normal situation where no subregulator is + * available. This is normal because the max1586 doesn't provide any + * readback support, so the subregulators can't report any status + * anyway. If matched < 0, return the error. 
+ */ + if (matched <= 0) + return matched; + + pdata->subdevs = devm_kzalloc(dev, sizeof(struct max1586_subdev_data) * + matched, GFP_KERNEL); + if (!pdata->subdevs) + return -ENOMEM; + + pdata->num_subdevs = matched; + sub = pdata->subdevs; + + for (i = 0; i < matched; i++) { + sub->id = i; + sub->name = rmatch[i].of_node->name; + sub->platform_data = rmatch[i].init_data; + sub++; + } + + return 0; +} + +static const struct of_device_id max1586_of_match[] = { + { .compatible = "maxim,max1586", }, + {}, +}; +MODULE_DEVICE_TABLE(of, max1586_of_match); + static int max1586_pmic_probe(struct i2c_client *client, const struct i2c_device_id *i2c_id) { - struct max1586_platform_data *pdata = dev_get_platdata(&client->dev); + struct max1586_platform_data *pdata, pdata_of; struct regulator_config config = { }; struct max1586_data *max1586; - int i, id; + int i, id, ret; + const struct of_device_id *match; + + pdata = dev_get_platdata(&client->dev); + if (client->dev.of_node && !pdata) { + match = of_match_device(of_match_ptr(max1586_of_match), + &client->dev); + if (!match) { + dev_err(&client->dev, "Error: No device match found\n"); + return -ENODEV; + } + ret = of_get_max1586_platform_data(&client->dev, &pdata_of); + if (ret < 0) + return ret; + pdata = &pdata_of; + } max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data), GFP_KERNEL); @@ -229,6 +305,7 @@ static struct i2c_driver max1586_pmic_driver = { .driver = { .name = "max1586", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(max1586_of_match), }, .id_table = max1586_id, }; diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802.c new file mode 100644 index 000000000000..d89792b084e9 --- /dev/null +++ b/drivers/regulator/max77802.c @@ -0,0 +1,586 @@ +/* + * max77802.c - Regulator driver for the Maxim 77802 + * + * Copyright (C) 2013-2014 Google, Inc + * Simon Glass <sjg@chromium.org> + * + * Copyright (C) 2012 Samsung Electronics + * Chiwoong Byun <woong.byun@smasung.com> + * Jonghwa Lee <jonghwa3.lee@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * This driver is based on max8997.c + */ + +#include <linux/kernel.h> +#include <linux/bug.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/slab.h> +#include <linux/gpio/consumer.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> +#include <linux/mfd/max77686.h> +#include <linux/mfd/max77686-private.h> + +/* Default ramp delay in case it is not manually set */ +#define MAX77802_RAMP_DELAY 100000 /* uV/us */ + +#define MAX77802_OPMODE_SHIFT_LDO 6 +#define MAX77802_OPMODE_BUCK234_SHIFT 4 +#define MAX77802_OPMODE_MASK 0x3 + +#define MAX77802_VSEL_MASK 0x3F +#define MAX77802_DVS_VSEL_MASK 0xFF + +#define MAX77802_RAMP_RATE_MASK_2BIT 0xC0 +#define MAX77802_RAMP_RATE_SHIFT_2BIT 6 +#define MAX77802_RAMP_RATE_MASK_4BIT 0xF0 +#define MAX77802_RAMP_RATE_SHIFT_4BIT 4 + +/* MAX77802 has two register formats: 2-bit and 4-bit */ +static const unsigned int ramp_table_77802_2bit[] = { + 12500, + 25000, + 50000, + 100000, +}; + +static unsigned int ramp_table_77802_4bit[] = { + 1000, 2000, 3030, 4000, + 5000, 5880, 7140, 8330, + 9090, 10000, 11110, 12500, + 16670, 25000, 50000, 100000, +}; + +struct max77802_regulator_prv { + unsigned int opmode[MAX77802_REG_MAX]; +}; + +static int max77802_get_opmode_shift(int id) +{ + if (id == MAX77802_BUCK1 || (id >= MAX77802_BUCK5 && + id <= MAX77802_BUCK10)) + return 0; + + if (id >= MAX77802_BUCK2 && id <= MAX77802_BUCK4) + return MAX77802_OPMODE_BUCK234_SHIFT; + + if (id >= MAX77802_LDO1 && id <= MAX77802_LDO35) + return MAX77802_OPMODE_SHIFT_LDO; + + return -EINVAL; +} + +/* + * Some BUCKs support Normal[ON/OFF] mode during suspend + * + * BUCK 1, 6, 2-4, 5, 7-10 (all) + * + * The other mode (0x02) will make PWRREQ switch between normal + * and low power. + */ +static int max77802_buck_set_suspend_disable(struct regulator_dev *rdev) +{ + unsigned int val = MAX77802_OPMODE_STANDBY; + struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + int shift = max77802_get_opmode_shift(id); + + max77802->opmode[id] = val; + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + rdev->desc->enable_mask, val << shift); +} + +/* + * Some LDOs support LPM-ON/OFF/Normal-ON mode during the suspend state + * (Enable Control Logic1 by PWRREQ) + * + * LDOs 2, 4-19, 22-35. 
+ * + */ +static int max77802_ldo_set_suspend_mode_logic1(struct regulator_dev *rdev, + unsigned int mode) +{ + struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + unsigned int val; + int shift = max77802_get_opmode_shift(id); + + switch (mode) { + case REGULATOR_MODE_IDLE: /* ON in LP Mode */ + val = MAX77802_OPMODE_LP; + break; + case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ + val = MAX77802_OPMODE_NORMAL; + break; + case REGULATOR_MODE_STANDBY: /* ON/OFF by PWRREQ */ + val = MAX77802_OPMODE_STANDBY; + break; + default: + dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n", + rdev->desc->name, mode); + return -EINVAL; + } + + max77802->opmode[id] = val; + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + rdev->desc->enable_mask, val << shift); +} + +/* + * Mode 1 (Output[ON/OFF] by PWRREQ) is not supported on some LDOs + * (Enable Control Logic2 by PWRREQ) + * + * LDOs 1, 20, 21, and 3, + * + */ +static int max77802_ldo_set_suspend_mode_logic2(struct regulator_dev *rdev, + unsigned int mode) +{ + struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + unsigned int val; + int shift = max77802_get_opmode_shift(id); + + switch (mode) { + case REGULATOR_MODE_IDLE: /* ON in LP Mode */ + val = MAX77802_OPMODE_LP; + break; + case REGULATOR_MODE_NORMAL: /* ON in Normal Mode */ + val = MAX77802_OPMODE_NORMAL; + break; + default: + dev_warn(&rdev->dev, "%s: regulator mode: 0x%x not supported\n", + rdev->desc->name, mode); + return -EINVAL; + } + + max77802->opmode[id] = val; + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + rdev->desc->enable_mask, val << shift); +} + +static int max77802_enable(struct regulator_dev *rdev) +{ + struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + int shift = max77802_get_opmode_shift(id); + + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + rdev->desc->enable_mask, + max77802->opmode[id] << shift); +} + +static int max77802_find_ramp_value(struct regulator_dev *rdev, + const unsigned int limits[], int size, + unsigned int ramp_delay) +{ + int i; + + for (i = 0; i < size; i++) { + if (ramp_delay <= limits[i]) + return i; + } + + /* Use maximum value for no ramp control */ + dev_warn(&rdev->dev, "%s: ramp_delay: %d not supported, setting 100000\n", + rdev->desc->name, ramp_delay); + return size - 1; +} + +/* Used for BUCKs 2-4 */ +static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev, + int ramp_delay) +{ + int id = rdev_get_id(rdev); + unsigned int ramp_value; + + if (id > MAX77802_BUCK4) { + dev_warn(&rdev->dev, + "%s: regulator: ramp delay not supported\n", + rdev->desc->name); + return -EINVAL; + } + ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit, + ARRAY_SIZE(ramp_table_77802_2bit), ramp_delay); + + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + MAX77802_RAMP_RATE_MASK_2BIT, + ramp_value << MAX77802_RAMP_RATE_SHIFT_2BIT); +} + +/* For BUCK1, 6 */ +static int max77802_set_ramp_delay_4bit(struct regulator_dev *rdev, + int ramp_delay) +{ + unsigned int ramp_value; + + ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_4bit, + ARRAY_SIZE(ramp_table_77802_4bit), ramp_delay); + + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + MAX77802_RAMP_RATE_MASK_4BIT, + ramp_value << MAX77802_RAMP_RATE_SHIFT_4BIT); +} + +/* + * LDOs 2, 4-19, 22-35 + */ +static struct regulator_ops 
max77802_ldo_ops_logic1 = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = max77802_enable, + .disable = regulator_disable_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_suspend_mode = max77802_ldo_set_suspend_mode_logic1, +}; + +/* + * LDOs 1, 20, 21, 3 + */ +static struct regulator_ops max77802_ldo_ops_logic2 = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = max77802_enable, + .disable = regulator_disable_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_suspend_mode = max77802_ldo_set_suspend_mode_logic2, +}; + +/* BUCKS 1, 6 */ +static struct regulator_ops max77802_buck_16_dvs_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = max77802_enable, + .disable = regulator_disable_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = max77802_set_ramp_delay_4bit, + .set_suspend_disable = max77802_buck_set_suspend_disable, +}; + +/* BUCKs 2-4, 5, 7-10 */ +static struct regulator_ops max77802_buck_dvs_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = max77802_enable, + .disable = regulator_disable_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = max77802_set_ramp_delay_2bit, + .set_suspend_disable = max77802_buck_set_suspend_disable, +}; + +/* LDOs 3-7, 9-14, 18-26, 28, 29, 32-34 */ +#define regulator_77802_desc_p_ldo(num, supply, log) { \ + .name = "LDO"#num, \ + .id = MAX77802_LDO##num, \ + .supply_name = "inl"#supply, \ + .ops = &max77802_ldo_ops_logic##log, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 800000, \ + .uV_step = 50000, \ + .ramp_delay = MAX77802_RAMP_DELAY, \ + .n_voltages = 1 << 6, \ + .vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \ + .vsel_mask = MAX77802_VSEL_MASK, \ + .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \ + .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \ +} + +/* LDOs 1, 2, 8, 15, 17, 27, 30, 35 */ +#define regulator_77802_desc_n_ldo(num, supply, log) { \ + .name = "LDO"#num, \ + .id = MAX77802_LDO##num, \ + .supply_name = "inl"#supply, \ + .ops = &max77802_ldo_ops_logic##log, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 800000, \ + .uV_step = 25000, \ + .ramp_delay = MAX77802_RAMP_DELAY, \ + .n_voltages = 1 << 6, \ + .vsel_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \ + .vsel_mask = MAX77802_VSEL_MASK, \ + .enable_reg = MAX77802_REG_LDO1CTRL1 + num - 1, \ + .enable_mask = MAX77802_OPMODE_MASK << MAX77802_OPMODE_SHIFT_LDO, \ +} + +/* BUCKs 1, 6 */ +#define regulator_77802_desc_16_buck(num) { \ + .name = "BUCK"#num, \ + .id = MAX77802_BUCK##num, \ + .supply_name = "inb"#num, \ + .ops = &max77802_buck_16_dvs_ops, \ + .type = 
REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 612500, \ + .uV_step = 6250, \ + .ramp_delay = MAX77802_RAMP_DELAY, \ + .n_voltages = 1 << 8, \ + .vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \ + .vsel_mask = MAX77802_DVS_VSEL_MASK, \ + .enable_reg = MAX77802_REG_BUCK ## num ## CTRL, \ + .enable_mask = MAX77802_OPMODE_MASK, \ +} + +/* BUCKS 2-4 */ +#define regulator_77802_desc_234_buck(num) { \ + .name = "BUCK"#num, \ + .id = MAX77802_BUCK##num, \ + .supply_name = "inb"#num, \ + .ops = &max77802_buck_dvs_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 600000, \ + .uV_step = 6250, \ + .ramp_delay = MAX77802_RAMP_DELAY, \ + .n_voltages = 0x91, \ + .vsel_reg = MAX77802_REG_BUCK ## num ## DVS1, \ + .vsel_mask = MAX77802_DVS_VSEL_MASK, \ + .enable_reg = MAX77802_REG_BUCK ## num ## CTRL1, \ + .enable_mask = MAX77802_OPMODE_MASK << \ + MAX77802_OPMODE_BUCK234_SHIFT, \ +} + +/* BUCK 5 */ +#define regulator_77802_desc_buck5(num) { \ + .name = "BUCK"#num, \ + .id = MAX77802_BUCK##num, \ + .supply_name = "inb"#num, \ + .ops = &max77802_buck_dvs_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 750000, \ + .uV_step = 50000, \ + .ramp_delay = MAX77802_RAMP_DELAY, \ + .n_voltages = 1 << 6, \ + .vsel_reg = MAX77802_REG_BUCK5OUT, \ + .vsel_mask = MAX77802_VSEL_MASK, \ + .enable_reg = MAX77802_REG_BUCK5CTRL, \ + .enable_mask = MAX77802_OPMODE_MASK, \ +} + +/* BUCKs 7-10 */ +#define regulator_77802_desc_buck7_10(num) { \ + .name = "BUCK"#num, \ + .id = MAX77802_BUCK##num, \ + .supply_name = "inb"#num, \ + .ops = &max77802_buck_dvs_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 750000, \ + .uV_step = 50000, \ + .ramp_delay = MAX77802_RAMP_DELAY, \ + .n_voltages = 1 << 6, \ + .vsel_reg = MAX77802_REG_BUCK7OUT + (num - 7) * 3, \ + .vsel_mask = MAX77802_VSEL_MASK, \ + .enable_reg = MAX77802_REG_BUCK7CTRL + (num - 7) * 3, \ + .enable_mask = MAX77802_OPMODE_MASK, \ +} + +static struct regulator_desc regulators[] = { + regulator_77802_desc_16_buck(1), + regulator_77802_desc_234_buck(2), + regulator_77802_desc_234_buck(3), + regulator_77802_desc_234_buck(4), + regulator_77802_desc_buck5(5), + regulator_77802_desc_16_buck(6), + regulator_77802_desc_buck7_10(7), + regulator_77802_desc_buck7_10(8), + regulator_77802_desc_buck7_10(9), + regulator_77802_desc_buck7_10(10), + regulator_77802_desc_n_ldo(1, 10, 2), + regulator_77802_desc_n_ldo(2, 10, 1), + regulator_77802_desc_p_ldo(3, 3, 2), + regulator_77802_desc_p_ldo(4, 6, 1), + regulator_77802_desc_p_ldo(5, 3, 1), + regulator_77802_desc_p_ldo(6, 3, 1), + regulator_77802_desc_p_ldo(7, 3, 1), + regulator_77802_desc_n_ldo(8, 1, 1), + regulator_77802_desc_p_ldo(9, 5, 1), + regulator_77802_desc_p_ldo(10, 4, 1), + regulator_77802_desc_p_ldo(11, 4, 1), + regulator_77802_desc_p_ldo(12, 9, 1), + regulator_77802_desc_p_ldo(13, 4, 1), + regulator_77802_desc_p_ldo(14, 4, 1), + regulator_77802_desc_n_ldo(15, 1, 1), + regulator_77802_desc_n_ldo(17, 2, 1), + regulator_77802_desc_p_ldo(18, 7, 1), + regulator_77802_desc_p_ldo(19, 5, 1), + regulator_77802_desc_p_ldo(20, 7, 2), + regulator_77802_desc_p_ldo(21, 6, 2), + regulator_77802_desc_p_ldo(23, 9, 1), + regulator_77802_desc_p_ldo(24, 6, 1), + regulator_77802_desc_p_ldo(25, 9, 1), + regulator_77802_desc_p_ldo(26, 9, 1), + regulator_77802_desc_n_ldo(27, 2, 1), + regulator_77802_desc_p_ldo(28, 7, 1), + regulator_77802_desc_p_ldo(29, 7, 1), + regulator_77802_desc_n_ldo(30, 2, 1), + regulator_77802_desc_p_ldo(32, 9, 1), + 
regulator_77802_desc_p_ldo(33, 6, 1), + regulator_77802_desc_p_ldo(34, 9, 1), + regulator_77802_desc_n_ldo(35, 2, 1), +}; + +#ifdef CONFIG_OF +static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev, + struct max77686_platform_data *pdata) +{ + struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); + struct device_node *pmic_np, *regulators_np; + struct max77686_regulator_data *rdata; + struct of_regulator_match rmatch; + unsigned int i; + + pmic_np = iodev->dev->of_node; + regulators_np = of_get_child_by_name(pmic_np, "regulators"); + if (!regulators_np) { + dev_err(&pdev->dev, "could not find regulators sub-node\n"); + return -EINVAL; + } + + pdata->num_regulators = ARRAY_SIZE(regulators); + rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) * + pdata->num_regulators, GFP_KERNEL); + if (!rdata) { + of_node_put(regulators_np); + return -ENOMEM; + } + + for (i = 0; i < pdata->num_regulators; i++) { + rmatch.name = regulators[i].name; + rmatch.init_data = NULL; + rmatch.of_node = NULL; + if (of_regulator_match(&pdev->dev, regulators_np, &rmatch, + 1) != 1) { + dev_warn(&pdev->dev, "No matching regulator for '%s'\n", + rmatch.name); + continue; + } + rdata[i].initdata = rmatch.init_data; + rdata[i].of_node = rmatch.of_node; + rdata[i].id = regulators[i].id; + } + + pdata->regulators = rdata; + of_node_put(regulators_np); + + return 0; +} +#else +static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev, + struct max77686_platform_data *pdata) +{ + return 0; +} +#endif /* CONFIG_OF */ + +static int max77802_pmic_probe(struct platform_device *pdev) +{ + struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); + struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev); + struct max77802_regulator_prv *max77802; + int i, ret = 0, val; + struct regulator_config config = { }; + + /* This is allocated by the MFD driver */ + if (!pdata) { + dev_err(&pdev->dev, "no platform data found for regulator\n"); + return -ENODEV; + } + + max77802 = devm_kzalloc(&pdev->dev, + sizeof(struct max77802_regulator_prv), + GFP_KERNEL); + if (!max77802) + return -ENOMEM; + + if (iodev->dev->of_node) { + ret = max77802_pmic_dt_parse_pdata(pdev, pdata); + if (ret) + return ret; + } + + config.dev = iodev->dev; + config.regmap = iodev->regmap; + config.driver_data = max77802; + platform_set_drvdata(pdev, max77802); + + for (i = 0; i < MAX77802_REG_MAX; i++) { + struct regulator_dev *rdev; + int id = pdata->regulators[i].id; + int shift = max77802_get_opmode_shift(id); + + config.init_data = pdata->regulators[i].initdata; + config.of_node = pdata->regulators[i].of_node; + + ret = regmap_read(iodev->regmap, regulators[i].enable_reg, &val); + val = val >> shift & MAX77802_OPMODE_MASK; + + /* + * If the regulator is disabled and the system warm rebooted, + * the hardware reports OFF as the regulator operating mode. + * Default to operating mode NORMAL in that case. 
+ */ + if (val == MAX77802_OPMODE_OFF) + max77802->opmode[id] = MAX77802_OPMODE_NORMAL; + else + max77802->opmode[id] = val; + + rdev = devm_regulator_register(&pdev->dev, + ®ulators[i], &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "regulator init failed for %d\n", i); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct platform_device_id max77802_pmic_id[] = { + {"max77802-pmic", 0}, + { }, +}; +MODULE_DEVICE_TABLE(platform, max77802_pmic_id); + +static struct platform_driver max77802_pmic_driver = { + .driver = { + .name = "max77802-pmic", + .owner = THIS_MODULE, + }, + .probe = max77802_pmic_probe, + .id_table = max77802_pmic_id, +}; + +module_platform_driver(max77802_pmic_driver); + +MODULE_DESCRIPTION("MAXIM 77802 Regulator Driver"); +MODULE_AUTHOR("Simon Glass <sjg@chromium.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c index 9623e9e290bf..3426be89c9f6 100644 --- a/drivers/regulator/max8907-regulator.c +++ b/drivers/regulator/max8907-regulator.c @@ -226,7 +226,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev) struct device_node *np, *regulators; int ret; - np = of_node_get(pdev->dev.parent->of_node); + np = pdev->dev.parent->of_node; if (!np) return 0; diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c index dad2bcd14e96..7770777befc4 100644 --- a/drivers/regulator/max8925-regulator.c +++ b/drivers/regulator/max8925-regulator.c @@ -250,7 +250,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev, struct device_node *nproot, *np; int rcount; - nproot = of_node_get(pdev->dev.parent->of_node); + nproot = pdev->dev.parent->of_node; if (!nproot) return -ENODEV; np = of_get_child_by_name(nproot, "regulators"); diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 90b4c530dee5..9c31e215a521 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c @@ -917,7 +917,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev, struct max8997_regulator_data *rdata; unsigned int i, dvs_voltage_nr = 1, ret; - pmic_np = of_node_get(iodev->dev->of_node); + pmic_np = iodev->dev->of_node; if (!pmic_np) { dev_err(&pdev->dev, "could not find pmic sub-node\n"); return -ENODEV; diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c index f374fa57220f..793b662a1967 100644 --- a/drivers/regulator/mc13892-regulator.c +++ b/drivers/regulator/mc13892-regulator.c @@ -526,6 +526,7 @@ static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev) return REGULATOR_MODE_NORMAL; } +static struct regulator_ops mc13892_vcam_ops; static int mc13892_regulator_probe(struct platform_device *pdev) { @@ -582,10 +583,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev) } mc13xxx_unlock(mc13892); - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode - = mc13892_vcam_set_mode; - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode - = mc13892_vcam_get_mode; + /* update mc13892_vcam ops */ + memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops, + sizeof(struct regulator_ops)); + mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode, + mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode, + mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops; mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators, ARRAY_SIZE(mc13892_regulators)); diff --git a/drivers/regulator/of_regulator.c 
b/drivers/regulator/of_regulator.c index ee5e67bc8d5b..7a51814abdc5 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -14,8 +14,11 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/regulator/machine.h> +#include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> +#include "internal.h" + static void of_get_regulation_constraints(struct device_node *np, struct regulator_init_data **init_data) { @@ -189,3 +192,51 @@ int of_regulator_match(struct device *dev, struct device_node *node, return count; } EXPORT_SYMBOL_GPL(of_regulator_match); + +struct regulator_init_data *regulator_of_get_init_data(struct device *dev, + const struct regulator_desc *desc, + struct device_node **node) +{ + struct device_node *search, *child; + struct regulator_init_data *init_data = NULL; + const char *name; + + if (!dev->of_node || !desc->of_match) + return NULL; + + if (desc->regulators_node) + search = of_get_child_by_name(dev->of_node, + desc->regulators_node); + else + search = dev->of_node; + + if (!search) { + dev_err(dev, "Failed to find regulator container node\n"); + return NULL; + } + + for_each_child_of_node(search, child) { + name = of_get_property(child, "regulator-compatible", NULL); + if (!name) + name = child->name; + + if (strcmp(desc->of_match, name)) + continue; + + init_data = of_get_regulator_init_data(dev, child); + if (!init_data) { + dev_err(dev, + "failed to parse DT for regulator %s\n", + child->name); + break; + } + + of_node_get(child); + *node = child; + break; + } + + of_node_put(search); + + return init_data; +} diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index a7ce34d1b5f2..1878e5b567ef 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -1427,7 +1427,6 @@ static void palmas_dt_to_pdata(struct device *dev, u32 prop; int idx, ret; - node = of_node_get(node); regulators = of_get_child_by_name(node, "regulators"); if (!regulators) { dev_info(dev, "regulator node not found\n"); diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c new file mode 100644 index 000000000000..d3f55eaea058 --- /dev/null +++ b/drivers/regulator/pwm-regulator.c @@ -0,0 +1,197 @@ +/* + * Regulator driver for PWM Regulators + * + * Copyright (C) 2014 - STMicroelectronics Inc. + * + * Author: Lee Jones <lee.jones@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pwm.h> + +struct pwm_regulator_data { + struct regulator_desc desc; + struct pwm_voltages *duty_cycle_table; + struct pwm_device *pwm; + bool enabled; + int state; +}; + +struct pwm_voltages { + unsigned int uV; + unsigned int dutycycle; +}; + +static int pwm_regulator_get_voltage_sel(struct regulator_dev *dev) +{ + struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev); + + return drvdata->state; +} + +static int pwm_regulator_set_voltage_sel(struct regulator_dev *dev, + unsigned selector) +{ + struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev); + unsigned int pwm_reg_period; + int dutycycle; + int ret; + + pwm_reg_period = pwm_get_period(drvdata->pwm); + + dutycycle = (pwm_reg_period * + drvdata->duty_cycle_table[selector].dutycycle) / 100; + + ret = pwm_config(drvdata->pwm, dutycycle, pwm_reg_period); + if (ret) { + dev_err(&dev->dev, "Failed to configure PWM\n"); + return ret; + } + + drvdata->state = selector; + + if (!drvdata->enabled) { + ret = pwm_enable(drvdata->pwm); + if (ret) { + dev_err(&dev->dev, "Failed to enable PWM\n"); + return ret; + } + drvdata->enabled = true; + } + + return 0; +} + +static int pwm_regulator_list_voltage(struct regulator_dev *dev, + unsigned selector) +{ + struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev); + + if (selector >= drvdata->desc.n_voltages) + return -EINVAL; + + return drvdata->duty_cycle_table[selector].uV; +} + +static struct regulator_ops pwm_regulator_voltage_ops = { + .set_voltage_sel = pwm_regulator_set_voltage_sel, + .get_voltage_sel = pwm_regulator_get_voltage_sel, + .list_voltage = pwm_regulator_list_voltage, + .map_voltage = regulator_map_voltage_iterate, +}; + +static const struct regulator_desc pwm_regulator_desc = { + .name = "pwm-regulator", + .ops = &pwm_regulator_voltage_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .supply_name = "pwm", +}; + +static int pwm_regulator_probe(struct platform_device *pdev) +{ + struct pwm_regulator_data *drvdata; + struct property *prop; + struct regulator_dev *regulator; + struct regulator_config config = { }; + struct device_node *np = pdev->dev.of_node; + int length, ret; + + if (!np) { + dev_err(&pdev->dev, "Device Tree node missing\n"); + return -EINVAL; + } + + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + memcpy(&drvdata->desc, &pwm_regulator_desc, sizeof(pwm_regulator_desc)); + + /* determine the number of voltage-table */ + prop = of_find_property(np, "voltage-table", &length); + if (!prop) { + dev_err(&pdev->dev, "No voltage-table\n"); + return -EINVAL; + } + + if ((length < sizeof(*drvdata->duty_cycle_table)) || + (length % sizeof(*drvdata->duty_cycle_table))) { + dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n", + length); + return -EINVAL; + } + + drvdata->desc.n_voltages = length / sizeof(*drvdata->duty_cycle_table); + + drvdata->duty_cycle_table = devm_kzalloc(&pdev->dev, + length, GFP_KERNEL); + if (!drvdata->duty_cycle_table) + return -ENOMEM; + + /* read voltage table from DT property */ + ret = of_property_read_u32_array(np, "voltage-table", + (u32 *)drvdata->duty_cycle_table, + length / sizeof(u32)); + if (ret < 0) { + dev_err(&pdev->dev, "read voltage-table failed\n"); + return ret; + } + + 
config.init_data = of_get_regulator_init_data(&pdev->dev, np); + if (!config.init_data) + return -ENOMEM; + + config.of_node = np; + config.dev = &pdev->dev; + config.driver_data = drvdata; + + drvdata->pwm = devm_pwm_get(&pdev->dev, NULL); + if (IS_ERR(drvdata->pwm)) { + dev_err(&pdev->dev, "Failed to get PWM\n"); + return PTR_ERR(drvdata->pwm); + } + + regulator = devm_regulator_register(&pdev->dev, + &drvdata->desc, &config); + if (IS_ERR(regulator)) { + dev_err(&pdev->dev, "Failed to register regulator %s\n", + drvdata->desc.name); + return PTR_ERR(regulator); + } + + return 0; +} + +static const struct of_device_id pwm_of_match[] = { + { .compatible = "pwm-regulator" }, + { }, +}; +MODULE_DEVICE_TABLE(of, pwm_of_match); + +static struct platform_driver pwm_regulator_driver = { + .driver = { + .name = "pwm-regulator", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(pwm_of_match), + }, + .probe = pwm_regulator_probe, +}; + +module_platform_driver(pwm_regulator_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>"); +MODULE_DESCRIPTION("PWM Regulator Driver"); +MODULE_ALIAS("platform:pwm-regulator"); diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c new file mode 100644 index 000000000000..b55cd5b50ebe --- /dev/null +++ b/drivers/regulator/qcom_rpm-regulator.c @@ -0,0 +1,798 @@ +/* + * Copyright (c) 2014, Sony Mobile Communications AB. + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> +#include <linux/mfd/qcom_rpm.h> + +#include <dt-bindings/mfd/qcom-rpm.h> + +#define MAX_REQUEST_LEN 2 + +struct request_member { + int word; + unsigned int mask; + int shift; +}; + +struct rpm_reg_parts { + struct request_member mV; /* used if voltage is in mV */ + struct request_member uV; /* used if voltage is in uV */ + struct request_member ip; /* peak current in mA */ + struct request_member pd; /* pull down enable */ + struct request_member ia; /* average current in mA */ + struct request_member fm; /* force mode */ + struct request_member pm; /* power mode */ + struct request_member pc; /* pin control */ + struct request_member pf; /* pin function */ + struct request_member enable_state; /* NCP and switch */ + struct request_member comp_mode; /* NCP */ + struct request_member freq; /* frequency: NCP and SMPS */ + struct request_member freq_clk_src; /* clock source: SMPS */ + struct request_member hpm; /* switch: control OCP and SS */ + int request_len; +}; + +#define FORCE_MODE_IS_2_BITS(reg) \ + (((reg)->parts->fm.mask >> (reg)->parts->fm.shift) == 3) + +struct qcom_rpm_reg { + struct qcom_rpm *rpm; + + struct mutex lock; + struct device *dev; + struct regulator_desc desc; + const struct rpm_reg_parts *parts; + + int resource; + u32 val[MAX_REQUEST_LEN]; + + int uV; + int is_enabled; + + bool supports_force_mode_auto; + bool supports_force_mode_bypass; +}; + +static const struct rpm_reg_parts rpm8660_ldo_parts = { + .request_len = 2, + .mV = { 0, 0x00000FFF, 0 }, + .ip = { 0, 0x00FFF000, 12 }, + .fm = { 0, 0x03000000, 24 }, + .pc = { 0, 0x3C000000, 26 }, + .pf = { 0, 0xC0000000, 30 }, + .pd = { 1, 0x00000001, 0 }, + .ia = { 1, 0x00001FFE, 1 }, +}; + +static const struct rpm_reg_parts rpm8660_smps_parts = { + .request_len = 2, + .mV = { 0, 0x00000FFF, 0 }, + .ip = { 0, 0x00FFF000, 12 }, + .fm = { 0, 0x03000000, 24 }, + .pc = { 0, 0x3C000000, 26 }, + .pf = { 0, 0xC0000000, 30 }, + .pd = { 1, 0x00000001, 0 }, + .ia = { 1, 0x00001FFE, 1 }, + .freq = { 1, 0x001FE000, 13 }, + .freq_clk_src = { 1, 0x00600000, 21 }, +}; + +static const struct rpm_reg_parts rpm8660_switch_parts = { + .request_len = 1, + .enable_state = { 0, 0x00000001, 0 }, + .pd = { 0, 0x00000002, 1 }, + .pc = { 0, 0x0000003C, 2 }, + .pf = { 0, 0x000000C0, 6 }, + .hpm = { 0, 0x00000300, 8 }, +}; + +static const struct rpm_reg_parts rpm8660_ncp_parts = { + .request_len = 1, + .mV = { 0, 0x00000FFF, 0 }, + .enable_state = { 0, 0x00001000, 12 }, + .comp_mode = { 0, 0x00002000, 13 }, + .freq = { 0, 0x003FC000, 14 }, +}; + +static const struct rpm_reg_parts rpm8960_ldo_parts = { + .request_len = 2, + .uV = { 0, 0x007FFFFF, 0 }, + .pd = { 0, 0x00800000, 23 }, + .pc = { 0, 0x0F000000, 24 }, + .pf = { 0, 0xF0000000, 28 }, + .ip = { 1, 0x000003FF, 0 }, + .ia = { 1, 0x000FFC00, 10 }, + .fm = { 1, 0x00700000, 20 }, +}; + +static const struct rpm_reg_parts rpm8960_smps_parts = { + .request_len = 2, + .uV = { 0, 0x007FFFFF, 0 }, + .pd = { 0, 0x00800000, 23 }, + .pc = { 0, 0x0F000000, 24 }, + .pf = { 0, 0xF0000000, 28 }, + .ip = { 1, 0x000003FF, 0 }, + .ia = { 1, 0x000FFC00, 10 }, + .fm = { 1, 0x00700000, 20 }, + .pm = { 1, 0x00800000, 23 }, + .freq = { 1, 0x1F000000, 24 }, + .freq_clk_src = { 1, 0x60000000, 29 }, +}; + +static const struct rpm_reg_parts rpm8960_switch_parts = { + .request_len = 1, 
+ .enable_state = { 0, 0x00000001, 0 }, + .pd = { 0, 0x00000002, 1 }, + .pc = { 0, 0x0000003C, 2 }, + .pf = { 0, 0x000003C0, 6 }, + .hpm = { 0, 0x00000C00, 10 }, +}; + +static const struct rpm_reg_parts rpm8960_ncp_parts = { + .request_len = 1, + .uV = { 0, 0x007FFFFF, 0 }, + .enable_state = { 0, 0x00800000, 23 }, + .comp_mode = { 0, 0x01000000, 24 }, + .freq = { 0, 0x3E000000, 25 }, +}; + +/* + * Physically available PMIC regulator voltage ranges + */ +static const struct regulator_linear_range pldo_ranges[] = { + REGULATOR_LINEAR_RANGE( 750000, 0, 59, 12500), + REGULATOR_LINEAR_RANGE(1500000, 60, 123, 25000), + REGULATOR_LINEAR_RANGE(3100000, 124, 160, 50000), +}; + +static const struct regulator_linear_range nldo_ranges[] = { + REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500), +}; + +static const struct regulator_linear_range nldo1200_ranges[] = { + REGULATOR_LINEAR_RANGE( 375000, 0, 59, 6250), + REGULATOR_LINEAR_RANGE( 750000, 60, 123, 12500), +}; + +static const struct regulator_linear_range smps_ranges[] = { + REGULATOR_LINEAR_RANGE( 375000, 0, 29, 12500), + REGULATOR_LINEAR_RANGE( 750000, 30, 89, 12500), + REGULATOR_LINEAR_RANGE(1500000, 90, 153, 25000), +}; + +static const struct regulator_linear_range ftsmps_ranges[] = { + REGULATOR_LINEAR_RANGE( 350000, 0, 6, 50000), + REGULATOR_LINEAR_RANGE( 700000, 7, 63, 12500), + REGULATOR_LINEAR_RANGE(1500000, 64, 100, 50000), +}; + +static const struct regulator_linear_range ncp_ranges[] = { + REGULATOR_LINEAR_RANGE(1500000, 0, 31, 50000), +}; + +static int rpm_reg_write(struct qcom_rpm_reg *vreg, + const struct request_member *req, + const int value) +{ + if (WARN_ON((value << req->shift) & ~req->mask)) + return -EINVAL; + + vreg->val[req->word] &= ~req->mask; + vreg->val[req->word] |= value << req->shift; + + return qcom_rpm_write(vreg->rpm, + vreg->resource, + vreg->val, + vreg->parts->request_len); +} + +static int rpm_reg_set_mV_sel(struct regulator_dev *rdev, + unsigned selector) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->mV; + int ret = 0; + int uV; + + if (req->mask == 0) + return -EINVAL; + + uV = regulator_list_voltage_linear_range(rdev, selector); + if (uV < 0) + return uV; + + mutex_lock(&vreg->lock); + vreg->uV = uV; + if (vreg->is_enabled) + ret = rpm_reg_write(vreg, req, vreg->uV / 1000); + mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_set_uV_sel(struct regulator_dev *rdev, + unsigned selector) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->uV; + int ret = 0; + int uV; + + if (req->mask == 0) + return -EINVAL; + + uV = regulator_list_voltage_linear_range(rdev, selector); + if (uV < 0) + return uV; + + mutex_lock(&vreg->lock); + vreg->uV = uV; + if (vreg->is_enabled) + ret = rpm_reg_write(vreg, req, vreg->uV); + mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_get_voltage(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + + return vreg->uV; +} + +static int rpm_reg_mV_enable(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->mV; + int ret; + + if (req->mask == 0) + return -EINVAL; + + mutex_lock(&vreg->lock); + ret = rpm_reg_write(vreg, req, vreg->uV / 1000); + if (!ret) + vreg->is_enabled = 1; + 
mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_uV_enable(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->uV; + int ret; + + if (req->mask == 0) + return -EINVAL; + + mutex_lock(&vreg->lock); + ret = rpm_reg_write(vreg, req, vreg->uV); + if (!ret) + vreg->is_enabled = 1; + mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_switch_enable(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->enable_state; + int ret; + + if (req->mask == 0) + return -EINVAL; + + mutex_lock(&vreg->lock); + ret = rpm_reg_write(vreg, req, 1); + if (!ret) + vreg->is_enabled = 1; + mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_mV_disable(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->mV; + int ret; + + if (req->mask == 0) + return -EINVAL; + + mutex_lock(&vreg->lock); + ret = rpm_reg_write(vreg, req, 0); + if (!ret) + vreg->is_enabled = 0; + mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_uV_disable(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->uV; + int ret; + + if (req->mask == 0) + return -EINVAL; + + mutex_lock(&vreg->lock); + ret = rpm_reg_write(vreg, req, 0); + if (!ret) + vreg->is_enabled = 0; + mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_switch_disable(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + const struct rpm_reg_parts *parts = vreg->parts; + const struct request_member *req = &parts->enable_state; + int ret; + + if (req->mask == 0) + return -EINVAL; + + mutex_lock(&vreg->lock); + ret = rpm_reg_write(vreg, req, 0); + if (!ret) + vreg->is_enabled = 0; + mutex_unlock(&vreg->lock); + + return ret; +} + +static int rpm_reg_is_enabled(struct regulator_dev *rdev) +{ + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + + return vreg->is_enabled; +} + +static struct regulator_ops uV_ops = { + .list_voltage = regulator_list_voltage_linear_range, + + .set_voltage_sel = rpm_reg_set_uV_sel, + .get_voltage = rpm_reg_get_voltage, + + .enable = rpm_reg_uV_enable, + .disable = rpm_reg_uV_disable, + .is_enabled = rpm_reg_is_enabled, +}; + +static struct regulator_ops mV_ops = { + .list_voltage = regulator_list_voltage_linear_range, + + .set_voltage_sel = rpm_reg_set_mV_sel, + .get_voltage = rpm_reg_get_voltage, + + .enable = rpm_reg_mV_enable, + .disable = rpm_reg_mV_disable, + .is_enabled = rpm_reg_is_enabled, +}; + +static struct regulator_ops switch_ops = { + .enable = rpm_reg_switch_enable, + .disable = rpm_reg_switch_disable, + .is_enabled = rpm_reg_is_enabled, +}; + +/* + * PM8058 regulators + */ +static const struct qcom_rpm_reg pm8058_pldo = { + .desc.linear_ranges = pldo_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges), + .desc.n_voltages = 161, + .desc.ops = &mV_ops, + .parts = &rpm8660_ldo_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = false, +}; + +static const struct qcom_rpm_reg pm8058_nldo = { + .desc.linear_ranges = nldo_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges), + .desc.n_voltages = 
64, + .desc.ops = &mV_ops, + .parts = &rpm8660_ldo_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = false, +}; + +static const struct qcom_rpm_reg pm8058_smps = { + .desc.linear_ranges = smps_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges), + .desc.n_voltages = 154, + .desc.ops = &mV_ops, + .parts = &rpm8660_smps_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = false, +}; + +static const struct qcom_rpm_reg pm8058_ncp = { + .desc.linear_ranges = ncp_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges), + .desc.n_voltages = 32, + .desc.ops = &mV_ops, + .parts = &rpm8660_ncp_parts, +}; + +static const struct qcom_rpm_reg pm8058_switch = { + .desc.ops = &switch_ops, + .parts = &rpm8660_switch_parts, +}; + +/* + * PM8901 regulators + */ +static const struct qcom_rpm_reg pm8901_pldo = { + .desc.linear_ranges = pldo_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges), + .desc.n_voltages = 161, + .desc.ops = &mV_ops, + .parts = &rpm8660_ldo_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = true, +}; + +static const struct qcom_rpm_reg pm8901_nldo = { + .desc.linear_ranges = nldo_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges), + .desc.n_voltages = 64, + .desc.ops = &mV_ops, + .parts = &rpm8660_ldo_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = true, +}; + +static const struct qcom_rpm_reg pm8901_ftsmps = { + .desc.linear_ranges = ftsmps_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges), + .desc.n_voltages = 101, + .desc.ops = &mV_ops, + .parts = &rpm8660_smps_parts, + .supports_force_mode_auto = true, + .supports_force_mode_bypass = false, +}; + +static const struct qcom_rpm_reg pm8901_switch = { + .desc.ops = &switch_ops, + .parts = &rpm8660_switch_parts, +}; + +/* + * PM8921 regulators + */ +static const struct qcom_rpm_reg pm8921_pldo = { + .desc.linear_ranges = pldo_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges), + .desc.n_voltages = 161, + .desc.ops = &uV_ops, + .parts = &rpm8960_ldo_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = true, +}; + +static const struct qcom_rpm_reg pm8921_nldo = { + .desc.linear_ranges = nldo_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges), + .desc.n_voltages = 64, + .desc.ops = &uV_ops, + .parts = &rpm8960_ldo_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = true, +}; + +static const struct qcom_rpm_reg pm8921_nldo1200 = { + .desc.linear_ranges = nldo1200_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(nldo1200_ranges), + .desc.n_voltages = 124, + .desc.ops = &uV_ops, + .parts = &rpm8960_ldo_parts, + .supports_force_mode_auto = false, + .supports_force_mode_bypass = true, +}; + +static const struct qcom_rpm_reg pm8921_smps = { + .desc.linear_ranges = smps_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges), + .desc.n_voltages = 154, + .desc.ops = &uV_ops, + .parts = &rpm8960_smps_parts, + .supports_force_mode_auto = true, + .supports_force_mode_bypass = false, +}; + +static const struct qcom_rpm_reg pm8921_ftsmps = { + .desc.linear_ranges = ftsmps_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges), + .desc.n_voltages = 101, + .desc.ops = &uV_ops, + .parts = &rpm8960_smps_parts, + .supports_force_mode_auto = true, + .supports_force_mode_bypass = false, +}; + +static const struct qcom_rpm_reg pm8921_ncp = { + .desc.linear_ranges = ncp_ranges, + .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges), + .desc.n_voltages = 32, + .desc.ops = 
&uV_ops, + .parts = &rpm8960_ncp_parts, +}; + +static const struct qcom_rpm_reg pm8921_switch = { + .desc.ops = &switch_ops, + .parts = &rpm8960_switch_parts, +}; + +static const struct of_device_id rpm_of_match[] = { + { .compatible = "qcom,rpm-pm8058-pldo", .data = &pm8058_pldo }, + { .compatible = "qcom,rpm-pm8058-nldo", .data = &pm8058_nldo }, + { .compatible = "qcom,rpm-pm8058-smps", .data = &pm8058_smps }, + { .compatible = "qcom,rpm-pm8058-ncp", .data = &pm8058_ncp }, + { .compatible = "qcom,rpm-pm8058-switch", .data = &pm8058_switch }, + + { .compatible = "qcom,rpm-pm8901-pldo", .data = &pm8901_pldo }, + { .compatible = "qcom,rpm-pm8901-nldo", .data = &pm8901_nldo }, + { .compatible = "qcom,rpm-pm8901-ftsmps", .data = &pm8901_ftsmps }, + { .compatible = "qcom,rpm-pm8901-switch", .data = &pm8901_switch }, + + { .compatible = "qcom,rpm-pm8921-pldo", .data = &pm8921_pldo }, + { .compatible = "qcom,rpm-pm8921-nldo", .data = &pm8921_nldo }, + { .compatible = "qcom,rpm-pm8921-nldo1200", .data = &pm8921_nldo1200 }, + { .compatible = "qcom,rpm-pm8921-smps", .data = &pm8921_smps }, + { .compatible = "qcom,rpm-pm8921-ftsmps", .data = &pm8921_ftsmps }, + { .compatible = "qcom,rpm-pm8921-ncp", .data = &pm8921_ncp }, + { .compatible = "qcom,rpm-pm8921-switch", .data = &pm8921_switch }, + { } +}; +MODULE_DEVICE_TABLE(of, rpm_of_match); + +static int rpm_reg_set(struct qcom_rpm_reg *vreg, + const struct request_member *req, + const int value) +{ + if (req->mask == 0 || (value << req->shift) & ~req->mask) + return -EINVAL; + + vreg->val[req->word] &= ~req->mask; + vreg->val[req->word] |= value << req->shift; + + return 0; +} + +static int rpm_reg_of_parse_freq(struct device *dev, struct qcom_rpm_reg *vreg) +{ + static const int freq_table[] = { + 19200000, 9600000, 6400000, 4800000, 3840000, 3200000, 2740000, + 2400000, 2130000, 1920000, 1750000, 1600000, 1480000, 1370000, + 1280000, 1200000, + }; + const char *key; + u32 freq; + int ret; + int i; + + key = "qcom,switch-mode-frequency"; + ret = of_property_read_u32(dev->of_node, key, &freq); + if (ret) { + dev_err(dev, "regulator requires %s property\n", key); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(freq_table); i++) { + if (freq == freq_table[i]) { + rpm_reg_set(vreg, &vreg->parts->freq, i + 1); + return 0; + } + } + + dev_err(dev, "invalid frequency %d\n", freq); + return -EINVAL; +} + +static int rpm_reg_probe(struct platform_device *pdev) +{ + struct regulator_init_data *initdata; + const struct qcom_rpm_reg *template; + const struct of_device_id *match; + struct regulator_config config = { }; + struct regulator_dev *rdev; + struct qcom_rpm_reg *vreg; + const char *key; + int force_mode; + bool pwm; + u32 val; + int ret; + + match = of_match_device(rpm_of_match, &pdev->dev); + template = match->data; + + initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node); + if (!initdata) + return -EINVAL; + + vreg = devm_kmalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); + if (!vreg) { + dev_err(&pdev->dev, "failed to allocate vreg\n"); + return -ENOMEM; + } + memcpy(vreg, template, sizeof(*vreg)); + mutex_init(&vreg->lock); + vreg->dev = &pdev->dev; + vreg->desc.id = -1; + vreg->desc.owner = THIS_MODULE; + vreg->desc.type = REGULATOR_VOLTAGE; + vreg->desc.name = pdev->dev.of_node->name; + + vreg->rpm = dev_get_drvdata(pdev->dev.parent); + if (!vreg->rpm) { + dev_err(&pdev->dev, "unable to retrieve handle to rpm\n"); + return -ENODEV; + } + + key = "reg"; + ret = of_property_read_u32(pdev->dev.of_node, key, &val); + if (ret) { +
dev_err(&pdev->dev, "failed to read %s\n", key); + return ret; + } + vreg->resource = val; + + if ((vreg->parts->uV.mask || vreg->parts->mV.mask) && + (!initdata->constraints.min_uV || !initdata->constraints.max_uV)) { + dev_err(&pdev->dev, "no voltage specified for regulator\n"); + return -EINVAL; + } + + key = "bias-pull-down"; + if (of_property_read_bool(pdev->dev.of_node, key)) { + ret = rpm_reg_set(vreg, &vreg->parts->pd, 1); + if (ret) { + dev_err(&pdev->dev, "%s is invalid", key); + return ret; + } + } + + if (vreg->parts->freq.mask) { + ret = rpm_reg_of_parse_freq(&pdev->dev, vreg); + if (ret < 0) + return ret; + } + + if (vreg->parts->pm.mask) { + key = "qcom,power-mode-hysteretic"; + pwm = !of_property_read_bool(pdev->dev.of_node, key); + + ret = rpm_reg_set(vreg, &vreg->parts->pm, pwm); + if (ret) { + dev_err(&pdev->dev, "failed to set power mode\n"); + return ret; + } + } + + if (vreg->parts->fm.mask) { + force_mode = -1; + + key = "qcom,force-mode"; + ret = of_property_read_u32(pdev->dev.of_node, key, &val); + if (ret == -EINVAL) { + val = QCOM_RPM_FORCE_MODE_NONE; + } else if (ret < 0) { + dev_err(&pdev->dev, "failed to read %s\n", key); + return ret; + } + + /* + * If force-mode is encoded as 2 bits then the + * possible register values are: + * NONE, LPM, HPM + * otherwise: + * NONE, LPM, AUTO, HPM, BYPASS + */ + switch (val) { + case QCOM_RPM_FORCE_MODE_NONE: + force_mode = 0; + break; + case QCOM_RPM_FORCE_MODE_LPM: + force_mode = 1; + break; + case QCOM_RPM_FORCE_MODE_HPM: + if (FORCE_MODE_IS_2_BITS(vreg)) + force_mode = 2; + else + force_mode = 3; + break; + case QCOM_RPM_FORCE_MODE_AUTO: + if (vreg->supports_force_mode_auto) + force_mode = 2; + break; + case QCOM_RPM_FORCE_MODE_BYPASS: + if (vreg->supports_force_mode_bypass) + force_mode = 4; + break; + } + + if (force_mode < 0) { + dev_err(&pdev->dev, "invalid force mode\n"); + return -EINVAL; + } + + ret = rpm_reg_set(vreg, &vreg->parts->fm, force_mode); + if (ret) { + dev_err(&pdev->dev, "failed to set force mode\n"); + return ret; + } + } + + config.dev = &pdev->dev; + config.init_data = initdata; + config.driver_data = vreg; + config.of_node = pdev->dev.of_node; + rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, "can't register regulator\n"); + return PTR_ERR(rdev); + } + + return 0; +} + +static struct platform_driver rpm_reg_driver = { + .probe = rpm_reg_probe, + .driver = { + .name = "qcom_rpm_reg", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(rpm_of_match), + }, +}; + +static int __init rpm_reg_init(void) +{ + return platform_driver_register(&rpm_reg_driver); +} +subsys_initcall(rpm_reg_init); + +static void __exit rpm_reg_exit(void) +{ + platform_driver_unregister(&rpm_reg_driver); +} +module_exit(rpm_reg_exit) + +MODULE_DESCRIPTION("Qualcomm RPM regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c new file mode 100644 index 000000000000..e305416d7697 --- /dev/null +++ b/drivers/regulator/rk808-regulator.c @@ -0,0 +1,381 @@ +/* + * Regulator driver for Rockchip RK808 + * + * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Chris Zhong <zyw@rock-chips.com> + * Author: Zhang Qing <zhangqing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/mfd/rk808.h> +#include <linux/of_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> + +/* Field Definitions */ +#define RK808_BUCK_VSEL_MASK 0x3f +#define RK808_BUCK4_VSEL_MASK 0xf +#define RK808_LDO_VSEL_MASK 0x1f + +/* Ramp rate definitions for buck1 / buck2 only */ +#define RK808_RAMP_RATE_OFFSET 3 +#define RK808_RAMP_RATE_MASK (3 << RK808_RAMP_RATE_OFFSET) +#define RK808_RAMP_RATE_2MV_PER_US (0 << RK808_RAMP_RATE_OFFSET) +#define RK808_RAMP_RATE_4MV_PER_US (1 << RK808_RAMP_RATE_OFFSET) +#define RK808_RAMP_RATE_6MV_PER_US (2 << RK808_RAMP_RATE_OFFSET) +#define RK808_RAMP_RATE_10MV_PER_US (3 << RK808_RAMP_RATE_OFFSET) + +static const int rk808_buck_config_regs[] = { + RK808_BUCK1_CONFIG_REG, + RK808_BUCK2_CONFIG_REG, + RK808_BUCK3_CONFIG_REG, + RK808_BUCK4_CONFIG_REG, +}; + +static const struct regulator_linear_range rk808_buck_voltage_ranges[] = { + REGULATOR_LINEAR_RANGE(700000, 0, 63, 12500), +}; + +static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = { + REGULATOR_LINEAR_RANGE(1800000, 0, 15, 100000), +}; + +static const struct regulator_linear_range rk808_ldo_voltage_ranges[] = { + REGULATOR_LINEAR_RANGE(1800000, 0, 16, 100000), +}; + +static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = { + REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000), + REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0), +}; + +static const struct regulator_linear_range rk808_ldo6_voltage_ranges[] = { + REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000), +}; + +static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US; + unsigned int reg = rk808_buck_config_regs[rdev->desc->id - + RK808_ID_DCDC1]; + + switch (ramp_delay) { + case 1 ... 2000: + ramp_value = RK808_RAMP_RATE_2MV_PER_US; + break; + case 2001 ... 4000: + ramp_value = RK808_RAMP_RATE_4MV_PER_US; + break; + case 4001 ... 6000: + ramp_value = RK808_RAMP_RATE_6MV_PER_US; + break; + case 6001 ... 
10000: + break; + default: + pr_warn("%s ramp_delay: %d not supported, setting 10000\n", + rdev->desc->name, ramp_delay); + } + + return regmap_update_bits(rdev->regmap, reg, + RK808_RAMP_RATE_MASK, ramp_value); +} + +static struct regulator_ops rk808_buck1_2_ops = { + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_ramp_delay = rk808_set_ramp_delay, +}; + +static struct regulator_ops rk808_reg_ops = { + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static struct regulator_ops rk808_switch_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static const struct regulator_desc rk808_reg[] = { + { + .name = "DCDC_REG1", + .supply_name = "vcc1", + .id = RK808_ID_DCDC1, + .ops = &rk808_buck1_2_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 64, + .linear_ranges = rk808_buck_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges), + .vsel_reg = RK808_BUCK1_ON_VSEL_REG, + .vsel_mask = RK808_BUCK_VSEL_MASK, + .enable_reg = RK808_DCDC_EN_REG, + .enable_mask = BIT(0), + .owner = THIS_MODULE, + }, { + .name = "DCDC_REG2", + .supply_name = "vcc2", + .id = RK808_ID_DCDC2, + .ops = &rk808_buck1_2_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 64, + .linear_ranges = rk808_buck_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges), + .vsel_reg = RK808_BUCK2_ON_VSEL_REG, + .vsel_mask = RK808_BUCK_VSEL_MASK, + .enable_reg = RK808_DCDC_EN_REG, + .enable_mask = BIT(1), + .owner = THIS_MODULE, + }, { + .name = "DCDC_REG3", + .supply_name = "vcc3", + .id = RK808_ID_DCDC3, + .ops = &rk808_switch_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 1, + .enable_reg = RK808_DCDC_EN_REG, + .enable_mask = BIT(2), + .owner = THIS_MODULE, + }, { + .name = "DCDC_REG4", + .supply_name = "vcc4", + .id = RK808_ID_DCDC4, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 16, + .linear_ranges = rk808_buck4_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_buck4_voltage_ranges), + .vsel_reg = RK808_BUCK4_ON_VSEL_REG, + .vsel_mask = RK808_BUCK4_VSEL_MASK, + .enable_reg = RK808_DCDC_EN_REG, + .enable_mask = BIT(3), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG1", + .supply_name = "vcc6", + .id = RK808_ID_LDO1, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 17, + .linear_ranges = rk808_ldo_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges), + .vsel_reg = RK808_LDO1_ON_VSEL_REG, + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(0), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG2", + .supply_name = "vcc6", + .id = RK808_ID_LDO2, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 17, + .linear_ranges = rk808_ldo_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges), + .vsel_reg = RK808_LDO2_ON_VSEL_REG, + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, 
+ .enable_mask = BIT(1), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG3", + .supply_name = "vcc7", + .id = RK808_ID_LDO3, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 16, + .linear_ranges = rk808_ldo3_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo3_voltage_ranges), + .vsel_reg = RK808_LDO3_ON_VSEL_REG, + .vsel_mask = RK808_BUCK4_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(2), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG4", + .supply_name = "vcc9", + .id = RK808_ID_LDO4, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 17, + .linear_ranges = rk808_ldo_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges), + .vsel_reg = RK808_LDO4_ON_VSEL_REG, + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(3), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG5", + .supply_name = "vcc9", + .id = RK808_ID_LDO5, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 17, + .linear_ranges = rk808_ldo_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges), + .vsel_reg = RK808_LDO5_ON_VSEL_REG, + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(4), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG6", + .supply_name = "vcc10", + .id = RK808_ID_LDO6, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 18, + .linear_ranges = rk808_ldo6_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges), + .vsel_reg = RK808_LDO6_ON_VSEL_REG, + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(5), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG7", + .supply_name = "vcc7", + .id = RK808_ID_LDO7, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 18, + .linear_ranges = rk808_ldo6_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges), + .vsel_reg = RK808_LDO7_ON_VSEL_REG, + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(6), + .owner = THIS_MODULE, + }, { + .name = "LDO_REG8", + .supply_name = "vcc11", + .id = RK808_ID_LDO8, + .ops = &rk808_reg_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 17, + .linear_ranges = rk808_ldo_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges), + .vsel_reg = RK808_LDO8_ON_VSEL_REG, + .vsel_mask = RK808_LDO_VSEL_MASK, + .enable_reg = RK808_LDO_EN_REG, + .enable_mask = BIT(7), + .owner = THIS_MODULE, + }, { + .name = "SWITCH_REG1", + .supply_name = "vcc8", + .id = RK808_ID_SWITCH1, + .ops = &rk808_switch_ops, + .type = REGULATOR_VOLTAGE, + .enable_reg = RK808_DCDC_EN_REG, + .enable_mask = BIT(5), + .owner = THIS_MODULE, + }, { + .name = "SWITCH_REG2", + .supply_name = "vcc12", + .id = RK808_ID_SWITCH2, + .ops = &rk808_switch_ops, + .type = REGULATOR_VOLTAGE, + .enable_reg = RK808_DCDC_EN_REG, + .enable_mask = BIT(6), + .owner = THIS_MODULE, + }, +}; + +static struct of_regulator_match rk808_reg_matches[] = { + [RK808_ID_DCDC1] = { .name = "DCDC_REG1" }, + [RK808_ID_DCDC2] = { .name = "DCDC_REG2" }, + [RK808_ID_DCDC3] = { .name = "DCDC_REG3" }, + [RK808_ID_DCDC4] = { .name = "DCDC_REG4" }, + [RK808_ID_LDO1] = { .name = "LDO_REG1" }, + [RK808_ID_LDO2] = { .name = "LDO_REG2" }, + [RK808_ID_LDO3] = { .name = "LDO_REG3" }, + [RK808_ID_LDO4] = { .name = "LDO_REG4" }, + [RK808_ID_LDO5] = { .name = "LDO_REG5" }, + [RK808_ID_LDO6] = { .name = "LDO_REG6" }, + [RK808_ID_LDO7] = { .name = "LDO_REG7" }, + 
[RK808_ID_LDO8] = { .name = "LDO_REG8" }, + [RK808_ID_SWITCH1] = { .name = "SWITCH_REG1" }, + [RK808_ID_SWITCH2] = { .name = "SWITCH_REG2" }, +}; + +static int rk808_regulator_probe(struct platform_device *pdev) +{ + struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent); + struct i2c_client *client = rk808->i2c; + struct device_node *reg_np; + struct regulator_config config = {}; + struct regulator_dev *rk808_rdev; + int ret, i; + + reg_np = of_get_child_by_name(client->dev.of_node, "regulators"); + if (!reg_np) + return -ENXIO; + + ret = of_regulator_match(&pdev->dev, reg_np, rk808_reg_matches, + RK808_NUM_REGULATORS); + of_node_put(reg_np); + if (ret < 0) + return ret; + + /* Instantiate the regulators */ + for (i = 0; i < RK808_NUM_REGULATORS; i++) { + if (!rk808_reg_matches[i].init_data || + !rk808_reg_matches[i].of_node) + continue; + + config.dev = &client->dev; + config.driver_data = rk808; + config.regmap = rk808->regmap; + config.of_node = rk808_reg_matches[i].of_node; + config.init_data = rk808_reg_matches[i].init_data; + + rk808_rdev = devm_regulator_register(&pdev->dev, + &rk808_reg[i], &config); + if (IS_ERR(rk808_rdev)) { + dev_err(&client->dev, + "failed to register %d regulator\n", i); + return PTR_ERR(rk808_rdev); + } + } + + return 0; +} + +static struct platform_driver rk808_regulator_driver = { + .probe = rk808_regulator_probe, + .driver = { + .name = "rk808-regulator", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(rk808_regulator_driver); + +MODULE_DESCRIPTION("regulator driver for the rk808 series PMICs"); +MODULE_AUTHOR("Chris Zhong<zyw@rock-chips.com>"); +MODULE_AUTHOR("Zhang Qing<zhangqing@rock-chips.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rk808-regulator"); diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c new file mode 100644 index 000000000000..e58d79aeb393 --- /dev/null +++ b/drivers/regulator/rn5t618-regulator.c @@ -0,0 +1,143 @@ +/* + * Regulator driver for Ricoh RN5T618 PMIC + * + * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/mfd/rn5t618.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> + +static struct regulator_ops rn5t618_reg_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .list_voltage = regulator_list_voltage_linear, +}; + +#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \ + [RN5T618_##rid] = { \ + .name = #rid, \ + .id = RN5T618_##rid, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .ops = &rn5t618_reg_ops, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ + .min_uV = (min), \ + .uV_step = (step), \ + .enable_reg = RN5T618_##ereg, \ + .enable_mask = (emask), \ + .vsel_reg = RN5T618_##vreg, \ + .vsel_mask = (vmask), \ + } + +static struct regulator_desc rn5t618_regulators[] = { + /* DCDC */ + REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500), + REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500), + REG(DCDC3, DC3CTL, BIT(0), DC3DAC, 0xff, 600000, 3500000, 12500), + /* LDO */ + REG(LDO1, LDOEN1, BIT(0), LDO1DAC, 0x7f, 900000, 3500000, 25000), + REG(LDO2, LDOEN1, BIT(1), LDO2DAC, 0x7f, 900000, 3500000, 25000), + REG(LDO3, LDOEN1, BIT(2), LDO3DAC, 0x7f, 600000, 3500000, 25000), + REG(LDO4, LDOEN1, BIT(3), LDO4DAC, 0x7f, 900000, 3500000, 25000), + REG(LDO5, LDOEN1, BIT(4), LDO5DAC, 0x7f, 900000, 3500000, 25000), + /* LDO RTC */ + REG(LDORTC1, LDOEN2, BIT(4), LDORTCDAC, 0x7f, 1700000, 3500000, 25000), + REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000), +}; + +static struct of_regulator_match rn5t618_matches[] = { + [RN5T618_DCDC1] = { .name = "DCDC1" }, + [RN5T618_DCDC2] = { .name = "DCDC2" }, + [RN5T618_DCDC3] = { .name = "DCDC3" }, + [RN5T618_LDO1] = { .name = "LDO1" }, + [RN5T618_LDO2] = { .name = "LDO2" }, + [RN5T618_LDO3] = { .name = "LDO3" }, + [RN5T618_LDO4] = { .name = "LDO4" }, + [RN5T618_LDO5] = { .name = "LDO5" }, + [RN5T618_LDORTC1] = { .name = "LDORTC1" }, + [RN5T618_LDORTC2] = { .name = "LDORTC2" }, +}; + +static int rn5t618_regulator_parse_dt(struct platform_device *pdev) +{ + struct device_node *np, *regulators; + int ret; + + np = of_node_get(pdev->dev.parent->of_node); + if (!np) + return 0; + + regulators = of_get_child_by_name(np, "regulators"); + if (!regulators) { + dev_err(&pdev->dev, "regulators node not found\n"); + return -EINVAL; + } + + ret = of_regulator_match(&pdev->dev, regulators, rn5t618_matches, + ARRAY_SIZE(rn5t618_matches)); + of_node_put(regulators); + if (ret < 0) { + dev_err(&pdev->dev, "error parsing regulator init data: %d\n", + ret); + } + + return 0; +} + +static int rn5t618_regulator_probe(struct platform_device *pdev) +{ + struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent); + struct regulator_config config = { }; + struct regulator_dev *rdev; + int ret, i; + + ret = rn5t618_regulator_parse_dt(pdev); + if (ret) + return ret; + + for (i = 0; i < RN5T618_REG_NUM; i++) { + config.dev = &pdev->dev; + config.init_data = rn5t618_matches[i].init_data; + config.of_node = rn5t618_matches[i].of_node; + config.regmap = rn5t618->regmap; + + rdev = devm_regulator_register(&pdev->dev, + &rn5t618_regulators[i], + &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, "failed to register %s regulator\n", + rn5t618_regulators[i].name); + return 
PTR_ERR(rdev); + } + } + + return 0; +} + +static struct platform_driver rn5t618_regulator_driver = { + .probe = rn5t618_regulator_probe, + .driver = { + .name = "rn5t618-regulator", + }, +}; + +module_platform_driver(rn5t618_regulator_driver); + +MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); +MODULE_DESCRIPTION("RN5T618 regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index ee83b4876420..4acefa6b462e 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -235,28 +235,14 @@ static struct regulator_ops s2mpa01_buck_ops = { .set_ramp_delay = s2mpa01_set_ramp_delay, }; -#define regulator_desc_ldo1(num) { \ +#define regulator_desc_ldo(num, step) { \ .name = "LDO"#num, \ .id = S2MPA01_LDO##num, \ .ops = &s2mpa01_ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPA01_LDO_MIN, \ - .uV_step = S2MPA01_LDO_STEP1, \ - .n_voltages = S2MPA01_LDO_N_VOLTAGES, \ - .vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \ - .vsel_mask = S2MPA01_LDO_VSEL_MASK, \ - .enable_reg = S2MPA01_REG_L1CTRL + num - 1, \ - .enable_mask = S2MPA01_ENABLE_MASK \ -} -#define regulator_desc_ldo2(num) { \ - .name = "LDO"#num, \ - .id = S2MPA01_LDO##num, \ - .ops = &s2mpa01_ldo_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPA01_LDO_MIN, \ - .uV_step = S2MPA01_LDO_STEP2, \ + .min_uV = MIN_800_MV, \ + .uV_step = step, \ .n_voltages = S2MPA01_LDO_N_VOLTAGES, \ .vsel_reg = S2MPA01_REG_L1CTRL + num - 1, \ .vsel_mask = S2MPA01_LDO_VSEL_MASK, \ @@ -270,8 +256,8 @@ static struct regulator_ops s2mpa01_buck_ops = { .ops = &s2mpa01_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPA01_BUCK_MIN1, \ - .uV_step = S2MPA01_BUCK_STEP1, \ + .min_uV = MIN_600_MV, \ + .uV_step = STEP_6_25_MV, \ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \ .ramp_delay = S2MPA01_RAMP_DELAY, \ .vsel_reg = S2MPA01_REG_B1CTRL2 + (num - 1) * 2, \ @@ -286,8 +272,8 @@ static struct regulator_ops s2mpa01_buck_ops = { .ops = &s2mpa01_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPA01_BUCK_MIN2, \ - .uV_step = S2MPA01_BUCK_STEP1, \ + .min_uV = MIN_800_MV, \ + .uV_step = STEP_6_25_MV, \ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \ .ramp_delay = S2MPA01_RAMP_DELAY, \ .vsel_reg = S2MPA01_REG_B5CTRL2, \ @@ -296,14 +282,14 @@ static struct regulator_ops s2mpa01_buck_ops = { .enable_mask = S2MPA01_ENABLE_MASK \ } -#define regulator_desc_buck6_7(num) { \ +#define regulator_desc_buck6_10(num, min, step) { \ .name = "BUCK"#num, \ .id = S2MPA01_BUCK##num, \ .ops = &s2mpa01_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPA01_BUCK_MIN1, \ - .uV_step = S2MPA01_BUCK_STEP1, \ + .min_uV = min, \ + .uV_step = step, \ .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \ .ramp_delay = S2MPA01_RAMP_DELAY, \ .vsel_reg = S2MPA01_REG_B6CTRL2 + (num - 6) * 2, \ @@ -312,91 +298,43 @@ static struct regulator_ops s2mpa01_buck_ops = { .enable_mask = S2MPA01_ENABLE_MASK \ } -#define regulator_desc_buck8 { \ - .name = "BUCK8", \ - .id = S2MPA01_BUCK8, \ - .ops = &s2mpa01_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPA01_BUCK_MIN2, \ - .uV_step = S2MPA01_BUCK_STEP2, \ - .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \ - .ramp_delay = S2MPA01_RAMP_DELAY, \ - .vsel_reg = S2MPA01_REG_B8CTRL2, \ - .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \ - .enable_reg = S2MPA01_REG_B8CTRL1, \ - .enable_mask = S2MPA01_ENABLE_MASK \ -} - -#define regulator_desc_buck9 { \ 
- .name = "BUCK9", \ - .id = S2MPA01_BUCK9, \ - .ops = &s2mpa01_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPA01_BUCK_MIN4, \ - .uV_step = S2MPA01_BUCK_STEP2, \ - .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \ - .ramp_delay = S2MPA01_RAMP_DELAY, \ - .vsel_reg = S2MPA01_REG_B9CTRL2, \ - .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \ - .enable_reg = S2MPA01_REG_B9CTRL1, \ - .enable_mask = S2MPA01_ENABLE_MASK \ -} - -#define regulator_desc_buck10 { \ - .name = "BUCK10", \ - .id = S2MPA01_BUCK10, \ - .ops = &s2mpa01_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPA01_BUCK_MIN3, \ - .uV_step = S2MPA01_BUCK_STEP2, \ - .n_voltages = S2MPA01_BUCK_N_VOLTAGES, \ - .ramp_delay = S2MPA01_RAMP_DELAY, \ - .vsel_reg = S2MPA01_REG_B10CTRL2, \ - .vsel_mask = S2MPA01_BUCK_VSEL_MASK, \ - .enable_reg = S2MPA01_REG_B10CTRL1, \ - .enable_mask = S2MPA01_ENABLE_MASK \ -} - static struct regulator_desc regulators[] = { - regulator_desc_ldo2(1), - regulator_desc_ldo1(2), - regulator_desc_ldo1(3), - regulator_desc_ldo1(4), - regulator_desc_ldo1(5), - regulator_desc_ldo2(6), - regulator_desc_ldo1(7), - regulator_desc_ldo1(8), - regulator_desc_ldo1(9), - regulator_desc_ldo1(10), - regulator_desc_ldo2(11), - regulator_desc_ldo1(12), - regulator_desc_ldo1(13), - regulator_desc_ldo1(14), - regulator_desc_ldo1(15), - regulator_desc_ldo1(16), - regulator_desc_ldo1(17), - regulator_desc_ldo1(18), - regulator_desc_ldo1(19), - regulator_desc_ldo1(20), - regulator_desc_ldo1(21), - regulator_desc_ldo2(22), - regulator_desc_ldo2(23), - regulator_desc_ldo1(24), - regulator_desc_ldo1(25), - regulator_desc_ldo1(26), + regulator_desc_ldo(1, STEP_25_MV), + regulator_desc_ldo(2, STEP_50_MV), + regulator_desc_ldo(3, STEP_50_MV), + regulator_desc_ldo(4, STEP_50_MV), + regulator_desc_ldo(5, STEP_50_MV), + regulator_desc_ldo(6, STEP_25_MV), + regulator_desc_ldo(7, STEP_50_MV), + regulator_desc_ldo(8, STEP_50_MV), + regulator_desc_ldo(9, STEP_50_MV), + regulator_desc_ldo(10, STEP_50_MV), + regulator_desc_ldo(11, STEP_25_MV), + regulator_desc_ldo(12, STEP_50_MV), + regulator_desc_ldo(13, STEP_50_MV), + regulator_desc_ldo(14, STEP_50_MV), + regulator_desc_ldo(15, STEP_50_MV), + regulator_desc_ldo(16, STEP_50_MV), + regulator_desc_ldo(17, STEP_50_MV), + regulator_desc_ldo(18, STEP_50_MV), + regulator_desc_ldo(19, STEP_50_MV), + regulator_desc_ldo(20, STEP_50_MV), + regulator_desc_ldo(21, STEP_50_MV), + regulator_desc_ldo(22, STEP_25_MV), + regulator_desc_ldo(23, STEP_25_MV), + regulator_desc_ldo(24, STEP_50_MV), + regulator_desc_ldo(25, STEP_50_MV), + regulator_desc_ldo(26, STEP_50_MV), regulator_desc_buck1_4(1), regulator_desc_buck1_4(2), regulator_desc_buck1_4(3), regulator_desc_buck1_4(4), regulator_desc_buck5, - regulator_desc_buck6_7(6), - regulator_desc_buck6_7(7), - regulator_desc_buck8, - regulator_desc_buck9, - regulator_desc_buck10, + regulator_desc_buck6_10(6, MIN_600_MV, STEP_6_25_MV), + regulator_desc_buck6_10(7, MIN_600_MV, STEP_6_25_MV), + regulator_desc_buck6_10(8, MIN_800_MV, STEP_12_5_MV), + regulator_desc_buck6_10(9, MIN_1500_MV, STEP_12_5_MV), + regulator_desc_buck6_10(10, MIN_1000_MV, STEP_12_5_MV), }; static int s2mpa01_pmic_probe(struct platform_device *pdev) diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 2b7e9e220497..adab82d5279f 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -31,6 +31,7 @@ #include <linux/mfd/samsung/core.h> #include <linux/mfd/samsung/s2mps11.h> #include 
<linux/mfd/samsung/s2mps14.h> +#include <linux/mfd/samsung/s2mpu02.h> struct s2mps11_info { unsigned int rdev_num; @@ -40,11 +41,15 @@ struct s2mps11_info { int ramp_delay16; int ramp_delay7810; int ramp_delay9; + + enum sec_device_type dev_type; + /* - * One bit for each S2MPS14 regulator whether the suspend mode + * One bit for each S2MPS14/S2MPU02 regulator whether the suspend mode * was enabled. */ - unsigned int s2mps14_suspend_state:30; + unsigned long long s2mps14_suspend_state:35; + /* Array of size rdev_num with GPIO-s for external sleep control */ int *ext_control_gpio; }; @@ -250,28 +255,14 @@ static struct regulator_ops s2mps11_buck_ops = { .set_ramp_delay = s2mps11_set_ramp_delay, }; -#define regulator_desc_s2mps11_ldo1(num) { \ +#define regulator_desc_s2mps11_ldo(num, step) { \ .name = "LDO"#num, \ .id = S2MPS11_LDO##num, \ .ops = &s2mps11_ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPS11_LDO_MIN, \ - .uV_step = S2MPS11_LDO_STEP1, \ - .n_voltages = S2MPS11_LDO_N_VOLTAGES, \ - .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \ - .vsel_mask = S2MPS11_LDO_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} -#define regulator_desc_s2mps11_ldo2(num) { \ - .name = "LDO"#num, \ - .id = S2MPS11_LDO##num, \ - .ops = &s2mps11_ldo_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_LDO_MIN, \ - .uV_step = S2MPS11_LDO_STEP2, \ + .min_uV = MIN_800_MV, \ + .uV_step = step, \ .n_voltages = S2MPS11_LDO_N_VOLTAGES, \ .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \ .vsel_mask = S2MPS11_LDO_VSEL_MASK, \ @@ -285,8 +276,8 @@ static struct regulator_ops s2mps11_buck_ops = { .ops = &s2mps11_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN1, \ - .uV_step = S2MPS11_BUCK_STEP1, \ + .min_uV = MIN_600_MV, \ + .uV_step = STEP_6_25_MV, \ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ .ramp_delay = S2MPS11_RAMP_DELAY, \ .vsel_reg = S2MPS11_REG_B1CTRL2 + (num - 1) * 2, \ @@ -301,8 +292,8 @@ static struct regulator_ops s2mps11_buck_ops = { .ops = &s2mps11_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN1, \ - .uV_step = S2MPS11_BUCK_STEP1, \ + .min_uV = MIN_600_MV, \ + .uV_step = STEP_6_25_MV, \ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ .ramp_delay = S2MPS11_RAMP_DELAY, \ .vsel_reg = S2MPS11_REG_B5CTRL2, \ @@ -311,14 +302,14 @@ static struct regulator_ops s2mps11_buck_ops = { .enable_mask = S2MPS11_ENABLE_MASK \ } -#define regulator_desc_s2mps11_buck6_8(num) { \ +#define regulator_desc_s2mps11_buck6_10(num, min, step) { \ .name = "BUCK"#num, \ .id = S2MPS11_BUCK##num, \ .ops = &s2mps11_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN1, \ - .uV_step = S2MPS11_BUCK_STEP1, \ + .min_uV = min, \ + .uV_step = step, \ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ .ramp_delay = S2MPS11_RAMP_DELAY, \ .vsel_reg = S2MPS11_REG_B6CTRL2 + (num - 6) * 2, \ @@ -327,87 +318,55 @@ static struct regulator_ops s2mps11_buck_ops = { .enable_mask = S2MPS11_ENABLE_MASK \ } -#define regulator_desc_s2mps11_buck9 { \ - .name = "BUCK9", \ - .id = S2MPS11_BUCK9, \ - .ops = &s2mps11_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN3, \ - .uV_step = S2MPS11_BUCK_STEP3, \ - .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .ramp_delay = S2MPS11_RAMP_DELAY, \ - .vsel_reg = S2MPS11_REG_B9CTRL2, \ - .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B9CTRL1, \ - 
.enable_mask = S2MPS11_ENABLE_MASK \ -} - -#define regulator_desc_s2mps11_buck10 { \ - .name = "BUCK10", \ - .id = S2MPS11_BUCK10, \ - .ops = &s2mps11_buck_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS11_BUCK_MIN2, \ - .uV_step = S2MPS11_BUCK_STEP2, \ - .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \ - .ramp_delay = S2MPS11_RAMP_DELAY, \ - .vsel_reg = S2MPS11_REG_B10CTRL2, \ - .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS11_REG_B10CTRL1, \ - .enable_mask = S2MPS11_ENABLE_MASK \ -} - static const struct regulator_desc s2mps11_regulators[] = { - regulator_desc_s2mps11_ldo2(1), - regulator_desc_s2mps11_ldo1(2), - regulator_desc_s2mps11_ldo1(3), - regulator_desc_s2mps11_ldo1(4), - regulator_desc_s2mps11_ldo1(5), - regulator_desc_s2mps11_ldo2(6), - regulator_desc_s2mps11_ldo1(7), - regulator_desc_s2mps11_ldo1(8), - regulator_desc_s2mps11_ldo1(9), - regulator_desc_s2mps11_ldo1(10), - regulator_desc_s2mps11_ldo2(11), - regulator_desc_s2mps11_ldo1(12), - regulator_desc_s2mps11_ldo1(13), - regulator_desc_s2mps11_ldo1(14), - regulator_desc_s2mps11_ldo1(15), - regulator_desc_s2mps11_ldo1(16), - regulator_desc_s2mps11_ldo1(17), - regulator_desc_s2mps11_ldo1(18), - regulator_desc_s2mps11_ldo1(19), - regulator_desc_s2mps11_ldo1(20), - regulator_desc_s2mps11_ldo1(21), - regulator_desc_s2mps11_ldo2(22), - regulator_desc_s2mps11_ldo2(23), - regulator_desc_s2mps11_ldo1(24), - regulator_desc_s2mps11_ldo1(25), - regulator_desc_s2mps11_ldo1(26), - regulator_desc_s2mps11_ldo2(27), - regulator_desc_s2mps11_ldo1(28), - regulator_desc_s2mps11_ldo1(29), - regulator_desc_s2mps11_ldo1(30), - regulator_desc_s2mps11_ldo1(31), - regulator_desc_s2mps11_ldo1(32), - regulator_desc_s2mps11_ldo1(33), - regulator_desc_s2mps11_ldo1(34), - regulator_desc_s2mps11_ldo1(35), - regulator_desc_s2mps11_ldo1(36), - regulator_desc_s2mps11_ldo1(37), - regulator_desc_s2mps11_ldo1(38), + regulator_desc_s2mps11_ldo(1, STEP_25_MV), + regulator_desc_s2mps11_ldo(2, STEP_50_MV), + regulator_desc_s2mps11_ldo(3, STEP_50_MV), + regulator_desc_s2mps11_ldo(4, STEP_50_MV), + regulator_desc_s2mps11_ldo(5, STEP_50_MV), + regulator_desc_s2mps11_ldo(6, STEP_25_MV), + regulator_desc_s2mps11_ldo(7, STEP_50_MV), + regulator_desc_s2mps11_ldo(8, STEP_50_MV), + regulator_desc_s2mps11_ldo(9, STEP_50_MV), + regulator_desc_s2mps11_ldo(10, STEP_50_MV), + regulator_desc_s2mps11_ldo(11, STEP_25_MV), + regulator_desc_s2mps11_ldo(12, STEP_50_MV), + regulator_desc_s2mps11_ldo(13, STEP_50_MV), + regulator_desc_s2mps11_ldo(14, STEP_50_MV), + regulator_desc_s2mps11_ldo(15, STEP_50_MV), + regulator_desc_s2mps11_ldo(16, STEP_50_MV), + regulator_desc_s2mps11_ldo(17, STEP_50_MV), + regulator_desc_s2mps11_ldo(18, STEP_50_MV), + regulator_desc_s2mps11_ldo(19, STEP_50_MV), + regulator_desc_s2mps11_ldo(20, STEP_50_MV), + regulator_desc_s2mps11_ldo(21, STEP_50_MV), + regulator_desc_s2mps11_ldo(22, STEP_25_MV), + regulator_desc_s2mps11_ldo(23, STEP_25_MV), + regulator_desc_s2mps11_ldo(24, STEP_50_MV), + regulator_desc_s2mps11_ldo(25, STEP_50_MV), + regulator_desc_s2mps11_ldo(26, STEP_50_MV), + regulator_desc_s2mps11_ldo(27, STEP_25_MV), + regulator_desc_s2mps11_ldo(28, STEP_50_MV), + regulator_desc_s2mps11_ldo(29, STEP_50_MV), + regulator_desc_s2mps11_ldo(30, STEP_50_MV), + regulator_desc_s2mps11_ldo(31, STEP_50_MV), + regulator_desc_s2mps11_ldo(32, STEP_50_MV), + regulator_desc_s2mps11_ldo(33, STEP_50_MV), + regulator_desc_s2mps11_ldo(34, STEP_50_MV), + regulator_desc_s2mps11_ldo(35, STEP_50_MV), + regulator_desc_s2mps11_ldo(36, 
STEP_50_MV), + regulator_desc_s2mps11_ldo(37, STEP_50_MV), + regulator_desc_s2mps11_ldo(38, STEP_50_MV), regulator_desc_s2mps11_buck1_4(1), regulator_desc_s2mps11_buck1_4(2), regulator_desc_s2mps11_buck1_4(3), regulator_desc_s2mps11_buck1_4(4), regulator_desc_s2mps11_buck5, - regulator_desc_s2mps11_buck6_8(6), - regulator_desc_s2mps11_buck6_8(7), - regulator_desc_s2mps11_buck6_8(8), - regulator_desc_s2mps11_buck9, - regulator_desc_s2mps11_buck10, + regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV), + regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV), }; static int s2mps14_regulator_enable(struct regulator_dev *rdev) @@ -415,12 +374,24 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev); unsigned int val; - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) - val = S2MPS14_ENABLE_SUSPEND; - else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) - val = S2MPS14_ENABLE_EXT_CONTROL; - else - val = rdev->desc->enable_mask; + switch (s2mps11->dev_type) { + case S2MPS14X: + if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + val = S2MPS14_ENABLE_SUSPEND; + else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) + val = S2MPS14_ENABLE_EXT_CONTROL; + else + val = rdev->desc->enable_mask; + break; + case S2MPU02: + if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + val = S2MPU02_ENABLE_SUSPEND; + else + val = rdev->desc->enable_mask; + break; + default: + return -EINVAL; + }; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, val); @@ -429,12 +400,38 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev) { int ret; - unsigned int val; + unsigned int val, state; struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev); + int rdev_id = rdev_get_id(rdev); - /* LDO3 should be always on and does not support suspend mode */ - if (rdev_get_id(rdev) == S2MPS14_LDO3) - return 0; + /* Below LDO should be always on or does not support suspend mode. 
*/ + switch (s2mps11->dev_type) { + case S2MPS14X: + switch (rdev_id) { + case S2MPS14_LDO3: + return 0; + default: + state = S2MPS14_ENABLE_SUSPEND; + break; + }; + break; + case S2MPU02: + switch (rdev_id) { + case S2MPU02_LDO13: + case S2MPU02_LDO14: + case S2MPU02_LDO15: + case S2MPU02_LDO17: + case S2MPU02_BUCK7: + state = S2MPU02_DISABLE_SUSPEND; + break; + default: + state = S2MPU02_ENABLE_SUSPEND; + break; + }; + break; + default: + return -EINVAL; + }; ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val); if (ret < 0) @@ -452,7 +449,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev) return 0; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, - rdev->desc->enable_mask, S2MPS14_ENABLE_SUSPEND); + rdev->desc->enable_mask, state); } static struct regulator_ops s2mps14_reg_ops = { @@ -467,56 +464,29 @@ static struct regulator_ops s2mps14_reg_ops = { .set_suspend_disable = s2mps14_regulator_set_suspend_disable, }; -#define regulator_desc_s2mps14_ldo1(num) { \ - .name = "LDO"#num, \ - .id = S2MPS14_LDO##num, \ - .ops = &s2mps14_reg_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS14_LDO_MIN_800MV, \ - .uV_step = S2MPS14_LDO_STEP_25MV, \ - .n_voltages = S2MPS14_LDO_N_VOLTAGES, \ - .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \ - .vsel_mask = S2MPS14_LDO_VSEL_MASK, \ - .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \ - .enable_mask = S2MPS14_ENABLE_MASK \ -} -#define regulator_desc_s2mps14_ldo2(num) { \ +#define regulator_desc_s2mps14_ldo(num, min, step) { \ .name = "LDO"#num, \ .id = S2MPS14_LDO##num, \ .ops = &s2mps14_reg_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPS14_LDO_MIN_1800MV, \ - .uV_step = S2MPS14_LDO_STEP_25MV, \ + .min_uV = min, \ + .uV_step = step, \ .n_voltages = S2MPS14_LDO_N_VOLTAGES, \ .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \ .vsel_mask = S2MPS14_LDO_VSEL_MASK, \ .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \ .enable_mask = S2MPS14_ENABLE_MASK \ } -#define regulator_desc_s2mps14_ldo3(num) { \ - .name = "LDO"#num, \ - .id = S2MPS14_LDO##num, \ - .ops = &s2mps14_reg_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS14_LDO_MIN_800MV, \ - .uV_step = S2MPS14_LDO_STEP_12_5MV, \ - .n_voltages = S2MPS14_LDO_N_VOLTAGES, \ - .vsel_reg = S2MPS14_REG_L1CTRL + num - 1, \ - .vsel_mask = S2MPS14_LDO_VSEL_MASK, \ - .enable_reg = S2MPS14_REG_L1CTRL + num - 1, \ - .enable_mask = S2MPS14_ENABLE_MASK \ -} -#define regulator_desc_s2mps14_buck1235(num) { \ + +#define regulator_desc_s2mps14_buck(num, min, step) { \ .name = "BUCK"#num, \ .id = S2MPS14_BUCK##num, \ .ops = &s2mps14_reg_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ - .min_uV = S2MPS14_BUCK1235_MIN_600MV, \ - .uV_step = S2MPS14_BUCK1235_STEP_6_25MV, \ + .min_uV = min, \ + .uV_step = step, \ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \ .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \ @@ -525,54 +495,38 @@ static struct regulator_ops s2mps14_reg_ops = { .enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \ .enable_mask = S2MPS14_ENABLE_MASK \ } -#define regulator_desc_s2mps14_buck4(num) { \ - .name = "BUCK"#num, \ - .id = S2MPS14_BUCK##num, \ - .ops = &s2mps14_reg_ops, \ - .type = REGULATOR_VOLTAGE, \ - .owner = THIS_MODULE, \ - .min_uV = S2MPS14_BUCK4_MIN_1400MV, \ - .uV_step = S2MPS14_BUCK4_STEP_12_5MV, \ - .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ - .linear_min_sel = S2MPS14_BUCK4_START_SEL, \ - .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \ - 
.vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \ - .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ - .enable_reg = S2MPS14_REG_B1CTRL1 + (num - 1) * 2, \ - .enable_mask = S2MPS14_ENABLE_MASK \ -} static const struct regulator_desc s2mps14_regulators[] = { - regulator_desc_s2mps14_ldo3(1), - regulator_desc_s2mps14_ldo3(2), - regulator_desc_s2mps14_ldo1(3), - regulator_desc_s2mps14_ldo1(4), - regulator_desc_s2mps14_ldo3(5), - regulator_desc_s2mps14_ldo3(6), - regulator_desc_s2mps14_ldo1(7), - regulator_desc_s2mps14_ldo2(8), - regulator_desc_s2mps14_ldo3(9), - regulator_desc_s2mps14_ldo3(10), - regulator_desc_s2mps14_ldo1(11), - regulator_desc_s2mps14_ldo2(12), - regulator_desc_s2mps14_ldo2(13), - regulator_desc_s2mps14_ldo2(14), - regulator_desc_s2mps14_ldo2(15), - regulator_desc_s2mps14_ldo2(16), - regulator_desc_s2mps14_ldo2(17), - regulator_desc_s2mps14_ldo2(18), - regulator_desc_s2mps14_ldo1(19), - regulator_desc_s2mps14_ldo1(20), - regulator_desc_s2mps14_ldo1(21), - regulator_desc_s2mps14_ldo3(22), - regulator_desc_s2mps14_ldo1(23), - regulator_desc_s2mps14_ldo2(24), - regulator_desc_s2mps14_ldo2(25), - regulator_desc_s2mps14_buck1235(1), - regulator_desc_s2mps14_buck1235(2), - regulator_desc_s2mps14_buck1235(3), - regulator_desc_s2mps14_buck4(4), - regulator_desc_s2mps14_buck1235(5), + regulator_desc_s2mps14_ldo(1, MIN_800_MV, STEP_12_5_MV), + regulator_desc_s2mps14_ldo(2, MIN_800_MV, STEP_12_5_MV), + regulator_desc_s2mps14_ldo(3, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(4, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(5, MIN_800_MV, STEP_12_5_MV), + regulator_desc_s2mps14_ldo(6, MIN_800_MV, STEP_12_5_MV), + regulator_desc_s2mps14_ldo(7, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(8, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(9, MIN_800_MV, STEP_12_5_MV), + regulator_desc_s2mps14_ldo(10, MIN_800_MV, STEP_12_5_MV), + regulator_desc_s2mps14_ldo(11, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(12, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(13, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(14, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(15, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(16, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(17, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(18, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(19, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(20, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(21, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(22, MIN_800_MV, STEP_12_5_MV), + regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV), + regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV), + regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV), }; static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11, @@ -605,8 +559,7 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev, } static int s2mps11_pmic_dt_parse(struct platform_device *pdev, - struct of_regulator_match *rdata, struct s2mps11_info *s2mps11, - enum sec_device_type dev_type) + struct of_regulator_match *rdata, struct s2mps11_info *s2mps11) { struct device_node *reg_np; @@ -617,7 +570,7 @@ static int 
s2mps11_pmic_dt_parse(struct platform_device *pdev, } of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num); - if (dev_type == S2MPS14X) + if (s2mps11->dev_type == S2MPS14X) s2mps14_pmic_dt_parse_ext_control_gpio(pdev, rdata, s2mps11); of_node_put(reg_np); @@ -625,6 +578,238 @@ static int s2mps11_pmic_dt_parse(struct platform_device *pdev, return 0; } +static int s2mpu02_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int ramp_val, ramp_shift, ramp_reg; + + switch (rdev_get_id(rdev)) { + case S2MPU02_BUCK1: + ramp_shift = S2MPU02_BUCK1_RAMP_SHIFT; + break; + case S2MPU02_BUCK2: + ramp_shift = S2MPU02_BUCK2_RAMP_SHIFT; + break; + case S2MPU02_BUCK3: + ramp_shift = S2MPU02_BUCK3_RAMP_SHIFT; + break; + case S2MPU02_BUCK4: + ramp_shift = S2MPU02_BUCK4_RAMP_SHIFT; + break; + default: + return 0; + } + ramp_reg = S2MPU02_REG_RAMP1; + ramp_val = get_ramp_delay(ramp_delay); + + return regmap_update_bits(rdev->regmap, ramp_reg, + S2MPU02_BUCK1234_RAMP_MASK << ramp_shift, + ramp_val << ramp_shift); +} + +static struct regulator_ops s2mpu02_ldo_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = s2mps14_regulator_enable, + .disable = regulator_disable_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_suspend_disable = s2mps14_regulator_set_suspend_disable, +}; + +static struct regulator_ops s2mpu02_buck_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .is_enabled = regulator_is_enabled_regmap, + .enable = s2mps14_regulator_enable, + .disable = regulator_disable_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_suspend_disable = s2mps14_regulator_set_suspend_disable, + .set_ramp_delay = s2mpu02_set_ramp_delay, +}; + +#define regulator_desc_s2mpu02_ldo1(num) { \ + .name = "LDO"#num, \ + .id = S2MPU02_LDO##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_LDO_MIN_900MV, \ + .uV_step = S2MPU02_LDO_STEP_12_5MV, \ + .linear_min_sel = S2MPU02_LDO_GROUP1_START_SEL, \ + .n_voltages = S2MPU02_LDO_N_VOLTAGES, \ + .vsel_reg = S2MPU02_REG_L1CTRL, \ + .vsel_mask = S2MPU02_LDO_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_L1CTRL, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} +#define regulator_desc_s2mpu02_ldo2(num) { \ + .name = "LDO"#num, \ + .id = S2MPU02_LDO##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_LDO_MIN_1050MV, \ + .uV_step = S2MPU02_LDO_STEP_25MV, \ + .linear_min_sel = S2MPU02_LDO_GROUP2_START_SEL, \ + .n_voltages = S2MPU02_LDO_N_VOLTAGES, \ + .vsel_reg = S2MPU02_REG_L2CTRL1, \ + .vsel_mask = S2MPU02_LDO_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_L2CTRL1, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} +#define regulator_desc_s2mpu02_ldo3(num) { \ + .name = "LDO"#num, \ + .id = S2MPU02_LDO##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_LDO_MIN_900MV, \ + .uV_step = S2MPU02_LDO_STEP_12_5MV, \ + .linear_min_sel = S2MPU02_LDO_GROUP1_START_SEL, \ + .n_voltages = S2MPU02_LDO_N_VOLTAGES, \ + .vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \ + .vsel_mask = 
S2MPU02_LDO_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_L3CTRL + num - 3, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} +#define regulator_desc_s2mpu02_ldo4(num) { \ + .name = "LDO"#num, \ + .id = S2MPU02_LDO##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_LDO_MIN_1050MV, \ + .uV_step = S2MPU02_LDO_STEP_25MV, \ + .linear_min_sel = S2MPU02_LDO_GROUP2_START_SEL, \ + .n_voltages = S2MPU02_LDO_N_VOLTAGES, \ + .vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \ + .vsel_mask = S2MPU02_LDO_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_L3CTRL + num - 3, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} +#define regulator_desc_s2mpu02_ldo5(num) { \ + .name = "LDO"#num, \ + .id = S2MPU02_LDO##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_LDO_MIN_1600MV, \ + .uV_step = S2MPU02_LDO_STEP_50MV, \ + .linear_min_sel = S2MPU02_LDO_GROUP3_START_SEL, \ + .n_voltages = S2MPU02_LDO_N_VOLTAGES, \ + .vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \ + .vsel_mask = S2MPU02_LDO_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_L3CTRL + num - 3, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} + +#define regulator_desc_s2mpu02_buck1234(num) { \ + .name = "BUCK"#num, \ + .id = S2MPU02_BUCK##num, \ + .ops = &s2mpu02_buck_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_BUCK1234_MIN_600MV, \ + .uV_step = S2MPU02_BUCK1234_STEP_6_25MV, \ + .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \ + .linear_min_sel = S2MPU02_BUCK1234_START_SEL, \ + .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \ + .vsel_reg = S2MPU02_REG_B1CTRL2 + (num - 1) * 2, \ + .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_B1CTRL1 + (num - 1) * 2, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} +#define regulator_desc_s2mpu02_buck5(num) { \ + .name = "BUCK"#num, \ + .id = S2MPU02_BUCK##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_BUCK5_MIN_1081_25MV, \ + .uV_step = S2MPU02_BUCK5_STEP_6_25MV, \ + .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \ + .linear_min_sel = S2MPU02_BUCK5_START_SEL, \ + .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \ + .vsel_reg = S2MPU02_REG_B5CTRL2, \ + .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_B5CTRL1, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} +#define regulator_desc_s2mpu02_buck6(num) { \ + .name = "BUCK"#num, \ + .id = S2MPU02_BUCK##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_BUCK6_MIN_1700MV, \ + .uV_step = S2MPU02_BUCK6_STEP_2_50MV, \ + .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \ + .linear_min_sel = S2MPU02_BUCK6_START_SEL, \ + .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \ + .vsel_reg = S2MPU02_REG_B6CTRL2, \ + .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_B6CTRL1, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} +#define regulator_desc_s2mpu02_buck7(num) { \ + .name = "BUCK"#num, \ + .id = S2MPU02_BUCK##num, \ + .ops = &s2mpu02_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = S2MPU02_BUCK7_MIN_900MV, \ + .uV_step = S2MPU02_BUCK7_STEP_6_25MV, \ + .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \ + .linear_min_sel = S2MPU02_BUCK7_START_SEL, \ + .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \ + .vsel_reg = S2MPU02_REG_B7CTRL2, \ + .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \ + .enable_reg = S2MPU02_REG_B7CTRL1, \ + .enable_mask = S2MPU02_ENABLE_MASK \ +} + +static const struct regulator_desc s2mpu02_regulators[] = { + 
regulator_desc_s2mpu02_ldo1(1), + regulator_desc_s2mpu02_ldo2(2), + regulator_desc_s2mpu02_ldo4(3), + regulator_desc_s2mpu02_ldo5(4), + regulator_desc_s2mpu02_ldo4(5), + regulator_desc_s2mpu02_ldo3(6), + regulator_desc_s2mpu02_ldo3(7), + regulator_desc_s2mpu02_ldo4(8), + regulator_desc_s2mpu02_ldo5(9), + regulator_desc_s2mpu02_ldo3(10), + regulator_desc_s2mpu02_ldo4(11), + regulator_desc_s2mpu02_ldo5(12), + regulator_desc_s2mpu02_ldo5(13), + regulator_desc_s2mpu02_ldo5(14), + regulator_desc_s2mpu02_ldo5(15), + regulator_desc_s2mpu02_ldo5(16), + regulator_desc_s2mpu02_ldo4(17), + regulator_desc_s2mpu02_ldo5(18), + regulator_desc_s2mpu02_ldo3(19), + regulator_desc_s2mpu02_ldo4(20), + regulator_desc_s2mpu02_ldo5(21), + regulator_desc_s2mpu02_ldo5(22), + regulator_desc_s2mpu02_ldo5(23), + regulator_desc_s2mpu02_ldo4(24), + regulator_desc_s2mpu02_ldo5(25), + regulator_desc_s2mpu02_ldo4(26), + regulator_desc_s2mpu02_ldo5(27), + regulator_desc_s2mpu02_ldo5(28), + regulator_desc_s2mpu02_buck1234(1), + regulator_desc_s2mpu02_buck1234(2), + regulator_desc_s2mpu02_buck1234(3), + regulator_desc_s2mpu02_buck1234(4), + regulator_desc_s2mpu02_buck5(5), + regulator_desc_s2mpu02_buck6(6), + regulator_desc_s2mpu02_buck7(7), +}; + static int s2mps11_pmic_probe(struct platform_device *pdev) { struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); @@ -634,15 +819,14 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) struct s2mps11_info *s2mps11; int i, ret = 0; const struct regulator_desc *regulators; - enum sec_device_type dev_type; s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info), GFP_KERNEL); if (!s2mps11) return -ENOMEM; - dev_type = platform_get_device_id(pdev)->driver_data; - switch (dev_type) { + s2mps11->dev_type = platform_get_device_id(pdev)->driver_data; + switch (s2mps11->dev_type) { case S2MPS11X: s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); regulators = s2mps11_regulators; @@ -651,8 +835,13 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); regulators = s2mps14_regulators; break; + case S2MPU02: + s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); + regulators = s2mpu02_regulators; + break; default: - dev_err(&pdev->dev, "Invalid device type: %u\n", dev_type); + dev_err(&pdev->dev, "Invalid device type: %u\n", + s2mps11->dev_type); return -EINVAL; }; @@ -686,7 +875,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) for (i = 0; i < s2mps11->rdev_num; i++) rdata[i].name = regulators[i].name; - ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, dev_type); + ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11); if (ret) goto out; @@ -739,6 +928,7 @@ out: static const struct platform_device_id s2mps11_pmic_id[] = { { "s2mps11-pmic", S2MPS11X}, { "s2mps14-pmic", S2MPS14X}, + { "s2mpu02-pmic", S2MPU02}, { }, }; MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id); diff --git a/drivers/regulator/sky81452-regulator.c b/drivers/regulator/sky81452-regulator.c new file mode 100644 index 000000000000..97aff0ccd65f --- /dev/null +++ b/drivers/regulator/sky81452-regulator.c @@ -0,0 +1,130 @@ +/* + * sky81452-regulator.c SKY81452 regulator driver + * + * Copyright 2014 Skyworks Solutions Inc. + * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> + +/* registers */ +#define SKY81452_REG1 0x01 +#define SKY81452_REG3 0x03 + +/* bit mask */ +#define SKY81452_LEN 0x40 +#define SKY81452_LOUT 0x1F + +static struct regulator_ops sky81452_reg_ops = { + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static const struct regulator_linear_range sky81452_reg_ranges[] = { + REGULATOR_LINEAR_RANGE(4500000, 0, 14, 250000), + REGULATOR_LINEAR_RANGE(9000000, 15, 31, 1000000), +}; + +static const struct regulator_desc sky81452_reg = { + .name = "LOUT", + .ops = &sky81452_reg_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = SKY81452_LOUT + 1, + .linear_ranges = sky81452_reg_ranges, + .n_linear_ranges = ARRAY_SIZE(sky81452_reg_ranges), + .vsel_reg = SKY81452_REG3, + .vsel_mask = SKY81452_LOUT, + .enable_reg = SKY81452_REG1, + .enable_mask = SKY81452_LEN, +}; + +#ifdef CONFIG_OF +static struct regulator_init_data *sky81452_reg_parse_dt(struct device *dev) +{ + struct regulator_init_data *init_data; + struct device_node *np; + + np = of_get_child_by_name(dev->parent->of_node, "regulator"); + if (unlikely(!np)) { + dev_err(dev, "regulator node not found"); + return NULL; + } + + init_data = of_get_regulator_init_data(dev, np); + + of_node_put(np); + return init_data; +} +#else +static struct regulator_init_data *sky81452_reg_parse_dt(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static int sky81452_reg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct regulator_init_data *init_data = dev_get_platdata(dev); + struct regulator_config config = { }; + struct regulator_dev *rdev; + + if (!init_data) { + init_data = sky81452_reg_parse_dt(dev); + if (IS_ERR(init_data)) + return PTR_ERR(init_data); + } + + config.dev = dev; + config.init_data = init_data; + config.of_node = dev->of_node; + config.regmap = dev_get_drvdata(dev->parent); + + rdev = devm_regulator_register(dev, &sky81452_reg, &config); + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + + platform_set_drvdata(pdev, rdev); + + return 0; +} + +static struct platform_driver sky81452_reg_driver = { + .driver = { + .name = "sky81452-regulator", + }, + .probe = sky81452_reg_probe, +}; + +module_platform_driver(sky81452_reg_driver); + +MODULE_DESCRIPTION("Skyworks SKY81452 Regulator driver"); +MODULE_AUTHOR("Gyungoh Yoo <jack.yoo@skyworksinc.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0"); diff --git a/drivers/regulator/st-pwm.c b/drivers/regulator/st-pwm.c deleted file mode 100644 index 5ea78df449f8..000000000000 --- a/drivers/regulator/st-pwm.c +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Regulator 
driver for ST's PWM Regulators - * - * Copyright (C) 2014 - STMicroelectronics Inc. - * - * Author: Lee Jones <lee.jones@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/err.h> -#include <linux/regulator/driver.h> -#include <linux/regulator/machine.h> -#include <linux/regulator/of_regulator.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/pwm.h> - -#define ST_PWM_REG_PERIOD 8448 - -struct st_pwm_regulator_pdata { - const struct regulator_desc *desc; - struct st_pwm_voltages *duty_cycle_table; -}; - -struct st_pwm_regulator_data { - const struct st_pwm_regulator_pdata *pdata; - struct pwm_device *pwm; - bool enabled; - int state; -}; - -struct st_pwm_voltages { - unsigned int uV; - unsigned int dutycycle; -}; - -static int st_pwm_regulator_get_voltage_sel(struct regulator_dev *dev) -{ - struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev); - - return drvdata->state; -} - -static int st_pwm_regulator_set_voltage_sel(struct regulator_dev *dev, - unsigned selector) -{ - struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev); - int dutycycle; - int ret; - - dutycycle = (ST_PWM_REG_PERIOD / 100) * - drvdata->pdata->duty_cycle_table[selector].dutycycle; - - ret = pwm_config(drvdata->pwm, dutycycle, ST_PWM_REG_PERIOD); - if (ret) { - dev_err(&dev->dev, "Failed to configure PWM\n"); - return ret; - } - - drvdata->state = selector; - - if (!drvdata->enabled) { - ret = pwm_enable(drvdata->pwm); - if (ret) { - dev_err(&dev->dev, "Failed to enable PWM\n"); - return ret; - } - drvdata->enabled = true; - } - - return 0; -} - -static int st_pwm_regulator_list_voltage(struct regulator_dev *dev, - unsigned selector) -{ - struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev); - - if (selector >= dev->desc->n_voltages) - return -EINVAL; - - return drvdata->pdata->duty_cycle_table[selector].uV; -} - -static struct regulator_ops st_pwm_regulator_voltage_ops = { - .set_voltage_sel = st_pwm_regulator_set_voltage_sel, - .get_voltage_sel = st_pwm_regulator_get_voltage_sel, - .list_voltage = st_pwm_regulator_list_voltage, - .map_voltage = regulator_map_voltage_iterate, -}; - -static struct st_pwm_voltages b2105_duty_cycle_table[] = { - { .uV = 1114000, .dutycycle = 0, }, - { .uV = 1095000, .dutycycle = 10, }, - { .uV = 1076000, .dutycycle = 20, }, - { .uV = 1056000, .dutycycle = 30, }, - { .uV = 1036000, .dutycycle = 40, }, - { .uV = 1016000, .dutycycle = 50, }, - /* WARNING: Values above 50% duty-cycle cause boot failures. 
*/ -}; - -static const struct regulator_desc b2105_desc = { - .name = "b2105-pwm-regulator", - .ops = &st_pwm_regulator_voltage_ops, - .type = REGULATOR_VOLTAGE, - .owner = THIS_MODULE, - .n_voltages = ARRAY_SIZE(b2105_duty_cycle_table), - .supply_name = "pwm", -}; - -static const struct st_pwm_regulator_pdata b2105_info = { - .desc = &b2105_desc, - .duty_cycle_table = b2105_duty_cycle_table, -}; - -static const struct of_device_id st_pwm_of_match[] = { - { .compatible = "st,b2105-pwm-regulator", .data = &b2105_info, }, - { }, -}; -MODULE_DEVICE_TABLE(of, st_pwm_of_match); - -static int st_pwm_regulator_probe(struct platform_device *pdev) -{ - struct st_pwm_regulator_data *drvdata; - struct regulator_dev *regulator; - struct regulator_config config = { }; - struct device_node *np = pdev->dev.of_node; - const struct of_device_id *of_match; - - if (!np) { - dev_err(&pdev->dev, "Device Tree node missing\n"); - return -EINVAL; - } - - drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) - return -ENOMEM; - - of_match = of_match_device(st_pwm_of_match, &pdev->dev); - if (!of_match) { - dev_err(&pdev->dev, "failed to match of device\n"); - return -ENODEV; - } - drvdata->pdata = of_match->data; - - config.init_data = of_get_regulator_init_data(&pdev->dev, np); - if (!config.init_data) - return -ENOMEM; - - config.of_node = np; - config.dev = &pdev->dev; - config.driver_data = drvdata; - - drvdata->pwm = devm_pwm_get(&pdev->dev, NULL); - if (IS_ERR(drvdata->pwm)) { - dev_err(&pdev->dev, "Failed to get PWM\n"); - return PTR_ERR(drvdata->pwm); - } - - regulator = devm_regulator_register(&pdev->dev, - drvdata->pdata->desc, &config); - if (IS_ERR(regulator)) { - dev_err(&pdev->dev, "Failed to register regulator %s\n", - drvdata->pdata->desc->name); - return PTR_ERR(regulator); - } - - return 0; -} - -static struct platform_driver st_pwm_regulator_driver = { - .driver = { - .name = "st-pwm-regulator", - .owner = THIS_MODULE, - .of_match_table = of_match_ptr(st_pwm_of_match), - }, - .probe = st_pwm_regulator_probe, -}; - -module_platform_driver(st_pwm_regulator_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>"); -MODULE_DESCRIPTION("ST PWM Regulator Driver"); -MODULE_ALIAS("platform:st_pwm-regulator"); diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index 3ef67a86115c..7380af8bd50d 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c @@ -211,9 +211,6 @@ static int tps_65023_probe(struct i2c_client *client, int i; int error; - if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -EIO; - /** * init_data points to array of regulator_init structures * coming from the board-evm file. 
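Note: the new sky81452-regulator.c added a little earlier in this series describes its single LOUT output with two REGULATOR_LINEAR_RANGE entries and lets the regmap helpers do the selector translation. A minimal standalone sketch of that selector-to-microvolt arithmetic, using the same range constants from the driver; the helper name and the printf harness are illustrative only and are not part of the patch:

#include <stdio.h>

/*
 * Mirrors sky81452_reg_ranges: selectors 0..14 step 250000 uV starting at
 * 4500000 uV, selectors 15..31 step 1000000 uV starting at 9000000 uV.
 * In the kernel, regulator_list_voltage_linear_range() performs this
 * lookup; this is only an illustration of the arithmetic.
 */
static int sky81452_sel_to_uV(unsigned int sel)
{
	if (sel <= 14)
		return 4500000 + sel * 250000;
	if (sel <= 31)
		return 9000000 + (sel - 15) * 1000000;
	return -1;	/* outside the 5-bit SKY81452_LOUT field */
}

int main(void)
{
	/* last selector of the first range and first selector of the second */
	printf("%d %d\n", sky81452_sel_to_uV(14), sky81452_sel_to_uV(15));
	return 0;
}
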
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c index d58db72a63b0..adbe4fc5cf07 100644 --- a/drivers/regulator/tps65217-regulator.c +++ b/drivers/regulator/tps65217-regulator.c @@ -27,10 +27,13 @@ #include <linux/regulator/machine.h> #include <linux/mfd/tps65217.h> -#define TPS65217_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _em, _t, _lr, _nlr) \ +#define TPS65217_REGULATOR(_name, _id, _of_match, _ops, _n, _vr, _vm, _em, \ + _t, _lr, _nlr) \ { \ .name = _name, \ .id = _id, \ + .of_match = of_match_ptr(_of_match), \ + .regulators_node= of_match_ptr("regulators"), \ .ops = &_ops, \ .n_voltages = _n, \ .type = REGULATOR_VOLTAGE, \ @@ -138,87 +141,40 @@ static struct regulator_ops tps65217_pmic_ldo1_ops = { }; static const struct regulator_desc regulators[] = { - TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64, - TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK, - TPS65217_ENABLE_DC1_EN, NULL, tps65217_uv1_ranges, - 2), /* DCDC1 voltage range: 900000 ~ 1800000 */ - TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64, - TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK, - TPS65217_ENABLE_DC2_EN, NULL, tps65217_uv1_ranges, + TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, "dcdc1", + tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC1, + TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC1_EN, + NULL, tps65217_uv1_ranges, 2), + TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, "dcdc2", + tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC2, + TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC2_EN, + NULL, tps65217_uv1_ranges, ARRAY_SIZE(tps65217_uv1_ranges)), - TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64, - TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK, - TPS65217_ENABLE_DC3_EN, NULL, tps65217_uv1_ranges, - 1), /* DCDC3 voltage range: 900000 ~ 1500000 */ - TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16, - TPS65217_REG_DEFLDO1, TPS65217_DEFLDO1_LDO1_MASK, - TPS65217_ENABLE_LDO1_EN, LDO1_VSEL_table, NULL, 0), - TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64, - TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK, - TPS65217_ENABLE_LDO2_EN, NULL, tps65217_uv1_ranges, + TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, "dcdc3", + tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC3, + TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC3_EN, + NULL, tps65217_uv1_ranges, 1), + TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, "ldo1", + tps65217_pmic_ldo1_ops, 16, TPS65217_REG_DEFLDO1, + TPS65217_DEFLDO1_LDO1_MASK, TPS65217_ENABLE_LDO1_EN, + LDO1_VSEL_table, NULL, 0), + TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, "ldo2", tps65217_pmic_ops, + 64, TPS65217_REG_DEFLDO2, + TPS65217_DEFLDO2_LDO2_MASK, TPS65217_ENABLE_LDO2_EN, + NULL, tps65217_uv1_ranges, ARRAY_SIZE(tps65217_uv1_ranges)), - TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32, - TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK, + TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, "ldo3", tps65217_pmic_ops, + 32, TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK, TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN, NULL, tps65217_uv2_ranges, ARRAY_SIZE(tps65217_uv2_ranges)), - TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32, - TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK, + TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, "ldo4", tps65217_pmic_ops, + 32, TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK, TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN, NULL, tps65217_uv2_ranges, ARRAY_SIZE(tps65217_uv2_ranges)), }; -#ifdef CONFIG_OF -static 
struct of_regulator_match reg_matches[] = { - { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 }, - { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 }, - { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 }, - { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 }, - { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 }, - { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 }, - { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 }, -}; - -static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev) -{ - struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); - struct device_node *node = tps->dev->of_node; - struct tps65217_board *pdata; - struct device_node *regs; - int i, count; - - regs = of_get_child_by_name(node, "regulators"); - if (!regs) - return NULL; - - count = of_regulator_match(&pdev->dev, regs, reg_matches, - TPS65217_NUM_REGULATOR); - of_node_put(regs); - if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) - return NULL; - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - for (i = 0; i < count; i++) { - if (!reg_matches[i].of_node) - continue; - - pdata->tps65217_init_data[i] = reg_matches[i].init_data; - pdata->of_node[i] = reg_matches[i].of_node; - } - - return pdata; -} -#else -static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev) -{ - return NULL; -} -#endif - static int tps65217_regulator_probe(struct platform_device *pdev) { struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); @@ -227,14 +183,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev) struct regulator_config config = { }; int i; - if (tps->dev->of_node) - pdata = tps65217_parse_dt(pdev); - - if (!pdata) { - dev_err(&pdev->dev, "Platform data not found\n"); - return -EINVAL; - } - if (tps65217_chip_id(tps) != TPS65217) { dev_err(&pdev->dev, "Invalid tps chip version\n"); return -ENODEV; @@ -245,11 +193,10 @@ static int tps65217_regulator_probe(struct platform_device *pdev) for (i = 0; i < TPS65217_NUM_REGULATOR; i++) { /* Register the regulators */ config.dev = tps->dev; - config.init_data = pdata->tps65217_init_data[i]; + if (pdata) + config.init_data = pdata->tps65217_init_data[i]; config.driver_data = tps; config.regmap = tps->regmap; - if (tps->dev->of_node) - config.of_node = pdata->of_node[i]; rdev = devm_regulator_register(&pdev->dev, ®ulators[i], &config); @@ -259,6 +206,7 @@ static int tps65217_regulator_probe(struct platform_device *pdev) return PTR_ERR(rdev); } } + return 0; } diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index fa7db8847578..18fc991175bc 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -1014,7 +1014,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data( if (!pmic_plat_data) return NULL; - np = of_node_get(pdev->dev.parent->of_node); + np = pdev->dev.parent->of_node; regulators = of_get_child_by_name(np, "regulators"); if (!regulators) { dev_err(&pdev->dev, "regulator node not found\n"); @@ -1047,7 +1047,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data( *tps65910_reg_matches = matches; for (idx = 0; idx < count; idx++) { - if (!matches[idx].init_data || !matches[idx].of_node) + if (!matches[idx].of_node) continue; pmic_plat_data->tps65910_pmic_init_data[idx] = @@ -1077,7 +1077,6 @@ static int tps65910_probe(struct platform_device *pdev) struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); struct 
regulator_config config = { }; struct tps_info *info; - struct regulator_init_data *reg_data; struct regulator_dev *rdev; struct tps65910_reg *pmic; struct tps65910_board *pmic_plat_data; @@ -1140,14 +1139,6 @@ static int tps65910_probe(struct platform_device *pdev) for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS; i++, info++) { - - reg_data = pmic_plat_data->tps65910_pmic_init_data[i]; - - /* Regulator API handles empty constraints but not NULL - * constraints */ - if (!reg_data) - continue; - /* Register the regulators */ pmic->info[i] = info; @@ -1199,7 +1190,7 @@ static int tps65910_probe(struct platform_device *pdev) pmic->desc[i].enable_mask = TPS65910_SUPPLY_STATE_ENABLED; config.dev = tps65910->dev; - config.init_data = reg_data; + config.init_data = pmic_plat_data->tps65910_pmic_init_data[i]; config.driver_data = pmic; config.regmap = tps65910->regmap; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 0754f5c7cb3b..a168e96142b9 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -373,6 +373,14 @@ config RTC_DRV_PCF8563 This driver can also be built as a module. If so, the module will be called rtc-pcf8563. +config RTC_DRV_PCF85063 + tristate "nxp PCF85063" + help + If you say yes here you get support for the PCF85063 RTC chip + + This driver can also be built as a module. If so, the module + will be called rtc-pcf85063. + config RTC_DRV_PCF8583 tristate "Philips PCF8583" help @@ -760,6 +768,15 @@ config RTC_DRV_DS1742 This driver can also be built as a module. If so, the module will be called rtc-ds1742. +config RTC_DRV_DS2404 + tristate "Maxim/Dallas DS2404" + help + If you say yes here you get support for the + Dallas DS2404 RTC chip. + + This driver can also be built as a module. If so, the module + will be called rtc-ds2404. + config RTC_DRV_DA9052 tristate "Dialog DA9052/DA9053 RTC" depends on PMIC_DA9052 @@ -789,7 +806,7 @@ config RTC_DRV_DA9063 config RTC_DRV_EFI tristate "EFI RTC" - depends on IA64 + depends on EFI help If you say yes here you will get support for the EFI Real Time Clock. @@ -873,15 +890,6 @@ config RTC_DRV_V3020 This driver can also be built as a module. If so, the module will be called rtc-v3020. -config RTC_DRV_DS2404 - tristate "Dallas DS2404" - help - If you say yes here you get support for the - Dallas DS2404 RTC chip. - - This driver can also be built as a module. If so, the module - will be called rtc-ds2404. - config RTC_DRV_WM831X tristate "Wolfson Microelectronics WM831x RTC" depends on MFD_WM831X @@ -1349,6 +1357,7 @@ config RTC_DRV_SIRFSOC config RTC_DRV_MOXART tristate "MOXA ART RTC" + depends on ARCH_MOXART || COMPILE_TEST help If you say yes here you get support for the MOXA ART RTC module. 
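Note: both the new rtc-pcf85063 driver enabled by RTC_DRV_PCF85063 above and the reworked rtc-pcf8563 further below store every time field as packed BCD and convert with bcd2bin()/bin2bcd(). A standalone sketch of those conversions for reference; the function bodies are illustrative re-implementations for clarity, not the kernel's <linux/bcd.h> code:

#include <stdio.h>

/* Packed BCD: low nibble = ones digit, high nibble = tens digit. */
static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned int val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}

int main(void)
{
	/* A seconds register reading 0x59 means 59; writing 59 stores 0x59.
	 * The drivers mask off status bits first, e.g. buf[REG_SC] & 0x7F. */
	printf("%u 0x%02x\n", bcd2bin(0x59), bin2bcd(59));
	return 0;
}
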
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 70347d041d10..56f061c7c815 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_SYSTOHC) += systohc.o obj-$(CONFIG_RTC_CLASS) += rtc-core.o rtc-core-y := class.o interface.o +ifdef CONFIG_RTC_DRV_EFI +rtc-core-y += rtc-efi-platform.o +endif + rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o @@ -93,6 +97,7 @@ obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o +obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 589351ef75d0..38e26be705be 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -53,6 +53,7 @@ static int rtc_suspend(struct device *dev) struct rtc_device *rtc = to_rtc_device(dev); struct rtc_time tm; struct timespec delta, delta_delta; + int err; if (has_persistent_clock()) return 0; @@ -61,7 +62,12 @@ static int rtc_suspend(struct device *dev) return 0; /* snapshot the current RTC and system time at suspend*/ - rtc_read_time(rtc, &tm); + err = rtc_read_time(rtc, &tm); + if (err < 0) { + pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev)); + return 0; + } + getnstimeofday(&old_system); rtc_tm_to_time(&tm, &old_rtc.tv_sec); @@ -94,6 +100,7 @@ static int rtc_resume(struct device *dev) struct rtc_time tm; struct timespec new_system, new_rtc; struct timespec sleep_time; + int err; if (has_persistent_clock()) return 0; @@ -104,7 +111,12 @@ static int rtc_resume(struct device *dev) /* snapshot the current rtc and system time at resume */ getnstimeofday(&new_system); - rtc_read_time(rtc, &tm); + err = rtc_read_time(rtc, &tm); + if (err < 0) { + pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev)); + return 0; + } + if (rtc_valid_tm(&tm) != 0) { pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev)); return 0; diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 5813fa52c3d4..5b2717f5dafa 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -348,6 +348,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) /* Make sure we're not setting alarms in the past */ err = __rtc_read_time(rtc, &tm); + if (err) + return err; rtc_tm_to_time(&tm, &now); if (scheduled <= now) return -ETIME; diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c index ed526a192ce0..fd25e2374d4e 100644 --- a/drivers/rtc/rtc-au1xxx.c +++ b/drivers/rtc/rtc-au1xxx.c @@ -32,7 +32,7 @@ static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned long t; - t = au_readl(SYS_TOYREAD); + t = alchemy_rdsys(AU1000_SYS_TOYREAD); rtc_time_to_tm(t, tm); @@ -45,13 +45,12 @@ static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm) rtc_tm_to_time(tm, &t); - au_writel(t, SYS_TOYWRITE); - au_sync(); + alchemy_wrsys(t, AU1000_SYS_TOYWRITE); /* wait for the pending register write to succeed. This can * take up to 6 seconds... 
*/ - while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) + while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S) msleep(1); return 0; @@ -68,7 +67,7 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev) unsigned long t; int ret; - t = au_readl(SYS_COUNTER_CNTRL); + t = alchemy_rdsys(AU1000_SYS_CNTRCTRL); if (!(t & CNTR_OK)) { dev_err(&pdev->dev, "counters not working; aborting.\n"); ret = -ENODEV; @@ -78,10 +77,10 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev) ret = -ETIMEDOUT; /* set counter0 tickrate to 1Hz if necessary */ - if (au_readl(SYS_TOYTRIM) != 32767) { + if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767) { /* wait until hardware gives access to TRIM register */ t = 0x00100000; - while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S) && --t) + while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T0S) && --t) msleep(1); if (!t) { @@ -93,12 +92,11 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev) } /* set 1Hz TOY tick rate */ - au_writel(32767, SYS_TOYTRIM); - au_sync(); + alchemy_wrsys(32767, AU1000_SYS_TOYTRIM); } /* wait until the hardware allows writes to the counter reg */ - while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S) + while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S) msleep(1); rtcdev = devm_rtc_device_register(&pdev->dev, "rtc-au1xxx", diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index 595393098b09..731ed1a97f59 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c @@ -29,6 +29,8 @@ #define YEARS_FROM_DA9063(year) ((year) + 100) #define MONTHS_FROM_DA9063(month) ((month) - 1) +#define RTC_ALARM_DATA_LEN (DA9063_AD_REG_ALARM_Y - DA9063_AD_REG_ALARM_MI + 1) + #define RTC_DATA_LEN (DA9063_REG_COUNT_Y - DA9063_REG_COUNT_S + 1) #define RTC_SEC 0 #define RTC_MIN 1 @@ -42,6 +44,10 @@ struct da9063_rtc { struct da9063 *hw; struct rtc_time alarm_time; bool rtc_sync; + int alarm_year; + int alarm_start; + int alarm_len; + int data_start; }; static void da9063_data_to_tm(u8 *data, struct rtc_time *tm) @@ -83,7 +89,7 @@ static int da9063_rtc_stop_alarm(struct device *dev) { struct da9063_rtc *rtc = dev_get_drvdata(dev); - return regmap_update_bits(rtc->hw->regmap, DA9063_REG_ALARM_Y, + return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year, DA9063_ALARM_ON, 0); } @@ -91,7 +97,7 @@ static int da9063_rtc_start_alarm(struct device *dev) { struct da9063_rtc *rtc = dev_get_drvdata(dev); - return regmap_update_bits(rtc->hw->regmap, DA9063_REG_ALARM_Y, + return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year, DA9063_ALARM_ON, DA9063_ALARM_ON); } @@ -151,8 +157,9 @@ static int da9063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) int ret; unsigned int val; - ret = regmap_bulk_read(rtc->hw->regmap, DA9063_REG_ALARM_S, - &data[RTC_SEC], RTC_DATA_LEN); + data[RTC_SEC] = 0; + ret = regmap_bulk_read(rtc->hw->regmap, rtc->alarm_start, + &data[rtc->data_start], rtc->alarm_len); if (ret < 0) return ret; @@ -186,14 +193,14 @@ static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) return ret; } - ret = regmap_bulk_write(rtc->hw->regmap, DA9063_REG_ALARM_S, - data, RTC_DATA_LEN); + ret = regmap_bulk_write(rtc->hw->regmap, rtc->alarm_start, + &data[rtc->data_start], rtc->alarm_len); if (ret < 0) { dev_err(dev, "Failed to write alarm: %d\n", ret); return ret; } - rtc->alarm_time = alrm->time; + da9063_data_to_tm(data, &rtc->alarm_time); if (alrm->enabled) { ret = da9063_rtc_start_alarm(dev); @@ -218,7 +225,7 @@ static irqreturn_t da9063_alarm_event(int irq, void *data) { 
struct da9063_rtc *rtc = data; - regmap_update_bits(rtc->hw->regmap, DA9063_REG_ALARM_Y, + regmap_update_bits(rtc->hw->regmap, rtc->alarm_year, DA9063_ALARM_ON, 0); rtc->rtc_sync = true; @@ -257,7 +264,23 @@ static int da9063_rtc_probe(struct platform_device *pdev) goto err; } - ret = regmap_update_bits(da9063->regmap, DA9063_REG_ALARM_S, + rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + if (da9063->variant_code == PMIC_DA9063_AD) { + rtc->alarm_year = DA9063_AD_REG_ALARM_Y; + rtc->alarm_start = DA9063_AD_REG_ALARM_MI; + rtc->alarm_len = RTC_ALARM_DATA_LEN; + rtc->data_start = RTC_MIN; + } else { + rtc->alarm_year = DA9063_BB_REG_ALARM_Y; + rtc->alarm_start = DA9063_BB_REG_ALARM_S; + rtc->alarm_len = RTC_DATA_LEN; + rtc->data_start = RTC_SEC; + } + + ret = regmap_update_bits(da9063->regmap, rtc->alarm_start, DA9063_ALARM_STATUS_TICK | DA9063_ALARM_STATUS_ALARM, 0); if (ret < 0) { @@ -265,7 +288,7 @@ static int da9063_rtc_probe(struct platform_device *pdev) goto err; } - ret = regmap_update_bits(da9063->regmap, DA9063_REG_ALARM_S, + ret = regmap_update_bits(da9063->regmap, rtc->alarm_start, DA9063_ALARM_STATUS_ALARM, DA9063_ALARM_STATUS_ALARM); if (ret < 0) { @@ -273,25 +296,22 @@ static int da9063_rtc_probe(struct platform_device *pdev) goto err; } - ret = regmap_update_bits(da9063->regmap, DA9063_REG_ALARM_Y, + ret = regmap_update_bits(da9063->regmap, rtc->alarm_year, DA9063_TICK_ON, 0); if (ret < 0) { dev_err(&pdev->dev, "Failed to disable TICKs\n"); goto err; } - ret = regmap_bulk_read(da9063->regmap, DA9063_REG_ALARM_S, - data, RTC_DATA_LEN); + data[RTC_SEC] = 0; + ret = regmap_bulk_read(da9063->regmap, rtc->alarm_start, + &data[rtc->data_start], rtc->alarm_len); if (ret < 0) { dev_err(&pdev->dev, "Failed to read initial alarm data: %d\n", ret); goto err; } - rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); - if (!rtc) - return -ENOMEM; - platform_set_drvdata(pdev, rtc); irq_alarm = platform_get_irq_byname(pdev, "ALARM"); diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c index c3719189dd96..ae9f997223b1 100644 --- a/drivers/rtc/rtc-ds1343.c +++ b/drivers/rtc/rtc-ds1343.c @@ -4,6 +4,7 @@ * Real Time Clock * * Author : Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com> + * Ankur Srivastava <sankurece@gmail.com> : DS1343 Nvram Support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -45,6 +46,9 @@ #define DS1343_CONTROL_REG 0x0F #define DS1343_STATUS_REG 0x10 #define DS1343_TRICKLE_REG 0x11 +#define DS1343_NVRAM 0x20 + +#define DS1343_NVRAM_LEN 96 /* DS1343 Control Registers bits */ #define DS1343_EOSC 0x80 @@ -149,6 +153,64 @@ static ssize_t ds1343_store_glitchfilter(struct device *dev, static DEVICE_ATTR(glitch_filter, S_IRUGO | S_IWUSR, ds1343_show_glitchfilter, ds1343_store_glitchfilter); +static ssize_t ds1343_nvram_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + int ret; + unsigned char address; + struct device *dev = kobj_to_dev(kobj); + struct ds1343_priv *priv = dev_get_drvdata(dev); + + if (unlikely(!count)) + return count; + + if ((count + off) > DS1343_NVRAM_LEN) + count = DS1343_NVRAM_LEN - off; + + address = DS1343_NVRAM + off; + + ret = regmap_bulk_write(priv->map, address, buf, count); + if (ret < 0) + dev_err(&priv->spi->dev, "Error in nvram write %d", ret); + + return (ret < 0) ? 
ret : count; +} + + +static ssize_t ds1343_nvram_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + int ret; + unsigned char address; + struct device *dev = kobj_to_dev(kobj); + struct ds1343_priv *priv = dev_get_drvdata(dev); + + if (unlikely(!count)) + return count; + + if ((count + off) > DS1343_NVRAM_LEN) + count = DS1343_NVRAM_LEN - off; + + address = DS1343_NVRAM + off; + + ret = regmap_bulk_read(priv->map, address, buf, count); + if (ret < 0) + dev_err(&priv->spi->dev, "Error in nvram read %d\n", ret); + + return (ret < 0) ? ret : count; +} + + +static struct bin_attribute nvram_attr = { + .attr.name = "nvram", + .attr.mode = S_IRUGO | S_IWUSR, + .read = ds1343_nvram_read, + .write = ds1343_nvram_write, + .size = DS1343_NVRAM_LEN, +}; + static ssize_t ds1343_show_alarmstatus(struct device *dev, struct device_attribute *attr, char *buf) { @@ -274,12 +336,16 @@ static int ds1343_sysfs_register(struct device *dev) if (err) goto error1; + err = device_create_bin_file(dev, &nvram_attr); + if (err) + goto error2; + if (priv->irq <= 0) return err; err = device_create_file(dev, &dev_attr_alarm_mode); if (err) - goto error2; + goto error3; err = device_create_file(dev, &dev_attr_alarm_status); if (!err) @@ -287,6 +353,9 @@ static int ds1343_sysfs_register(struct device *dev) device_remove_file(dev, &dev_attr_alarm_mode); +error3: + device_remove_bin_file(dev, &nvram_attr); + error2: device_remove_file(dev, &dev_attr_trickle_charger); @@ -302,6 +371,7 @@ static void ds1343_sysfs_unregister(struct device *dev) device_remove_file(dev, &dev_attr_glitch_filter); device_remove_file(dev, &dev_attr_trickle_charger); + device_remove_bin_file(dev, &nvram_attr); if (priv->irq <= 0) return; @@ -684,6 +754,7 @@ static struct spi_driver ds1343_driver = { module_spi_driver(ds1343_driver); MODULE_DESCRIPTION("DS1343 RTC SPI Driver"); -MODULE_AUTHOR("Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>"); +MODULE_AUTHOR("Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>," + "Ankur Srivastava <sankurece@gmail.com>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DS1343_DRV_VERSION); diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c index c6b2191a4128..9822715db8ba 100644 --- a/drivers/rtc/rtc-ds1742.c +++ b/drivers/rtc/rtc-ds1742.c @@ -231,7 +231,7 @@ static struct platform_driver ds1742_rtc_driver = { .driver = { .name = "rtc-ds1742", .owner = THIS_MODULE, - .of_match_table = ds1742_rtc_of_match, + .of_match_table = of_match_ptr(ds1742_rtc_of_match), }, }; diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c new file mode 100644 index 000000000000..b40fbe332af4 --- /dev/null +++ b/drivers/rtc/rtc-efi-platform.c @@ -0,0 +1,31 @@ +/* + * Moved from arch/ia64/kernel/time.c + * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * Stephane Eranian <eranian@hpl.hp.com> + * David Mosberger <davidm@hpl.hp.com> + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> + * Copyright (C) 1999-2000 VA Linux Systems + * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com> + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/efi.h> +#include <linux/platform_device.h> + +static struct platform_device rtc_efi_dev = { + .name = "rtc-efi", + .id = -1, +}; + +static int __init rtc_init(void) +{ + if (efi_enabled(EFI_RUNTIME_SERVICES)) + if (platform_device_register(&rtc_efi_dev) < 0) + pr_err("unable to register rtc device...\n"); + + /* not necessarily an error */ + 
return 0; +} +module_init(rtc_init); diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c index c4c38431012e..c384fec6d173 100644 --- a/drivers/rtc/rtc-efi.c +++ b/drivers/rtc/rtc-efi.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/stringify.h> #include <linux/time.h> #include <linux/platform_device.h> #include <linux/rtc.h> @@ -48,8 +49,8 @@ compute_wday(efi_time_t *eft) int y; int ndays = 0; - if (eft->year < 1998) { - pr_err("EFI year < 1998, invalid date\n"); + if (eft->year < EFI_RTC_EPOCH) { + pr_err("EFI year < " __stringify(EFI_RTC_EPOCH) ", invalid date\n"); return -1; } @@ -78,19 +79,36 @@ convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft) eft->timezone = EFI_UNSPECIFIED_TIMEZONE; } -static void +static bool convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime) { memset(wtime, 0, sizeof(*wtime)); + + if (eft->second >= 60) + return false; wtime->tm_sec = eft->second; + + if (eft->minute >= 60) + return false; wtime->tm_min = eft->minute; + + if (eft->hour >= 24) + return false; wtime->tm_hour = eft->hour; + + if (!eft->day || eft->day > 31) + return false; wtime->tm_mday = eft->day; + + if (!eft->month || eft->month > 12) + return false; wtime->tm_mon = eft->month - 1; wtime->tm_year = eft->year - 1900; /* day of the week [0-6], Sunday=0 */ wtime->tm_wday = compute_wday(eft); + if (wtime->tm_wday < 0) + return false; /* day in the year [1-365]*/ wtime->tm_yday = compute_yday(eft); @@ -106,6 +124,8 @@ convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime) default: wtime->tm_isdst = -1; } + + return true; } static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) @@ -122,7 +142,8 @@ static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) if (status != EFI_SUCCESS) return -EINVAL; - convert_from_efi_time(&eft, &wkalrm->time); + if (!convert_from_efi_time(&eft, &wkalrm->time)) + return -EIO; return rtc_valid_tm(&wkalrm->time); } @@ -163,7 +184,8 @@ static int efi_read_time(struct device *dev, struct rtc_time *tm) return -EINVAL; } - convert_from_efi_time(&eft, tm); + if (!convert_from_efi_time(&eft, tm)) + return -EIO; return rtc_valid_tm(tm); } @@ -210,6 +232,7 @@ static struct platform_driver efi_rtc_driver = { module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe); +MODULE_ALIAS("platform:rtc-efi"); MODULE_AUTHOR("dann frazier <dannf@hp.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("EFI RTC driver"); diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c index 03b891129428..aa55f081c505 100644 --- a/drivers/rtc/rtc-isl12022.c +++ b/drivers/rtc/rtc-isl12022.c @@ -17,6 +17,8 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/err.h> +#include <linux/of.h> +#include <linux/of_device.h> #define DRV_VERSION "0.1" @@ -271,6 +273,13 @@ static int isl12022_probe(struct i2c_client *client, return PTR_ERR_OR_ZERO(isl12022->rtc); } +#ifdef CONFIG_OF +static struct of_device_id isl12022_dt_match[] = { + { .compatible = "isl,isl12022" }, + { }, +}; +#endif + static const struct i2c_device_id isl12022_id[] = { { "isl12022", 0 }, { } @@ -280,6 +289,9 @@ MODULE_DEVICE_TABLE(i2c, isl12022_id); static struct i2c_driver isl12022_driver = { .driver = { .name = "rtc-isl12022", +#ifdef CONFIG_OF + .of_match_table = of_match_ptr(isl12022_dt_match), +#endif }, .probe = isl12022_probe, .id_table = isl12022_id, diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c index 9efe118a28ba..d20a7f0786eb 100644 --- a/drivers/rtc/rtc-max77686.c +++ 
b/drivers/rtc/rtc-max77686.c @@ -492,16 +492,11 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info) return ret; } -static struct regmap_config max77686_rtc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, -}; - static int max77686_rtc_probe(struct platform_device *pdev) { struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent); struct max77686_rtc_info *info; - int ret, virq; + int ret; dev_info(&pdev->dev, "%s\n", __func__); @@ -514,14 +509,7 @@ static int max77686_rtc_probe(struct platform_device *pdev) info->dev = &pdev->dev; info->max77686 = max77686; info->rtc = max77686->rtc; - info->max77686->rtc_regmap = devm_regmap_init_i2c(info->max77686->rtc, - &max77686_rtc_regmap_config); - if (IS_ERR(info->max77686->rtc_regmap)) { - ret = PTR_ERR(info->max77686->rtc_regmap); - dev_err(info->max77686->dev, "Failed to allocate register map: %d\n", - ret); - return ret; - } + platform_set_drvdata(pdev, info); ret = max77686_rtc_init_reg(info); @@ -550,15 +538,16 @@ static int max77686_rtc_probe(struct platform_device *pdev) ret = -EINVAL; goto err_rtc; } - virq = irq_create_mapping(max77686->irq_domain, MAX77686_RTCIRQ_RTCA1); - if (!virq) { + + info->virq = regmap_irq_get_virq(max77686->rtc_irq_data, + MAX77686_RTCIRQ_RTCA1); + if (!info->virq) { ret = -ENXIO; goto err_rtc; } - info->virq = virq; - ret = devm_request_threaded_irq(&pdev->dev, virq, NULL, - max77686_rtc_alarm_irq, 0, "rtc-alarm0", info); + ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL, + max77686_rtc_alarm_irq, 0, "rtc-alarm1", info); if (ret < 0) dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n", info->virq, ret); diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c new file mode 100644 index 000000000000..6a12bf62c504 --- /dev/null +++ b/drivers/rtc/rtc-pcf85063.c @@ -0,0 +1,204 @@ +/* + * An I2C driver for the PCF85063 RTC + * Copyright 2014 Rose Technology + * + * Author: Søren Andersen <san@rosetechnology.dk> + * Maintainers: http://www.nslu2-linux.org/ + * + * based on the other drivers in this same directory. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/i2c.h> +#include <linux/bcd.h> +#include <linux/rtc.h> +#include <linux/module.h> + +#define DRV_VERSION "0.0.1" + +#define PCF85063_REG_CTRL1 0x00 /* status */ +#define PCF85063_REG_CTRL2 0x01 + +#define PCF85063_REG_SC 0x04 /* datetime */ +#define PCF85063_REG_MN 0x05 +#define PCF85063_REG_HR 0x06 +#define PCF85063_REG_DM 0x07 +#define PCF85063_REG_DW 0x08 +#define PCF85063_REG_MO 0x09 +#define PCF85063_REG_YR 0x0A + +#define PCF85063_MO_C 0x80 /* century */ + +static struct i2c_driver pcf85063_driver; + +struct pcf85063 { + struct rtc_device *rtc; + int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */ + int voltage_low; /* indicates if a low_voltage was detected */ +}; + +/* + * In the routines that deal directly with the pcf85063 hardware, we use + * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. 
+ */ +static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + struct pcf85063 *pcf85063 = i2c_get_clientdata(client); + unsigned char buf[13] = { PCF85063_REG_CTRL1 }; + struct i2c_msg msgs[] = { + {/* setup read ptr */ + .addr = client->addr, + .len = 1, + .buf = buf + }, + {/* read status + date */ + .addr = client->addr, + .flags = I2C_M_RD, + .len = 13, + .buf = buf + }, + }; + + /* read registers */ + if ((i2c_transfer(client->adapter, msgs, 2)) != 2) { + dev_err(&client->dev, "%s: read error\n", __func__); + return -EIO; + } + + tm->tm_sec = bcd2bin(buf[PCF85063_REG_SC] & 0x7F); + tm->tm_min = bcd2bin(buf[PCF85063_REG_MN] & 0x7F); + tm->tm_hour = bcd2bin(buf[PCF85063_REG_HR] & 0x3F); /* rtc hr 0-23 */ + tm->tm_mday = bcd2bin(buf[PCF85063_REG_DM] & 0x3F); + tm->tm_wday = buf[PCF85063_REG_DW] & 0x07; + tm->tm_mon = bcd2bin(buf[PCF85063_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */ + tm->tm_year = bcd2bin(buf[PCF85063_REG_YR]); + if (tm->tm_year < 70) + tm->tm_year += 100; /* assume we are in 1970...2069 */ + /* detect the polarity heuristically. see note above. */ + pcf85063->c_polarity = (buf[PCF85063_REG_MO] & PCF85063_MO_C) ? + (tm->tm_year >= 100) : (tm->tm_year < 100); + + /* the clock can give out invalid datetime, but we cannot return + * -EINVAL otherwise hwclock will refuse to set the time on bootup. + */ + if (rtc_valid_tm(tm) < 0) + dev_err(&client->dev, "retrieved date/time is not valid.\n"); + + return 0; +} + +static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + int i = 0, err = 0; + unsigned char buf[11]; + + /* Control & status */ + buf[PCF85063_REG_CTRL1] = 0; + buf[PCF85063_REG_CTRL2] = 5; + + /* hours, minutes and seconds */ + buf[PCF85063_REG_SC] = bin2bcd(tm->tm_sec) & 0x7F; + + buf[PCF85063_REG_MN] = bin2bcd(tm->tm_min); + buf[PCF85063_REG_HR] = bin2bcd(tm->tm_hour); + + /* Day of month, 1 - 31 */ + buf[PCF85063_REG_DM] = bin2bcd(tm->tm_mday); + + /* Day, 0 - 6 */ + buf[PCF85063_REG_DW] = tm->tm_wday & 0x07; + + /* month, 1 - 12 */ + buf[PCF85063_REG_MO] = bin2bcd(tm->tm_mon + 1); + + /* year and century */ + buf[PCF85063_REG_YR] = bin2bcd(tm->tm_year % 100); + + /* write register's data */ + for (i = 0; i < sizeof(buf); i++) { + unsigned char data[2] = { i, buf[i] }; + + err = i2c_master_send(client, data, sizeof(data)); + if (err != sizeof(data)) { + dev_err(&client->dev, "%s: err=%d addr=%02x, data=%02x\n", + __func__, err, data[0], data[1]); + return -EIO; + } + } + + return 0; +} + +static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + return pcf85063_get_datetime(to_i2c_client(dev), tm); +} + +static int pcf85063_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + return pcf85063_set_datetime(to_i2c_client(dev), tm); +} + +static const struct rtc_class_ops pcf85063_rtc_ops = { + .read_time = pcf85063_rtc_read_time, + .set_time = pcf85063_rtc_set_time +}; + +static int pcf85063_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pcf85063 *pcf85063; + + dev_dbg(&client->dev, "%s\n", __func__); + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + pcf85063 = devm_kzalloc(&client->dev, sizeof(struct pcf85063), + GFP_KERNEL); + if (!pcf85063) + return -ENOMEM; + + dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); + + i2c_set_clientdata(client, pcf85063); + + pcf85063->rtc = devm_rtc_device_register(&client->dev, + pcf85063_driver.driver.name, + &pcf85063_rtc_ops, THIS_MODULE); + + return 
PTR_ERR_OR_ZERO(pcf85063->rtc); +} + +static const struct i2c_device_id pcf85063_id[] = { + { "pcf85063", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, pcf85063_id); + +#ifdef CONFIG_OF +static const struct of_device_id pcf85063_of_match[] = { + { .compatible = "nxp,pcf85063" }, + {} +}; +MODULE_DEVICE_TABLE(of, pcf85063_of_match); +#endif + +static struct i2c_driver pcf85063_driver = { + .driver = { + .name = "rtc-pcf85063", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(pcf85063_of_match), + }, + .probe = pcf85063_probe, + .id_table = pcf85063_id, +}; + +module_i2c_driver(pcf85063_driver); + +MODULE_AUTHOR("Søren Andersen <san@rosetechnology.dk>"); +MODULE_DESCRIPTION("PCF85063 RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index 63b558c48196..5a197d9dc7e7 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -26,6 +26,8 @@ #define PCF8563_REG_ST1 0x00 /* status */ #define PCF8563_REG_ST2 0x01 +#define PCF8563_BIT_AIE (1 << 1) +#define PCF8563_BIT_AF (1 << 3) #define PCF8563_REG_SC 0x02 /* datetime */ #define PCF8563_REG_MN 0x03 @@ -36,9 +38,6 @@ #define PCF8563_REG_YR 0x08 #define PCF8563_REG_AMN 0x09 /* alarm */ -#define PCF8563_REG_AHR 0x0A -#define PCF8563_REG_ADM 0x0B -#define PCF8563_REG_ADW 0x0C #define PCF8563_REG_CLKO 0x0D /* clock out */ #define PCF8563_REG_TMRC 0x0E /* timer control */ @@ -67,37 +66,133 @@ struct pcf8563 { */ int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */ int voltage_low; /* incicates if a low_voltage was detected */ + + struct i2c_client *client; }; -/* - * In the routines that deal directly with the pcf8563 hardware, we use - * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. 
- */ -static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) +static int pcf8563_read_block_data(struct i2c_client *client, unsigned char reg, + unsigned char length, unsigned char *buf) { - struct pcf8563 *pcf8563 = i2c_get_clientdata(client); - unsigned char buf[13] = { PCF8563_REG_ST1 }; - struct i2c_msg msgs[] = { {/* setup read ptr */ .addr = client->addr, .len = 1, - .buf = buf + .buf = ®, }, - {/* read status + date */ + { .addr = client->addr, .flags = I2C_M_RD, - .len = 13, + .len = length, .buf = buf }, }; - /* read registers */ if ((i2c_transfer(client->adapter, msgs, 2)) != 2) { dev_err(&client->dev, "%s: read error\n", __func__); return -EIO; } + return 0; +} + +static int pcf8563_write_block_data(struct i2c_client *client, + unsigned char reg, unsigned char length, + unsigned char *buf) +{ + int i, err; + + for (i = 0; i < length; i++) { + unsigned char data[2] = { reg + i, buf[i] }; + + err = i2c_master_send(client, data, sizeof(data)); + if (err != sizeof(data)) { + dev_err(&client->dev, + "%s: err=%d addr=%02x, data=%02x\n", + __func__, err, data[0], data[1]); + return -EIO; + } + } + + return 0; +} + +static int pcf8563_set_alarm_mode(struct i2c_client *client, bool on) +{ + unsigned char buf[2]; + int err; + + err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, buf + 1); + if (err < 0) + return err; + + if (on) + buf[1] |= PCF8563_BIT_AIE; + else + buf[1] &= ~PCF8563_BIT_AIE; + + buf[1] &= ~PCF8563_BIT_AF; + buf[0] = PCF8563_REG_ST2; + + err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, buf + 1); + if (err < 0) { + dev_err(&client->dev, "%s: write error\n", __func__); + return -EIO; + } + + return 0; +} + +static int pcf8563_get_alarm_mode(struct i2c_client *client, unsigned char *en, + unsigned char *pen) +{ + unsigned char buf; + int err; + + err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, &buf); + if (err) + return err; + + if (en) + *en = !!(buf & PCF8563_BIT_AIE); + if (pen) + *pen = !!(buf & PCF8563_BIT_AF); + + return 0; +} + +static irqreturn_t pcf8563_irq(int irq, void *dev_id) +{ + struct pcf8563 *pcf8563 = i2c_get_clientdata(dev_id); + int err; + char pending; + + err = pcf8563_get_alarm_mode(pcf8563->client, NULL, &pending); + if (err < 0) + return err; + + if (pending) { + rtc_update_irq(pcf8563->rtc, 1, RTC_IRQF | RTC_AF); + pcf8563_set_alarm_mode(pcf8563->client, 1); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +/* + * In the routines that deal directly with the pcf8563 hardware, we use + * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. 
+ */ +static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) +{ + struct pcf8563 *pcf8563 = i2c_get_clientdata(client); + unsigned char buf[9]; + int err; + + err = pcf8563_read_block_data(client, PCF8563_REG_ST1, 9, buf); + if (err) + return err; + if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) { pcf8563->voltage_low = 1; dev_info(&client->dev, @@ -144,7 +239,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm) { struct pcf8563 *pcf8563 = i2c_get_clientdata(client); - int i, err; + int err; unsigned char buf[9]; dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " @@ -170,19 +265,10 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm) buf[PCF8563_REG_DW] = tm->tm_wday & 0x07; - /* write register's data */ - for (i = 0; i < 7; i++) { - unsigned char data[2] = { PCF8563_REG_SC + i, - buf[PCF8563_REG_SC + i] }; - - err = i2c_master_send(client, data, sizeof(data)); - if (err != sizeof(data)) { - dev_err(&client->dev, - "%s: err=%d addr=%02x, data=%02x\n", - __func__, err, data[0], data[1]); - return -EIO; - } - } + err = pcf8563_write_block_data(client, PCF8563_REG_SC, + 9 - PCF8563_REG_SC, buf + PCF8563_REG_SC); + if (err) + return err; return 0; } @@ -235,16 +321,83 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm) return pcf8563_set_datetime(to_i2c_client(dev), tm); } +static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char buf[4]; + int err; + + err = pcf8563_read_block_data(client, PCF8563_REG_AMN, 4, buf); + if (err) + return err; + + dev_dbg(&client->dev, + "%s: raw data is min=%02x, hr=%02x, mday=%02x, wday=%02x\n", + __func__, buf[0], buf[1], buf[2], buf[3]); + + tm->time.tm_min = bcd2bin(buf[0] & 0x7F); + tm->time.tm_hour = bcd2bin(buf[1] & 0x7F); + tm->time.tm_mday = bcd2bin(buf[2] & 0x1F); + tm->time.tm_wday = bcd2bin(buf[3] & 0x7); + tm->time.tm_mon = -1; + tm->time.tm_year = -1; + tm->time.tm_yday = -1; + tm->time.tm_isdst = -1; + + err = pcf8563_get_alarm_mode(client, &tm->enabled, &tm->pending); + if (err < 0) + return err; + + dev_dbg(&client->dev, "%s: tm is mins=%d, hours=%d, mday=%d, wday=%d," + " enabled=%d, pending=%d\n", __func__, tm->time.tm_min, + tm->time.tm_hour, tm->time.tm_mday, tm->time.tm_wday, + tm->enabled, tm->pending); + + return 0; +} + +static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned char buf[4]; + int err; + + dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d " + "enabled=%d pending=%d\n", __func__, + tm->time.tm_min, tm->time.tm_hour, tm->time.tm_wday, + tm->time.tm_mday, tm->enabled, tm->pending); + + buf[0] = bin2bcd(tm->time.tm_min); + buf[1] = bin2bcd(tm->time.tm_hour); + buf[2] = bin2bcd(tm->time.tm_mday); + buf[3] = tm->time.tm_wday & 0x07; + + err = pcf8563_write_block_data(client, PCF8563_REG_AMN, 4, buf); + if (err) + return err; + + return pcf8563_set_alarm_mode(client, 1); +} + +static int pcf8563_irq_enable(struct device *dev, unsigned int enabled) +{ + return pcf8563_set_alarm_mode(to_i2c_client(dev), !!enabled); +} + static const struct rtc_class_ops pcf8563_rtc_ops = { .ioctl = pcf8563_rtc_ioctl, .read_time = pcf8563_rtc_read_time, .set_time = pcf8563_rtc_set_time, + .read_alarm = pcf8563_rtc_read_alarm, + .set_alarm = pcf8563_rtc_set_alarm, + .alarm_irq_enable = 
pcf8563_irq_enable, }; static int pcf8563_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct pcf8563 *pcf8563; + int err; dev_dbg(&client->dev, "%s\n", __func__); @@ -259,12 +412,30 @@ static int pcf8563_probe(struct i2c_client *client, dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); i2c_set_clientdata(client, pcf8563); + pcf8563->client = client; + device_set_wakeup_capable(&client->dev, 1); pcf8563->rtc = devm_rtc_device_register(&client->dev, pcf8563_driver.driver.name, &pcf8563_rtc_ops, THIS_MODULE); - return PTR_ERR_OR_ZERO(pcf8563->rtc); + if (IS_ERR(pcf8563->rtc)) + return PTR_ERR(pcf8563->rtc); + + if (client->irq > 0) { + err = devm_request_threaded_irq(&client->dev, client->irq, + NULL, pcf8563_irq, + IRQF_SHARED|IRQF_ONESHOT|IRQF_TRIGGER_FALLING, + pcf8563->rtc->name, client); + if (err) { + dev_err(&client->dev, "unable to request IRQ %d\n", + client->irq); + return err; + } + + } + + return 0; } static const struct i2c_device_id pcf8563_id[] = { diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index 8f06250a0389..8754c33361e8 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c @@ -717,12 +717,14 @@ static int s5m_rtc_probe(struct platform_device *pdev) info->device_type = s5m87xx->device_type; info->wtsr_smpl = s5m87xx->wtsr_smpl; - info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq); - if (info->irq <= 0) { - ret = -EINVAL; - dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n", + if (s5m87xx->irq_data) { + info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq); + if (info->irq <= 0) { + ret = -EINVAL; + dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n", alarm_irq); - goto err; + goto err; + } } platform_set_drvdata(pdev, info); @@ -744,6 +746,11 @@ static int s5m_rtc_probe(struct platform_device *pdev) goto err; } + if (!info->irq) { + dev_info(&pdev->dev, "Alarm IRQ not available\n"); + return 0; + } + ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, s5m_rtc_alarm_irq, 0, "rtc-alarm0", info); @@ -802,7 +809,7 @@ static int s5m_rtc_resume(struct device *dev) struct s5m_rtc_info *info = dev_get_drvdata(dev); int ret = 0; - if (device_may_wakeup(dev)) + if (info->irq && device_may_wakeup(dev)) ret = disable_irq_wake(info->irq); return ret; @@ -813,7 +820,7 @@ static int s5m_rtc_suspend(struct device *dev) struct s5m_rtc_info *info = dev_get_drvdata(dev); int ret = 0; - if (device_may_wakeup(dev)) + if (info->irq && device_may_wakeup(dev)) ret = enable_irq_wake(info->irq); return ret; diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index 7af00208d637..2583349fbde5 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -258,6 +258,8 @@ static int tps65910_rtc_probe(struct platform_device *pdev) if (ret < 0) return ret; + platform_set_drvdata(pdev, tps_rtc); + irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_warn(&pdev->dev, "Wake up is not possible as irq = %d\n", @@ -283,8 +285,6 @@ static int tps65910_rtc_probe(struct platform_device *pdev) return ret; } - platform_set_drvdata(pdev, tps_rtc); - return 0; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1eef0f586950..5df05f26b7d9 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -42,8 +42,10 @@ * SECTION: exported variables of dasd.c */ debug_info_t *dasd_debug_area; +EXPORT_SYMBOL(dasd_debug_area); static struct dentry *dasd_debugfs_root_entry; struct dasd_discipline *dasd_diag_discipline_pointer; 
+EXPORT_SYMBOL(dasd_diag_discipline_pointer); void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); @@ -164,6 +166,7 @@ struct dasd_block *dasd_alloc_block(void) return block; } +EXPORT_SYMBOL_GPL(dasd_alloc_block); /* * Free memory of a device structure. @@ -172,6 +175,7 @@ void dasd_free_block(struct dasd_block *block) { kfree(block); } +EXPORT_SYMBOL_GPL(dasd_free_block); /* * Make a new device known to the system. @@ -281,10 +285,15 @@ static int dasd_state_basic_to_known(struct dasd_device *device) { int rc; + if (device->discipline->basic_to_known) { + rc = device->discipline->basic_to_known(device); + if (rc) + return rc; + } + if (device->block) { dasd_profile_exit(&device->block->profile); - if (device->block->debugfs_dentry) - debugfs_remove(device->block->debugfs_dentry); + debugfs_remove(device->block->debugfs_dentry); dasd_gendisk_free(device->block); dasd_block_clear_timer(device->block); } @@ -293,9 +302,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device) return rc; dasd_device_clear_timer(device); dasd_profile_exit(&device->profile); - if (device->debugfs_dentry) - debugfs_remove(device->debugfs_dentry); - + debugfs_remove(device->debugfs_dentry); DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); if (device->debug_area != NULL) { debug_unregister(device->debug_area); @@ -374,11 +381,6 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) { int rc; - if (device->discipline->ready_to_basic) { - rc = device->discipline->ready_to_basic(device); - if (rc) - return rc; - } device->state = DASD_STATE_BASIC; if (device->block) { struct dasd_block *block = device->block; @@ -579,6 +581,7 @@ void dasd_kick_device(struct dasd_device *device) /* queue call to dasd_kick_device to the kernel event daemon. */ schedule_work(&device->kick_work); } +EXPORT_SYMBOL(dasd_kick_device); /* * dasd_reload_device will schedule a call do do_reload_device to the kernel @@ -639,6 +642,7 @@ void dasd_set_target_state(struct dasd_device *device, int target) mutex_unlock(&device->state_mutex); dasd_put_device(device); } +EXPORT_SYMBOL(dasd_set_target_state); /* * Enable devices with device numbers in [from..to]. @@ -661,6 +665,7 @@ void dasd_enable_device(struct dasd_device *device) if (device->discipline->kick_validate) device->discipline->kick_validate(device); } +EXPORT_SYMBOL(dasd_enable_device); /* * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 
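Note: several hunks in this dasd.c diff, above and in the dasd_profile_exit()/dasd_statistics_removeroot() changes that follow, drop the "if (dentry)" guards around debugfs_remove(). That is safe because debugfs_remove() accepts a NULL (or error-valued) dentry and simply returns, so call sites need no check. A small standalone sketch of that NULL-tolerant cleanup idiom; the struct and function here are stand-ins, not kernel code:

#include <stddef.h>
#include <stdio.h>

struct dentry;	/* opaque handle, as seen by callers */

/* Stand-in for debugfs_remove(): silently ignores NULL, so callers can
 * pass a possibly-unset dentry pointer unconditionally. */
static void fake_debugfs_remove(struct dentry *dentry)
{
	if (!dentry)
		return;
	printf("removing %p\n", (void *)dentry);
}

int main(void)
{
	struct dentry *never_created = NULL;

	fake_debugfs_remove(never_created);	/* safe without a guard */
	return 0;
}
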
@@ -972,37 +977,37 @@ static void dasd_stats_seq_print(struct seq_file *m, seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); seq_printf(m, "total_pav %u\n", data->dasd_io_alias); seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); - seq_printf(m, "histogram_sectors "); + seq_puts(m, "histogram_sectors "); dasd_stats_array(m, data->dasd_io_secs); - seq_printf(m, "histogram_io_times "); + seq_puts(m, "histogram_io_times "); dasd_stats_array(m, data->dasd_io_times); - seq_printf(m, "histogram_io_times_weighted "); + seq_puts(m, "histogram_io_times_weighted "); dasd_stats_array(m, data->dasd_io_timps); - seq_printf(m, "histogram_time_build_to_ssch "); + seq_puts(m, "histogram_time_build_to_ssch "); dasd_stats_array(m, data->dasd_io_time1); - seq_printf(m, "histogram_time_ssch_to_irq "); + seq_puts(m, "histogram_time_ssch_to_irq "); dasd_stats_array(m, data->dasd_io_time2); - seq_printf(m, "histogram_time_ssch_to_irq_weighted "); + seq_puts(m, "histogram_time_ssch_to_irq_weighted "); dasd_stats_array(m, data->dasd_io_time2ps); - seq_printf(m, "histogram_time_irq_to_end "); + seq_puts(m, "histogram_time_irq_to_end "); dasd_stats_array(m, data->dasd_io_time3); - seq_printf(m, "histogram_ccw_queue_length "); + seq_puts(m, "histogram_ccw_queue_length "); dasd_stats_array(m, data->dasd_io_nr_req); seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); - seq_printf(m, "histogram_read_sectors "); + seq_puts(m, "histogram_read_sectors "); dasd_stats_array(m, data->dasd_read_secs); - seq_printf(m, "histogram_read_times "); + seq_puts(m, "histogram_read_times "); dasd_stats_array(m, data->dasd_read_times); - seq_printf(m, "histogram_read_time_build_to_ssch "); + seq_puts(m, "histogram_read_time_build_to_ssch "); dasd_stats_array(m, data->dasd_read_time1); - seq_printf(m, "histogram_read_time_ssch_to_irq "); + seq_puts(m, "histogram_read_time_ssch_to_irq "); dasd_stats_array(m, data->dasd_read_time2); - seq_printf(m, "histogram_read_time_irq_to_end "); + seq_puts(m, "histogram_read_time_irq_to_end "); dasd_stats_array(m, data->dasd_read_time3); - seq_printf(m, "histogram_read_ccw_queue_length "); + seq_puts(m, "histogram_read_ccw_queue_length "); dasd_stats_array(m, data->dasd_read_nr_req); } @@ -1016,7 +1021,7 @@ static int dasd_stats_show(struct seq_file *m, void *v) data = profile->data; if (!data) { spin_unlock_bh(&profile->lock); - seq_printf(m, "disabled\n"); + seq_puts(m, "disabled\n"); return 0; } dasd_stats_seq_print(m, data); @@ -1069,7 +1074,7 @@ static ssize_t dasd_stats_global_write(struct file *file, static int dasd_stats_global_show(struct seq_file *m, void *v) { if (!dasd_global_profile_level) { - seq_printf(m, "disabled\n"); + seq_puts(m, "disabled\n"); return 0; } dasd_stats_seq_print(m, &dasd_global_profile_data); @@ -1111,23 +1116,17 @@ static void dasd_profile_init(struct dasd_profile *profile, static void dasd_profile_exit(struct dasd_profile *profile) { dasd_profile_off(profile); - if (profile->dentry) { - debugfs_remove(profile->dentry); - profile->dentry = NULL; - } + debugfs_remove(profile->dentry); + profile->dentry = NULL; } static void dasd_statistics_removeroot(void) { dasd_global_profile_level = DASD_PROFILE_OFF; - if (dasd_global_profile_dentry) { - debugfs_remove(dasd_global_profile_dentry); - dasd_global_profile_dentry = NULL; - } - if (dasd_debugfs_global_entry) - 
debugfs_remove(dasd_debugfs_global_entry); - if (dasd_debugfs_root_entry) - debugfs_remove(dasd_debugfs_root_entry); + debugfs_remove(dasd_global_profile_dentry); + dasd_global_profile_dentry = NULL; + debugfs_remove(dasd_debugfs_global_entry); + debugfs_remove(dasd_debugfs_root_entry); } static void dasd_statistics_createroot(void) @@ -1178,7 +1177,7 @@ static void dasd_statistics_removeroot(void) int dasd_stats_generic_show(struct seq_file *m, void *v) { - seq_printf(m, "Statistics are not activated in this kernel\n"); + seq_puts(m, "Statistics are not activated in this kernel\n"); return 0; } @@ -1243,6 +1242,7 @@ struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, dasd_get_device(device); return cqr; } +EXPORT_SYMBOL(dasd_kmalloc_request); struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, @@ -1282,6 +1282,7 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, dasd_get_device(device); return cqr; } +EXPORT_SYMBOL(dasd_smalloc_request); /* * Free memory of a channel program. This function needs to free all the @@ -1304,6 +1305,7 @@ void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) kfree(cqr); dasd_put_device(device); } +EXPORT_SYMBOL(dasd_kfree_request); void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) { @@ -1314,6 +1316,7 @@ void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) spin_unlock_irqrestore(&device->mem_lock, flags); dasd_put_device(device); } +EXPORT_SYMBOL(dasd_sfree_request); /* * Check discipline magic in cqr. @@ -1391,6 +1394,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr) dasd_schedule_device_bh(device); return rc; } +EXPORT_SYMBOL(dasd_term_IO); /* * Start the i/o. This start_IO can fail if the channel is really busy. @@ -1509,6 +1513,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) cqr->intrc = rc; return rc; } +EXPORT_SYMBOL(dasd_start_IO); /* * Timeout function for dasd devices. This is used for different purposes @@ -1541,6 +1546,7 @@ void dasd_device_set_timer(struct dasd_device *device, int expires) else mod_timer(&device->timer, jiffies + expires); } +EXPORT_SYMBOL(dasd_device_set_timer); /* * Clear timeout for a device. @@ -1549,6 +1555,7 @@ void dasd_device_clear_timer(struct dasd_device *device) { del_timer(&device->timer); } +EXPORT_SYMBOL(dasd_device_clear_timer); static void dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) @@ -1601,6 +1608,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device) if (device->block) dasd_schedule_block_bh(device->block); } +EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); /* * Interrupt handler for "normal" ssch-io based dasd devices. 
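The dasd statistics code above switches seq_printf() calls that print constant strings to seq_puts(). A hedged sketch of a seq_file show callback using that split is below; the "demo" names and the placeholder value are illustrative only.

#include <linux/seq_file.h>

static int demo_stats_show(struct seq_file *m, void *v)
{
        unsigned int reqs = 42;                 /* placeholder counter */

        if (!reqs) {
                seq_puts(m, "disabled\n");      /* fixed string: no format parsing needed */
                return 0;
        }
        seq_printf(m, "total_requests %u\n", reqs);     /* formatted value */
        seq_puts(m, "histogram ");              /* fixed prefix before an array dump */
        return 0;
}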
@@ -1667,8 +1675,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, if (cqr->status == DASD_CQR_CLEAR_PENDING && scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { cqr->status = DASD_CQR_CLEARED; + if (cqr->callback_data == DASD_SLEEPON_START_TAG) + cqr->callback_data = DASD_SLEEPON_END_TAG; dasd_device_clear_timer(device); wake_up(&dasd_flush_wq); + wake_up(&generic_waitq); dasd_schedule_device_bh(device); return; } @@ -1722,6 +1733,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, dasd_device_clear_timer(device); dasd_schedule_device_bh(device); } +EXPORT_SYMBOL(dasd_int_handler); enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) { @@ -1995,6 +2007,7 @@ finished: __dasd_device_process_final_queue(device, &flush_queue); return rc; } +EXPORT_SYMBOL_GPL(dasd_flush_device_queue); /* * Acquire the device lock and process queues for the device. @@ -2034,6 +2047,7 @@ void dasd_schedule_device_bh(struct dasd_device *device) dasd_get_device(device); tasklet_hi_schedule(&device->tasklet); } +EXPORT_SYMBOL(dasd_schedule_device_bh); void dasd_device_set_stop_bits(struct dasd_device *device, int bits) { @@ -2066,6 +2080,7 @@ void dasd_add_request_head(struct dasd_ccw_req *cqr) dasd_schedule_device_bh(device); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } +EXPORT_SYMBOL(dasd_add_request_head); /* * Queue a request to the tail of the device ccw_queue. @@ -2084,6 +2099,7 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr) dasd_schedule_device_bh(device); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); } +EXPORT_SYMBOL(dasd_add_request_tail); /* * Wakeup helper for the 'sleep_on' functions. @@ -2291,13 +2307,27 @@ retry: rc = 0; list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { - if (__dasd_sleep_on_erp(cqr)) - rc = 1; + /* + * for alias devices simplify error recovery and + * return to upper layer + */ + if (cqr->startdev != cqr->basedev && + (cqr->status == DASD_CQR_TERMINATED || + cqr->status == DASD_CQR_NEED_ERP)) + return -EAGAIN; + else { + /* normal recovery for basedev IO */ + if (__dasd_sleep_on_erp(cqr)) { + if (!cqr->status == DASD_CQR_TERMINATED && + !cqr->status == DASD_CQR_NEED_ERP) + break; + rc = 1; + } + } } if (rc) goto retry; - return 0; } @@ -2309,6 +2339,7 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr) { return _dasd_sleep_on(cqr, 0); } +EXPORT_SYMBOL(dasd_sleep_on); /* * Start requests from a ccw_queue and wait for their completion. @@ -2327,6 +2358,7 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) { return _dasd_sleep_on(cqr, 1); } +EXPORT_SYMBOL(dasd_sleep_on_interruptible); /* * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock @@ -2401,6 +2433,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) return rc; } +EXPORT_SYMBOL(dasd_sleep_on_immediatly); /* * Cancels a request that was started with dasd_sleep_on_req. @@ -2423,6 +2456,8 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) case DASD_CQR_QUEUED: /* request was not started - just set to cleared */ cqr->status = DASD_CQR_CLEARED; + if (cqr->callback_data == DASD_SLEEPON_START_TAG) + cqr->callback_data = DASD_SLEEPON_END_TAG; break; case DASD_CQR_IN_IO: /* request in IO - terminate IO and release again */ @@ -2442,6 +2477,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) dasd_schedule_device_bh(device); return rc; } +EXPORT_SYMBOL(dasd_cancel_req); /* * SECTION: Operations of the dasd_block layer. 
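In the _dasd_sleep_on retry hunk above, the added check reads "!cqr->status == DASD_CQR_TERMINATED"; since '!' binds tighter than '==', this compares (!cqr->status), i.e. 0 or 1, against the constant and can never test the status as the surrounding logic suggests. The fragment below is a sketch of the test the tokens appear to intend, not a claim about how it was eventually resolved upstream.

/* inner loop body of the retry path, with the status test spelled out */
if (__dasd_sleep_on_erp(cqr)) {
        /* stop early only for requests that are no longer recoverable */
        if (cqr->status != DASD_CQR_TERMINATED &&
            cqr->status != DASD_CQR_NEED_ERP)
                break;
        rc = 1;
}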
@@ -2475,6 +2511,7 @@ void dasd_block_set_timer(struct dasd_block *block, int expires) else mod_timer(&block->timer, jiffies + expires); } +EXPORT_SYMBOL(dasd_block_set_timer); /* * Clear timeout for a dasd_block. @@ -2483,6 +2520,7 @@ void dasd_block_clear_timer(struct dasd_block *block) { del_timer(&block->timer); } +EXPORT_SYMBOL(dasd_block_clear_timer); /* * Process finished error recovery ccw. @@ -2864,6 +2902,7 @@ void dasd_schedule_block_bh(struct dasd_block *block) dasd_get_device(block->base); tasklet_hi_schedule(&block->tasklet); } +EXPORT_SYMBOL(dasd_schedule_block_bh); /* @@ -3202,8 +3241,8 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie) ret = ccw_device_set_online(cdev); if (ret) - pr_warning("%s: Setting the DASD online failed with rc=%d\n", - dev_name(&cdev->dev), ret); + pr_warn("%s: Setting the DASD online failed with rc=%d\n", + dev_name(&cdev->dev), ret); } /* @@ -3234,6 +3273,7 @@ int dasd_generic_probe(struct ccw_device *cdev, async_schedule(dasd_generic_auto_online, cdev); return 0; } +EXPORT_SYMBOL_GPL(dasd_generic_probe); /* * This will one day be called from a global not_oper handler. @@ -3276,6 +3316,7 @@ void dasd_generic_remove(struct ccw_device *cdev) dasd_remove_sysfs_files(cdev); } +EXPORT_SYMBOL_GPL(dasd_generic_remove); /* * Activate a device. This is called from dasd_{eckd,fba}_probe() when either @@ -3298,9 +3339,8 @@ int dasd_generic_set_online(struct ccw_device *cdev, discipline = base_discipline; if (device->features & DASD_FEATURE_USEDIAG) { if (!dasd_diag_discipline_pointer) { - pr_warning("%s Setting the DASD online failed because " - "of missing DIAG discipline\n", - dev_name(&cdev->dev)); + pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", + dev_name(&cdev->dev)); dasd_delete_device(device); return -ENODEV; } @@ -3321,9 +3361,8 @@ int dasd_generic_set_online(struct ccw_device *cdev, /* check_device will allocate block device if necessary */ rc = discipline->check_device(device); if (rc) { - pr_warning("%s Setting the DASD online with discipline %s " - "failed with rc=%i\n", - dev_name(&cdev->dev), discipline->name, rc); + pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", + dev_name(&cdev->dev), discipline->name, rc); module_put(discipline->owner); module_put(base_discipline->owner); dasd_delete_device(device); @@ -3332,8 +3371,8 @@ int dasd_generic_set_online(struct ccw_device *cdev, dasd_set_target_state(device, DASD_STATE_ONLINE); if (device->state <= DASD_STATE_KNOWN) { - pr_warning("%s Setting the DASD online failed because of a " - "missing discipline\n", dev_name(&cdev->dev)); + pr_warn("%s Setting the DASD online failed because of a missing discipline\n", + dev_name(&cdev->dev)); rc = -ENODEV; dasd_set_target_state(device, DASD_STATE_NEW); if (device->block) @@ -3348,6 +3387,7 @@ int dasd_generic_set_online(struct ccw_device *cdev, dasd_put_device(device); return rc; } +EXPORT_SYMBOL_GPL(dasd_generic_set_online); int dasd_generic_set_offline(struct ccw_device *cdev) { @@ -3371,13 +3411,11 @@ int dasd_generic_set_offline(struct ccw_device *cdev) open_count = atomic_read(&device->block->open_count); if (open_count > max_count) { if (open_count > 0) - pr_warning("%s: The DASD cannot be set offline " - "with open count %i\n", - dev_name(&cdev->dev), open_count); + pr_warn("%s: The DASD cannot be set offline with open count %i\n", + dev_name(&cdev->dev), open_count); else - pr_warning("%s: The DASD cannot be set offline " - "while it is in use\n", - 
dev_name(&cdev->dev)); + pr_warn("%s: The DASD cannot be set offline while it is in use\n", + dev_name(&cdev->dev)); clear_bit(DASD_FLAG_OFFLINE, &device->flags); dasd_put_device(device); return -EBUSY; @@ -3451,6 +3489,7 @@ interrupted: dasd_put_device(device); return rc; } +EXPORT_SYMBOL_GPL(dasd_generic_set_offline); int dasd_generic_last_path_gone(struct dasd_device *device) { @@ -3492,6 +3531,10 @@ int dasd_generic_path_operational(struct dasd_device *device) dasd_schedule_device_bh(device); if (device->block) dasd_schedule_block_bh(device->block); + + if (!device->stopped) + wake_up(&generic_waitq); + return 1; } EXPORT_SYMBOL_GPL(dasd_generic_path_operational); @@ -3523,6 +3566,7 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) dasd_put_device(device); return ret; } +EXPORT_SYMBOL_GPL(dasd_generic_notify); void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) { @@ -3872,39 +3916,3 @@ failed: module_init(dasd_init); module_exit(dasd_exit); - -EXPORT_SYMBOL(dasd_debug_area); -EXPORT_SYMBOL(dasd_diag_discipline_pointer); - -EXPORT_SYMBOL(dasd_add_request_head); -EXPORT_SYMBOL(dasd_add_request_tail); -EXPORT_SYMBOL(dasd_cancel_req); -EXPORT_SYMBOL(dasd_device_clear_timer); -EXPORT_SYMBOL(dasd_block_clear_timer); -EXPORT_SYMBOL(dasd_enable_device); -EXPORT_SYMBOL(dasd_int_handler); -EXPORT_SYMBOL(dasd_kfree_request); -EXPORT_SYMBOL(dasd_kick_device); -EXPORT_SYMBOL(dasd_kmalloc_request); -EXPORT_SYMBOL(dasd_schedule_device_bh); -EXPORT_SYMBOL(dasd_schedule_block_bh); -EXPORT_SYMBOL(dasd_set_target_state); -EXPORT_SYMBOL(dasd_device_set_timer); -EXPORT_SYMBOL(dasd_block_set_timer); -EXPORT_SYMBOL(dasd_sfree_request); -EXPORT_SYMBOL(dasd_sleep_on); -EXPORT_SYMBOL(dasd_sleep_on_immediatly); -EXPORT_SYMBOL(dasd_sleep_on_interruptible); -EXPORT_SYMBOL(dasd_smalloc_request); -EXPORT_SYMBOL(dasd_start_IO); -EXPORT_SYMBOL(dasd_term_IO); - -EXPORT_SYMBOL_GPL(dasd_generic_probe); -EXPORT_SYMBOL_GPL(dasd_generic_remove); -EXPORT_SYMBOL_GPL(dasd_generic_notify); -EXPORT_SYMBOL_GPL(dasd_generic_set_online); -EXPORT_SYMBOL_GPL(dasd_generic_set_offline); -EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); -EXPORT_SYMBOL_GPL(dasd_flush_device_queue); -EXPORT_SYMBOL_GPL(dasd_alloc_block); -EXPORT_SYMBOL_GPL(dasd_free_block); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 2ead7e78c456..14ba80bfa571 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -77,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx); * strings when running as a module. */ static char *dasd[256]; -module_param_array(dasd, charp, NULL, 0); +module_param_array(dasd, charp, NULL, S_IRUGO); /* * Single spinlock to protect devmap and servermap structures and lists. 
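Two small conventions recur in the hunks above: pr_warning() calls with wrapped string literals become pr_warn() with a single, greppable format string, and the dasd_devmap.c module parameter gains S_IRUGO so the array shows up world-readable under /sys/module/. A hedged sketch of the parameter pattern follows; the "demo" identifiers are illustrative, not from the patch.

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static char *demo_devices[4];
/* S_IRUGO (0444) exposes the array under /sys/module/<module>/parameters/ */
module_param_array(demo_devices, charp, NULL, S_IRUGO);
MODULE_PARM_DESC(demo_devices, "List of device bus IDs (illustrative)");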
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 2e8e0755070b..51dea7baf02c 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2039,7 +2039,7 @@ static int dasd_eckd_online_to_ready(struct dasd_device *device) return 0; }; -static int dasd_eckd_ready_to_basic(struct dasd_device *device) +static int dasd_eckd_basic_to_known(struct dasd_device *device) { return dasd_alias_remove_device(device); }; @@ -2061,11 +2061,12 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) static struct dasd_ccw_req * dasd_eckd_build_format(struct dasd_device *base, - struct format_data_t *fdata) + struct format_data_t *fdata, + int enable_pav) { struct dasd_eckd_private *base_priv; struct dasd_eckd_private *start_priv; - struct dasd_device *startdev; + struct dasd_device *startdev = NULL; struct dasd_ccw_req *fcp; struct eckd_count *ect; struct ch_t address; @@ -2079,7 +2080,9 @@ dasd_eckd_build_format(struct dasd_device *base, int nr_tracks; int use_prefix; - startdev = dasd_alias_get_start_dev(base); + if (enable_pav) + startdev = dasd_alias_get_start_dev(base); + if (!startdev) startdev = base; @@ -2309,6 +2312,7 @@ dasd_eckd_build_format(struct dasd_device *base, fcp->startdev = startdev; fcp->memdev = startdev; + fcp->basedev = base; fcp->retries = 256; fcp->expires = startdev->default_expires * HZ; fcp->buildclk = get_tod_clock(); @@ -2319,7 +2323,8 @@ dasd_eckd_build_format(struct dasd_device *base, static int dasd_eckd_format_device(struct dasd_device *base, - struct format_data_t *fdata) + struct format_data_t *fdata, + int enable_pav) { struct dasd_ccw_req *cqr, *n; struct dasd_block *block; @@ -2327,7 +2332,7 @@ dasd_eckd_format_device(struct dasd_device *base, struct list_head format_queue; struct dasd_device *device; int old_stop, format_step; - int step, rc = 0; + int step, rc = 0, sleep_rc; block = base->block; private = (struct dasd_eckd_private *) base->private; @@ -2361,11 +2366,11 @@ dasd_eckd_format_device(struct dasd_device *base, } INIT_LIST_HEAD(&format_queue); - old_stop = fdata->stop_unit; + old_stop = fdata->stop_unit; while (fdata->start_unit <= 1) { fdata->stop_unit = fdata->start_unit; - cqr = dasd_eckd_build_format(base, fdata); + cqr = dasd_eckd_build_format(base, fdata, enable_pav); list_add(&cqr->blocklist, &format_queue); fdata->stop_unit = old_stop; @@ -2383,7 +2388,7 @@ retry: if (step > format_step) fdata->stop_unit = fdata->start_unit + format_step - 1; - cqr = dasd_eckd_build_format(base, fdata); + cqr = dasd_eckd_build_format(base, fdata, enable_pav); if (IS_ERR(cqr)) { if (PTR_ERR(cqr) == -ENOMEM) { /* @@ -2403,7 +2408,7 @@ retry: } sleep: - dasd_sleep_on_queue(&format_queue); + sleep_rc = dasd_sleep_on_queue(&format_queue); list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { device = cqr->startdev; @@ -2415,6 +2420,9 @@ sleep: private->count--; } + if (sleep_rc) + return sleep_rc; + /* * in case of ENOMEM we need to retry after * first requests are finished @@ -4511,7 +4519,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .verify_path = dasd_eckd_verify_path, .basic_to_ready = dasd_eckd_basic_to_ready, .online_to_ready = dasd_eckd_online_to_ready, - .ready_to_basic = dasd_eckd_ready_to_basic, + .basic_to_known = dasd_eckd_basic_to_known, .fill_geometry = dasd_eckd_fill_geometry, .start_IO = dasd_start_IO, .term_IO = dasd_term_IO, diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 690001af0d09..c20170166909 100644 --- 
a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -175,6 +175,7 @@ struct dasd_ccw_req { struct dasd_block *block; /* the originating block device */ struct dasd_device *memdev; /* the device used to allocate this */ struct dasd_device *startdev; /* device the request is started on */ + struct dasd_device *basedev; /* base device if no block->base */ void *cpaddr; /* address of ccw or tcw */ unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */ char status; /* status of this request */ @@ -304,7 +305,7 @@ struct dasd_discipline { */ int (*basic_to_ready) (struct dasd_device *); int (*online_to_ready) (struct dasd_device *); - int (*ready_to_basic) (struct dasd_device *); + int (*basic_to_known)(struct dasd_device *); /* (struct dasd_device *); * Device operation functions. build_cp creates a ccw chain for @@ -321,7 +322,7 @@ struct dasd_discipline { int (*term_IO) (struct dasd_ccw_req *); void (*handle_terminated_request) (struct dasd_ccw_req *); int (*format_device) (struct dasd_device *, - struct format_data_t *); + struct format_data_t *, int enable_pav); int (*free_cp) (struct dasd_ccw_req *, struct request *); /* diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 25a0f2f8b0b9..02837d0ad942 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -203,7 +203,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) { struct dasd_device *base; - int rc; + int enable_pav = 1; + int rc, retries; + int start, stop; base = block->base; if (base->discipline->format_device == NULL) @@ -231,11 +233,30 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata) bdput(bdev); } - rc = base->discipline->format_device(base, fdata); - if (rc) - return rc; - - return 0; + retries = 255; + /* backup start- and endtrack for retries */ + start = fdata->start_unit; + stop = fdata->stop_unit; + do { + rc = base->discipline->format_device(base, fdata, enable_pav); + if (rc) { + if (rc == -EAGAIN) { + retries--; + /* disable PAV in case of errors */ + enable_pav = 0; + fdata->start_unit = start; + fdata->stop_unit = stop; + } else + return rc; + } else + /* success */ + break; + } while (retries); + + if (!retries) + return -EIO; + else + return 0; } /* diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 5af7f0bd6125..c43aca69fb30 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data) unsigned long flags; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); - if (raw->flags & RAW3215_TIMER_RUNS) { - del_timer(&raw->timer); - raw->flags &= ~RAW3215_TIMER_RUNS; - if (!(raw->port.flags & ASYNC_SUSPENDED)) { - raw3215_mk_write_req(raw); - raw3215_start_io(raw); + raw->flags &= ~RAW3215_TIMER_RUNS; + if (!(raw->port.flags & ASYNC_SUSPENDED)) { + raw3215_mk_write_req(raw); + raw3215_start_io(raw); + if ((raw->queued_read || raw->queued_write) && + !(raw->flags & RAW3215_WORKING) && + !(raw->flags & RAW3215_TIMER_RUNS)) { + raw->timer.expires = RAW3215_TIMEOUT + jiffies; + add_timer(&raw->timer); + raw->flags |= RAW3215_TIMER_RUNS; } } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); @@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw) (raw->flags & RAW3215_FLUSHING)) { /* execute write requests bigger than minimum size */ raw3215_start_io(raw); - if (raw->flags & RAW3215_TIMER_RUNS) { - del_timer(&raw->timer); - raw->flags &= 
~RAW3215_TIMER_RUNS; - } - } else if (!(raw->flags & RAW3215_TIMER_RUNS)) { - /* delay small writes */ - raw->timer.expires = RAW3215_TIMEOUT + jiffies; - add_timer(&raw->timer); - raw->flags |= RAW3215_TIMER_RUNS; } } + if ((raw->queued_read || raw->queued_write) && + !(raw->flags & RAW3215_WORKING) && + !(raw->flags & RAW3215_TIMER_RUNS)) { + raw->timer.expires = RAW3215_TIMEOUT + jiffies; + add_timer(&raw->timer); + raw->flags |= RAW3215_TIMER_RUNS; + } } /* @@ -1033,12 +1035,26 @@ static int tty3215_write(struct tty_struct * tty, const unsigned char *buf, int count) { struct raw3215_info *raw; + int i, written; if (!tty) return 0; raw = (struct raw3215_info *) tty->driver_data; - raw3215_write(raw, buf, count); - return count; + written = count; + while (count > 0) { + for (i = 0; i < count; i++) + if (buf[i] == '\t' || buf[i] == '\n') + break; + raw3215_write(raw, buf, i); + count -= i; + buf += i; + if (count > 0) { + raw3215_putchar(raw, *buf); + count--; + buf++; + } + } + return written; } /* @@ -1186,7 +1202,7 @@ static int __init tty3215_init(void) driver->subtype = SYSTEM_TYPE_TTY; driver->init_termios = tty_std_termios; driver->init_termios.c_iflag = IGNBRK | IGNPAR; - driver->init_termios.c_oflag = ONLCR | XTABS; + driver->init_termios.c_oflag = ONLCR; driver->init_termios.c_lflag = ISIG; driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &tty3215_ops); diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 7ed7a5987816..003663288e29 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -559,7 +559,7 @@ sclp_tty_init(void) driver->subtype = SYSTEM_TYPE_TTY; driver->init_termios = tty_std_termios; driver->init_termios.c_iflag = IGNBRK | IGNPAR; - driver->init_termios.c_oflag = ONLCR | XTABS; + driver->init_termios.c_oflag = ONLCR; driver->init_termios.c_lflag = ISIG | ECHO; driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &sclp_ops); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index f5f4a91fab44..f76bff68d1de 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -17,6 +17,8 @@ #include "qdio.h" #include "qdio_debug.h" +#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) + static struct kmem_cache *qdio_q_cache; static struct kmem_cache *qdio_aob_cache; @@ -32,6 +34,57 @@ void qdio_release_aob(struct qaob *aob) } EXPORT_SYMBOL_GPL(qdio_release_aob); +/** + * qdio_free_buffers() - free qdio buffers + * @buf: array of pointers to qdio buffers + * @count: number of qdio buffers to free + */ +void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count) +{ + int pos; + + for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) + free_page((unsigned long) buf[pos]); +} +EXPORT_SYMBOL_GPL(qdio_free_buffers); + +/** + * qdio_alloc_buffers() - allocate qdio buffers + * @buf: array of pointers to qdio buffers + * @count: number of qdio buffers to allocate + */ +int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count) +{ + int pos; + + for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) { + buf[pos] = (void *) get_zeroed_page(GFP_KERNEL); + if (!buf[pos]) { + qdio_free_buffers(buf, count); + return -ENOMEM; + } + } + for (pos = 0; pos < count; pos++) + if (pos % QBUFF_PER_PAGE) + buf[pos] = buf[pos - 1] + 1; + return 0; +} +EXPORT_SYMBOL_GPL(qdio_alloc_buffers); + +/** + * qdio_reset_buffers() - reset qdio buffers + * @buf: array of pointers to qdio buffers + * @count: number of qdio buffers that will be zeroed + */ +void 
qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count) +{ + int pos; + + for (pos = 0; pos < count; pos++) + memset(buf[pos], 0, sizeof(struct qdio_buffer)); +} +EXPORT_SYMBOL_GPL(qdio_reset_buffers); + /* * qebsm is only available under 64bit but the adapter sets the feature * flag anyway, so we manually override it. diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index bbafbd0e017a..e7646ce3d659 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -439,10 +439,10 @@ struct qeth_qdio_buffer { }; struct qeth_qdio_q { - struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; int next_buf_to_init; -} __attribute__ ((aligned(256))); +}; struct qeth_qdio_out_buffer { struct qdio_buffer *buffer; @@ -465,7 +465,7 @@ enum qeth_out_q_states { }; struct qeth_qdio_out_q { - struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qdio_outbuf_state *bufstates; /* convenience pointer */ int queue_no; @@ -483,7 +483,7 @@ struct qeth_qdio_out_q { atomic_t used_buffers; /* indicates whether PCI flag must be set (or if one is outstanding) */ atomic_t set_pci_flags_count; -} __attribute__ ((aligned(256))); +}; struct qeth_qdio_info { atomic_t state; @@ -889,6 +889,7 @@ extern const struct attribute_group *qeth_generic_attr_groups[]; extern const struct attribute_group *qeth_osn_attr_groups[]; extern struct workqueue_struct *qeth_wq; +int qeth_card_hw_is_reachable(struct qeth_card *); const char *qeth_get_cardname_short(struct qeth_card *); int qeth_realloc_buffer_pool(struct qeth_card *, int); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 71bfacfc097e..fd22c811cbe1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -73,6 +73,13 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); struct workqueue_struct *qeth_wq; EXPORT_SYMBOL_GPL(qeth_wq); +int qeth_card_hw_is_reachable(struct qeth_card *card) +{ + return (card->state == CARD_STATE_SOFTSETUP) || + (card->state == CARD_STATE_UP); +} +EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable); + static void qeth_close_dev_handler(struct work_struct *work) { struct qeth_card *card; @@ -292,14 +299,43 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) } EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); +static void qeth_free_qdio_queue(struct qeth_qdio_q *q) +{ + if (!q) + return; + + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + kfree(q); +} + +static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) +{ + struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); + int i; + + if (!q) + return NULL; + + if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { + kfree(q); + return NULL; + } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) + q->bufs[i].buffer = q->qdio_bufs[i]; + + QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); + return q; +} + static inline int qeth_cq_init(struct qeth_card *card) { int rc; if (card->options.cq == QETH_CQ_ENABLED) { QETH_DBF_TEXT(SETUP, 2, "cqinit"); - memset(card->qdio.c_q->qdio_bufs, 0, - QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); + qdio_reset_buffers(card->qdio.c_q->qdio_bufs, + QDIO_MAX_BUFFERS_PER_Q); card->qdio.c_q->next_buf_to_init = 127; rc 
= do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, card->qdio.no_in_queues - 1, 0, @@ -323,21 +359,12 @@ static inline int qeth_alloc_cq(struct qeth_card *card) struct qdio_outbuf_state *outbuf_states; QETH_DBF_TEXT(SETUP, 2, "cqon"); - card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q), - GFP_KERNEL); + card->qdio.c_q = qeth_alloc_qdio_queue(); if (!card->qdio.c_q) { rc = -1; goto kmsg_out; } - QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *)); - - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - card->qdio.c_q->bufs[i].buffer = - &card->qdio.c_q->qdio_bufs[i]; - } - card->qdio.no_in_queues = 2; - card->qdio.out_bufstates = kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * @@ -361,7 +388,7 @@ static inline int qeth_alloc_cq(struct qeth_card *card) out: return rc; free_cq_out: - kfree(card->qdio.c_q); + qeth_free_qdio_queue(card->qdio.c_q); card->qdio.c_q = NULL; kmsg_out: dev_err(&card->gdev->dev, "Failed to create completion queue\n"); @@ -372,7 +399,7 @@ static inline void qeth_free_cq(struct qeth_card *card) { if (card->qdio.c_q) { --card->qdio.no_in_queues; - kfree(card->qdio.c_q); + qeth_free_qdio_queue(card->qdio.c_q); card->qdio.c_q = NULL; } kfree(card->qdio.out_bufstates); @@ -1282,35 +1309,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card) } } -static void qeth_free_qdio_buffers(struct qeth_card *card) -{ - int i, j; - - if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == - QETH_QDIO_UNINITIALIZED) - return; - - qeth_free_cq(card); - cancel_delayed_work_sync(&card->buffer_reclaim_work); - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { - if (card->qdio.in_q->bufs[j].rx_skb) - dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); - } - kfree(card->qdio.in_q); - card->qdio.in_q = NULL; - /* inbound buffer pool */ - qeth_free_buffer_pool(card); - /* free outbound qdio_qs */ - if (card->qdio.out_qs) { - for (i = 0; i < card->qdio.no_out_queues; ++i) { - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); - kfree(card->qdio.out_qs[i]); - } - kfree(card->qdio.out_qs); - card->qdio.out_qs = NULL; - } -} - static void qeth_clean_channel(struct qeth_channel *channel) { int cnt; @@ -2392,7 +2390,7 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) rc = -ENOMEM; goto out; } - newbuf->buffer = &q->qdio_bufs[bidx]; + newbuf->buffer = q->qdio_bufs[bidx]; skb_queue_head_init(&newbuf->skb_list); lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); newbuf->q = q; @@ -2411,6 +2409,28 @@ out: return rc; } +static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) +{ + if (!q) + return; + + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + kfree(q); +} + +static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void) +{ + struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); + + if (!q) + return NULL; + + if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { + kfree(q); + return NULL; + } + return q; +} static int qeth_alloc_qdio_buffers(struct qeth_card *card) { @@ -2422,19 +2442,11 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) return 0; - card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q), - GFP_KERNEL); + QETH_DBF_TEXT(SETUP, 2, "inq"); + card->qdio.in_q = qeth_alloc_qdio_queue(); if (!card->qdio.in_q) goto out_nomem; - QETH_DBF_TEXT(SETUP, 2, "inq"); - QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *)); - memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); - /* give inbound qeth_qdio_buffers their qdio_buffers */ - for (i 
= 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - card->qdio.in_q->bufs[i].buffer = - &card->qdio.in_q->qdio_bufs[i]; - card->qdio.in_q->bufs[i].rx_skb = NULL; - } + /* inbound buffer pool */ if (qeth_alloc_buffer_pool(card)) goto out_freeinq; @@ -2446,8 +2458,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) if (!card->qdio.out_qs) goto out_freepool; for (i = 0; i < card->qdio.no_out_queues; ++i) { - card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q), - GFP_KERNEL); + card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf(); if (!card->qdio.out_qs[i]) goto out_freeoutq; QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); @@ -2476,7 +2487,7 @@ out_freeoutqbufs: } out_freeoutq: while (i > 0) { - kfree(card->qdio.out_qs[--i]); + qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); } kfree(card->qdio.out_qs); @@ -2484,13 +2495,42 @@ out_freeoutq: out_freepool: qeth_free_buffer_pool(card); out_freeinq: - kfree(card->qdio.in_q); + qeth_free_qdio_queue(card->qdio.in_q); card->qdio.in_q = NULL; out_nomem: atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); return -ENOMEM; } +static void qeth_free_qdio_buffers(struct qeth_card *card) +{ + int i, j; + + if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == + QETH_QDIO_UNINITIALIZED) + return; + + qeth_free_cq(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + if (card->qdio.in_q->bufs[j].rx_skb) + dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); + } + qeth_free_qdio_queue(card->qdio.in_q); + card->qdio.in_q = NULL; + /* inbound buffer pool */ + qeth_free_buffer_pool(card); + /* free outbound qdio_qs */ + if (card->qdio.out_qs) { + for (i = 0; i < card->qdio.no_out_queues; ++i) { + qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); + qeth_free_qdio_out_buf(card->qdio.out_qs[i]); + } + kfree(card->qdio.out_qs); + card->qdio.out_qs = NULL; + } +} + static void qeth_create_qib_param_field(struct qeth_card *card, char *param_field) { @@ -2788,8 +2828,8 @@ int qeth_init_qdio_queues(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "initqdqs"); /* inbound queue */ - memset(card->qdio.in_q->qdio_bufs, 0, - QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); + qdio_reset_buffers(card->qdio.in_q->qdio_bufs, + QDIO_MAX_BUFFERS_PER_Q); qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we have buffer pool entries*/ for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) @@ -2811,8 +2851,8 @@ int qeth_init_qdio_queues(struct qeth_card *card) /* outbound queue */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - memset(card->qdio.out_qs[i]->qdio_bufs, 0, - QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); + qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs, + QDIO_MAX_BUFFERS_PER_Q); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { qeth_clear_output_buffer(card->qdio.out_qs[i], card->qdio.out_qs[i]->bufs[j], @@ -3569,7 +3609,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, for (i = first_element; i < first_element + count; ++i) { int bidx = i % QDIO_MAX_BUFFERS_PER_Q; - struct qdio_buffer *buffer = &cq->qdio_bufs[bidx]; + struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; int e; e = 0; @@ -5757,6 +5797,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, struct qeth_card *card = netdev->ml_priv; enum qeth_link_types link_type; struct carrier_info carrier_info; + int rc; u32 speed; if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) @@ -5799,8 +5840,14 @@ int 
qeth_core_ethtool_get_settings(struct net_device *netdev, /* Check if we can obtain more accurate information. */ /* If QUERY_CARD_INFO command is not supported or fails, */ /* just return the heuristics that was filled above. */ - if (qeth_query_card_info(card, &carrier_info) != 0) + if (!qeth_card_hw_is_reachable(card)) + return -ENODEV; + rc = qeth_query_card_info(card, &carrier_info); + if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ return 0; + if (rc) /* report error from the hardware operation */ + return rc; + /* on success, fill in the information got from the hardware */ netdev_dbg(netdev, "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index ae1bc04b8653..59e3aa538b4d 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -5,17 +5,12 @@ #include <linux/slab.h> #include <asm/ebcdic.h> +#include "qeth_core.h" #include "qeth_l2.h" #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) -static int qeth_card_hw_is_reachable(struct qeth_card *card) -{ - return (card->state == CARD_STATE_SOFTSETUP) || - (card->state == CARD_STATE_UP); -} - static ssize_t qeth_bridge_port_role_state_show(struct device *dev, struct device_attribute *attr, char *buf, int show_state) diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 06025cdaa4ad..495e1cb3afa6 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -14,27 +14,10 @@ #include "zfcp_ext.h" #include "zfcp_qdio.h" -#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) - static bool enable_multibuffer = 1; module_param_named(datarouter, enable_multibuffer, bool, 0400); MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)"); -static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) -{ - int pos; - - for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) { - sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL); - if (!sbal[pos]) - return -ENOMEM; - } - for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++) - if (pos % QBUFF_PER_PAGE) - sbal[pos] = sbal[pos - 1] + 1; - return 0; -} - static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, unsigned int qdio_err) { @@ -326,15 +309,30 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) { struct qdio_initialize init_data; + int ret; - if (zfcp_qdio_buffers_enqueue(qdio->req_q) || - zfcp_qdio_buffers_enqueue(qdio->res_q)) + ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q); + if (ret) return -ENOMEM; + ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q); + if (ret) + goto free_req_q; + zfcp_qdio_setup_init_data(&init_data, qdio); init_waitqueue_head(&qdio->req_q_wq); - return qdio_allocate(&init_data); + ret = qdio_allocate(&init_data); + if (ret) + goto free_res_q; + + return 0; + +free_res_q: + qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q); +free_req_q: + qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q); + return ret; } /** @@ -448,19 +446,14 @@ failed_establish: void zfcp_qdio_destroy(struct zfcp_qdio *qdio) { - int p; - if (!qdio) return; if (qdio->adapter->ccw_device) qdio_free(qdio->adapter->ccw_device); - for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { - free_page((unsigned long) qdio->req_q[p]); - free_page((unsigned long) 
qdio->res_q[p]); - } - + qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q); + qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q); kfree(qdio); } @@ -475,7 +468,7 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter) qdio->adapter = adapter; if (zfcp_qdio_allocate(qdio)) { - zfcp_qdio_destroy(qdio); + kfree(qdio); return -ENOMEM; } diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 4de346017e9f..6da6cec9a651 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -683,14 +683,13 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) unsigned long *cpu_addr; int retval = 1; - cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle); + cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH, + &dma_handle); if (!cpu_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); goto out; } - memset(cpu_addr, 0, size*TW_Q_LENGTH); - for (i = 0; i < TW_Q_LENGTH; i++) { switch(which) { case 0: diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 972f8176665f..64c75143c89a 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -3893,7 +3893,7 @@ __setup("BusLogic=", blogic_setup); PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } };*/ -static DEFINE_PCI_DEVICE_TABLE(blogic_pci_tbl) = { +static const struct pci_device_id blogic_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)}, {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)}, {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)}, diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 18a3358eb1d4..bd85fb4978e0 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -43,7 +43,7 @@ config SCSI_DMA config SCSI_NETLINK bool default n - select NET + depends on NET config SCSI_PROC_FS bool "legacy /proc/scsi/ support" @@ -257,7 +257,7 @@ config SCSI_SPI_ATTRS config SCSI_FC_ATTRS tristate "FiberChannel Transport Attributes" - depends on SCSI + depends on SCSI && NET select SCSI_NETLINK help If you wish to export transport-specific information about @@ -585,28 +585,28 @@ config HYPERV_STORAGE config LIBFC tristate "LibFC module" - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS select CRC32 ---help--- Fibre Channel library module config LIBFCOE tristate "LibFCoE module" - select LIBFC + depends on LIBFC ---help--- Library for Fibre Channel over Ethernet module config FCOE tristate "FCoE module" depends on PCI - select LIBFCOE + depends on LIBFCOE ---help--- Fibre Channel over Ethernet module config FCOE_FNIC tristate "Cisco FNIC Driver" depends on PCI && X86 - select LIBFCOE + depends on LIBFCOE help This is support for the Cisco PCI-Express FCoE HBA. 
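The qdio_setup.c hunks above add qdio_alloc_buffers(), qdio_free_buffers() and qdio_reset_buffers(), and the zfcp and qeth changes convert their open-coded page allocation loops to them. The sketch below shows how a driver-side queue might use the helpers, assuming the declarations are visible via the usual s390 qdio header; struct demo_queue and the demo_* functions are hypothetical.

#include <linux/slab.h>
#include <asm/qdio.h>

struct demo_queue {
        struct qdio_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
};

static struct demo_queue *demo_queue_alloc(void)
{
        struct demo_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);

        if (!q)
                return NULL;
        /* fills the pointer array page-wise with zeroed qdio buffers */
        if (qdio_alloc_buffers(q->bufs, QDIO_MAX_BUFFERS_PER_Q)) {
                kfree(q);
                return NULL;
        }
        return q;
}

static void demo_queue_reinit(struct demo_queue *q)
{
        /* zero all buffers before handing the queue back to the hardware */
        qdio_reset_buffers(q->bufs, QDIO_MAX_BUFFERS_PER_Q);
}

static void demo_queue_free(struct demo_queue *q)
{
        if (!q)
                return;
        qdio_free_buffers(q->bufs, QDIO_MAX_BUFFERS_PER_Q);
        kfree(q);
}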
@@ -816,7 +816,7 @@ config SCSI_IBMVSCSI config SCSI_IBMVFC tristate "IBM Virtual FC support" depends on PPC_PSERIES && SCSI - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS help This is the IBM POWER Virtual FC Client @@ -1266,7 +1266,7 @@ source "drivers/scsi/qla4xxx/Kconfig" config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse @@ -1676,7 +1676,7 @@ config SCSI_SUNESP config ZFCP tristate "FCP host bus adapter driver for IBM eServer zSeries" depends on S390 && QDIO && SCSI - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS help If you want to access SCSI devices attached to your IBM eServer zSeries by means of Fibre Channel interfaces say Y. @@ -1704,7 +1704,7 @@ config SCSI_PM8001 config SCSI_BFA_FC tristate "Brocade BFA Fibre Channel Support" depends on PCI && SCSI - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS help This bfa driver supports all Brocade PCIe FC/FCOE host adapters. diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index 522570d297ca..7e33a61c1ba4 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -1125,23 +1125,19 @@ static int inia100_probe_one(struct pci_dev *pdev, /* Get total memory needed for SCB */ sz = ORC_MAXQUEUE * sizeof(struct orc_scb); - host->scb_virt = pci_alloc_consistent(pdev, sz, - &host->scb_phys); + host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys); if (!host->scb_virt) { printk("inia100: SCB memory allocation error\n"); goto out_host_put; } - memset(host->scb_virt, 0, sz); /* Get total memory needed for ESCB */ sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); - host->escb_virt = pci_alloc_consistent(pdev, sz, - &host->escb_phys); + host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys); if (!host->escb_virt) { printk("inia100: ESCB memory allocation error\n"); goto out_free_scb_array; } - memset(host->escb_virt, 0, sz); biosaddr = host->BIOScfg; biosaddr = (biosaddr << 4); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 56467df3d6de..915c26b23ab6 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -539,7 +539,7 @@ static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) } /*------------------- PCI Driver operations and data ----------------- */ -static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { +static const struct pci_device_id beiscsi_pci_id_table[] = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, @@ -3538,10 +3538,9 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; - mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); + mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); if (!mem->va) return -ENOMEM; - memset(mem->va, 0, mem->size); return 0; } @@ -4320,9 +4319,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) "BM_%d : No boot session\n"); return ret; } - nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(*session_resp), - &nonemb_cmd.dma); + nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, + sizeof(*session_resp), + &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, @@ -4332,7 +4331,6 @@ static int beiscsi_get_boot_info(struct 
beiscsi_hba *phba) return -ENOMEM; } - memset(nonemb_cmd.va, 0, sizeof(*session_resp)); tag = mgmt_get_session_info(phba, s_handle, &nonemb_cmd); if (!tag) { diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index a3e56487616c..665afcb74a56 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -900,13 +900,12 @@ free_cmd: static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, int iscsi_cmd, int size) { - cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma); + cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma); if (!cmd->va) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BG_%d : Failed to allocate memory for if info\n"); return -ENOMEM; } - memset(cmd->va, 0, size); cmd->size = size; be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); return 0; diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig index f245d543d7b1..097882882649 100644 --- a/drivers/scsi/bnx2fc/Kconfig +++ b/drivers/scsi/bnx2fc/Kconfig @@ -1,11 +1,12 @@ config SCSI_BNX2X_FCOE tristate "QLogic NetXtreme II FCoE support" depends on PCI + depends on (IPV6 || IPV6=n) + depends on LIBFC + depends on LIBFCOE select NETDEVICES select ETHERNET select NET_VENDOR_BROADCOM - select LIBFC - select LIBFCOE select CNIC ---help--- This driver supports FCoE offload for the QLogic NetXtreme II diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index 44ce54e536e5..ba30ff86d581 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig @@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI tristate "QLogic NetXtreme II iSCSI support" depends on NET depends on PCI + depends on (IPV6 || IPV6=n) select SCSI_ISCSI_ATTRS select NETDEVICES select ETHERNET diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig index 4d03b032aa10..7c7e5085968b 100644 --- a/drivers/scsi/csiostor/Kconfig +++ b/drivers/scsi/csiostor/Kconfig @@ -1,7 +1,7 @@ config SCSI_CHELSIO_FCOE tristate "Chelsio Communications FCoE support" depends on PCI && SCSI - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS select FW_LOADER help This driver supports FCoE Offload functionality over diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 1aafc331ee63..17794add855c 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -1167,7 +1167,7 @@ static struct pci_error_handlers csio_err_handler = { .resume = csio_pci_resume, }; -static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = { +static const struct pci_device_id csio_pci_tbl[] = { CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */ CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index 4255ce264abf..773da14cfa14 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -232,7 +232,7 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, q = wrm->q_arr[free_idx]; - q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart); + q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart); if (!q->vstart) { csio_err(hw, "Failed to allocate DMA memory for " @@ -240,12 +240,6 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize, return -1; } - /* - * We need to zero out the contents, importantly for ingress, - * since we start with a generatiom bit of 1 for ingress. 
- */ - memset(q->vstart, 0, qsz); - q->type = type; q->owner = owner; q->pidx = q->cidx = q->inc_idx = 0; diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig index 6bbc36fbd6ec..e4603985dce3 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig @@ -1,6 +1,6 @@ config SCSI_CXGB3_ISCSI tristate "Chelsio T3 iSCSI support" - depends on PCI && INET + depends on PCI && INET && (IPV6 || IPV6=n) select NETDEVICES select ETHERNET select NET_VENDOR_CHELSIO diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig index 16b2c7d26617..8c4e423037b6 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -1,6 +1,6 @@ config SCSI_CXGB4_ISCSI tristate "Chelsio T4 iSCSI support" - depends on PCI && INET + depends on PCI && INET && (IPV6 || IPV6=n) select NETDEVICES select ETHERNET select NET_VENDOR_CHELSIO diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 79788a12712d..02e69e7ee4a3 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1647,7 +1647,7 @@ static int cxgbi_inet6addr_handler(struct notifier_block *this, if (event_dev->priv_flags & IFF_802_1Q_VLAN) event_dev = vlan_dev_real_dev(event_dev); - cdev = cxgbi_device_find_by_netdev(event_dev, NULL); + cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL); if (!cdev) return ret; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d65df6dc106f..addd1dddce14 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -57,6 +57,9 @@ MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); static LIST_HEAD(cdev_list); static DEFINE_MUTEX(cdev_mutex); +static LIST_HEAD(cdev_rcu_list); +static DEFINE_SPINLOCK(cdev_rcu_lock); + int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, unsigned int max_conn) { @@ -142,6 +145,10 @@ struct cxgbi_device *cxgbi_device_register(unsigned int extra, list_add_tail(&cdev->list_head, &cdev_list); mutex_unlock(&cdev_mutex); + spin_lock(&cdev_rcu_lock); + list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list); + spin_unlock(&cdev_rcu_lock); + log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p# %u.\n", cdev, nports); return cdev; @@ -153,9 +160,16 @@ void cxgbi_device_unregister(struct cxgbi_device *cdev) log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p# %u,%s.\n", cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); + mutex_lock(&cdev_mutex); list_del(&cdev->list_head); mutex_unlock(&cdev_mutex); + + spin_lock(&cdev_rcu_lock); + list_del_rcu(&cdev->rcu_node); + spin_unlock(&cdev_rcu_lock); + synchronize_rcu(); + cxgbi_device_destroy(cdev); } EXPORT_SYMBOL_GPL(cxgbi_device_unregister); @@ -167,12 +181,9 @@ void cxgbi_device_unregister_all(unsigned int flag) mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { if ((cdev->flags & flag) == flag) { - log_debug(1 << CXGBI_DBG_DEV, - "cdev 0x%p, p# %u,%s.\n", - cdev, cdev->nports, cdev->nports ? 
- cdev->ports[0]->name : ""); - list_del(&cdev->list_head); - cxgbi_device_destroy(cdev); + mutex_unlock(&cdev_mutex); + cxgbi_device_unregister(cdev); + mutex_lock(&cdev_mutex); } } mutex_unlock(&cdev_mutex); @@ -191,6 +202,7 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) } } mutex_unlock(&cdev_mutex); + log_debug(1 << CXGBI_DBG_DEV, "lldev 0x%p, NO match found.\n", lldev); return NULL; @@ -230,6 +242,39 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, } EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); +struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev, + int *port) +{ + struct net_device *vdev = NULL; + struct cxgbi_device *cdev; + int i; + + if (ndev->priv_flags & IFF_802_1Q_VLAN) { + vdev = ndev; + ndev = vlan_dev_real_dev(ndev); + pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); + } + + rcu_read_lock(); + list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) { + for (i = 0; i < cdev->nports; i++) { + if (ndev == cdev->ports[i]) { + cdev->hbas[i]->vdev = vdev; + rcu_read_unlock(); + if (port) + *port = i; + return cdev; + } + } + } + rcu_read_unlock(); + + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu); + static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, int *port) { diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index b3e6e7541cc5..1d98fad6a0ab 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -527,6 +527,7 @@ struct cxgbi_ports_map { #define CXGBI_FLAG_IPV4_SET 0x10 struct cxgbi_device { struct list_head list_head; + struct list_head rcu_node; unsigned int flags; struct net_device **ports; void *lldev; @@ -709,6 +710,8 @@ void cxgbi_device_unregister(struct cxgbi_device *); void cxgbi_device_unregister_all(unsigned int flag); struct cxgbi_device *cxgbi_device_find_by_lldev(void *); struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); +struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *, + int *); int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int, struct scsi_host_template *, struct scsi_transport_template *); diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index 03372cff38f3..813dd5c998e4 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -1238,8 +1238,8 @@ static int port_detect(unsigned long port_base, unsigned int j, struct eata_config *cf; dma_addr_t cf_dma_addr; - cf = pci_alloc_consistent(pdev, sizeof(struct eata_config), - &cf_dma_addr); + cf = pci_zalloc_consistent(pdev, sizeof(struct eata_config), + &cf_dma_addr); if (!cf) { printk @@ -1249,7 +1249,6 @@ static int port_detect(unsigned long port_base, unsigned int j, } /* Set board configuration */ - memset((char *)cf, 0, sizeof(struct eata_config)); cf->len = (ushort) H2DEV16((ushort) 510); cf->ocena = 1; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8545d1826725..6b35d0dfe64c 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4732,23 +4732,21 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) union u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; - c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); + c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); if (c == NULL) return NULL; - memset(c, 0, sizeof(*c)); c->cmd_type = CMD_SCSI; c->cmdindex = -1; - c->err_info = pci_alloc_consistent(h->pdev, 
sizeof(*c->err_info), - &err_dma_handle); + c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info), + &err_dma_handle); if (c->err_info == NULL) { pci_free_consistent(h->pdev, sizeof(*c), c, cmd_dma_handle); return NULL; } - memset(c->err_info, 0, sizeof(*c->err_info)); INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 4198e45ea941..2e890b1e2526 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -75,7 +75,7 @@ MODULE_VERSION(DRV_VERSION); static struct scsi_transport_template *isci_transport_template; -static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = { +static const struct pci_device_id isci_id_table[] = { { PCI_VDEVICE(INTEL, 0x1D61),}, { PCI_VDEVICE(INTEL, 0x1D63),}, { PCI_VDEVICE(INTEL, 0x1D65),}, diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index f9f3a1224dfa..191b59793519 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, return NULL; } + if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) { + iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN); + return NULL; + } + task = conn->login_task; } else { if (session->state != ISCSI_STATE_LOGGED_IN) return NULL; + if (data_size != 0) { + iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode); + return NULL; + } + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); @@ -2097,7 +2107,7 @@ static void iscsi_check_transport_timeouts(unsigned long data) conn->ping_timeout, conn->recv_timeout, last_recv, conn->last_ping, jiffies); spin_unlock(&session->frwd_lock); - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT); return; } diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index e2237a97cb9d..531dce419c18 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -998,8 +998,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) * Allocate the common 16-byte aligned memory for the handshake * mailbox. */ - raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev, - sizeof(mbox64_t), &raid_dev->una_mbox64_dma); + raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev, + sizeof(mbox64_t), + &raid_dev->una_mbox64_dma); if (!raid_dev->una_mbox64) { con_log(CL_ANN, (KERN_WARNING @@ -1007,7 +1008,6 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) __LINE__)); return -1; } - memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t)); /* * Align the mailbox at 16-byte boundary @@ -1026,8 +1026,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) align; // Allocate memory for commands issued internally - adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE, - &adapter->ibuf_dma_h); + adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE, + &adapter->ibuf_dma_h); if (!adapter->ibuf) { con_log(CL_ANN, (KERN_WARNING @@ -1036,7 +1036,6 @@ megaraid_alloc_cmd_packets(adapter_t *adapter) goto out_free_common_mbox; } - memset(adapter->ibuf, 0, MBOX_IBUF_SIZE); // Allocate memory for our SCSI Command Blocks and their associated // memory @@ -2972,8 +2971,8 @@ megaraid_mbox_product_info(adapter_t *adapter) * Issue an ENQUIRY3 command to find out certain adapter parameters, * e.g., max channels, max commands etc. 
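The libcxgbi hunks earlier in this section add a second list of cxgbi devices, protected by RCU for readers and a spinlock for writers, so that the cxgb4i inet6addr notifier can look a device up via cxgbi_device_find_by_netdev_rcu() without taking cdev_mutex. A stripped-down sketch of that reader/writer split follows; the "example" names are hypothetical, not the driver's own.

/* Illustrative sketch only -- hypothetical names, not the libcxgbi code. */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_dev {
	struct list_head rcu_node;
	int id;
};

static LIST_HEAD(example_rcu_list);
static DEFINE_SPINLOCK(example_rcu_lock);	/* serializes writers only */

static void example_register(struct example_dev *dev)
{
	spin_lock(&example_rcu_lock);
	list_add_tail_rcu(&dev->rcu_node, &example_rcu_list);
	spin_unlock(&example_rcu_lock);
}

static void example_unregister(struct example_dev *dev)
{
	spin_lock(&example_rcu_lock);
	list_del_rcu(&dev->rcu_node);
	spin_unlock(&example_rcu_lock);
	synchronize_rcu();	/* wait for in-flight readers before freeing */
}

static struct example_dev *example_find(int id)
{
	struct example_dev *dev;

	rcu_read_lock();	/* cheap, usable from notifier context */
	list_for_each_entry_rcu(dev, &example_rcu_list, rcu_node) {
		if (dev->id == id) {
			rcu_read_unlock();
			/* as in the patch, the caller must ensure the
			 * device outlives its use of the result */
			return dev;
		}
	}
	rcu_read_unlock();
	return NULL;
}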
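Several hunks in this area (eata and hpsa above, and the megaraid, megaraid_sas, mesh and mvumi ones that follow) fold pci_alloc_consistent() plus an explicit memset() into a single pci_zalloc_consistent() call, which returns already-zeroed DMA-coherent memory. A minimal sketch of the conversion, with a hypothetical helper name:

/* Illustrative only; the helper name is a placeholder. */
#include <linux/pci.h>

static void *example_alloc_zeroed(struct pci_dev *pdev, size_t len,
				  dma_addr_t *dma)
{
	/*
	 * Old pattern:
	 *	buf = pci_alloc_consistent(pdev, len, dma);
	 *	if (buf)
	 *		memset(buf, 0, len);
	 * New pattern: one call, memory comes back zeroed.
	 */
	return pci_zalloc_consistent(pdev, len, dma);
}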
*/ - pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t), - &pinfo_dma_h); + pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t), + &pinfo_dma_h); if (pinfo == NULL) { con_log(CL_ANN, (KERN_WARNING @@ -2982,7 +2981,6 @@ megaraid_mbox_product_info(adapter_t *adapter) return -1; } - memset(pinfo, 0, sizeof(mraid_pinfo_t)); mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 112799b131a9..22a04e37b70a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2038,9 +2038,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, if (initial) { instance->hb_host_mem = - pci_alloc_consistent(instance->pdev, - sizeof(struct MR_CTRL_HB_HOST_MEM), - &instance->hb_host_mem_h); + pci_zalloc_consistent(instance->pdev, + sizeof(struct MR_CTRL_HB_HOST_MEM), + &instance->hb_host_mem_h); if (!instance->hb_host_mem) { printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate" " memory for heartbeat host memory for " @@ -2048,8 +2048,6 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, retval = -ENOMEM; goto out; } - memset(instance->hb_host_mem, 0, - sizeof(struct MR_CTRL_HB_HOST_MEM)); } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 7a6160f172ce..57a95e2c3442 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1915,14 +1915,12 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) /* We use the PCI APIs for now until the generic one gets fixed * enough or until we get some macio-specific versions */ - dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev), - ms->dma_cmd_size, - &dma_cmd_bus); + dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev), + ms->dma_cmd_size, &dma_cmd_bus); if (dma_cmd_space == NULL) { printk(KERN_ERR "mesh: can't allocate DMA table\n"); goto out_unmap; } - memset(dma_cmd_space, 0, ms->dma_cmd_size); ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space); ms->dma_cmd_space = dma_cmd_space; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 7cf48c5c15a7..135f12c20ecf 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -247,7 +247,7 @@ struct _scsi_io_transfer { /* * The pci device ids are defined in mpi/mpi2_cnfg.h. 
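The mpt3sas table that follows, like the isci, mvumi and ufshcd-pci ones elsewhere in this section, drops the deprecated DEFINE_PCI_DEVICE_TABLE() macro in favour of a plain const struct pci_device_id array. A sketch of the replacement form, with placeholder device IDs and table name:

/* Illustrative only; IDs and table name are placeholders. */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x1d61) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1d63) },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);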
*/ -static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = { +static const struct pci_device_id scsih_pci_table[] = { /* Fury ~ 3004 and 3008 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, PCI_ANY_ID, PCI_ANY_ID }, diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index edbee8dc62c9..3e6b866759fe 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -48,7 +48,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("jyli@marvell.com"); MODULE_DESCRIPTION("Marvell UMI Driver"); -static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { +static const struct pci_device_id mvumi_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) }, { 0 } @@ -142,8 +142,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, case RESOURCE_UNCACHED_MEMORY: size = round_up(size, 8); - res->virt_addr = pci_alloc_consistent(mhba->pdev, size, - &res->bus_addr); + res->virt_addr = pci_zalloc_consistent(mhba->pdev, size, + &res->bus_addr); if (!res->virt_addr) { dev_err(&mhba->pdev->dev, "unable to allocate consistent mem," @@ -151,7 +151,6 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, kfree(res); return NULL; } - memset(res->virt_addr, 0, size); break; default: @@ -258,12 +257,10 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, if (size == 0) return 0; - virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr); + virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr); if (!virt_addr) return -1; - memset(virt_addr, 0, size); - m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; cmd->frame->sg_counts = 1; cmd->data_buf = virt_addr; diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index d3a08aea0948..7abbf284da1a 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -526,18 +526,19 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) { struct pm8001_ioctl_payload *payload; DECLARE_COMPLETION_ONSTACK(completion); - u8 *ioctlbuffer = NULL; - u32 length = 0; - u32 ret = 0; + u8 *ioctlbuffer; + u32 ret; + u32 length = 1024 * 5 + sizeof(*payload) - 1; + + if (pm8001_ha->fw_image->size > 4096) { + pm8001_ha->fw_status = FAIL_FILE_SIZE; + return -EFAULT; + } - length = 1024 * 5 + sizeof(*payload) - 1; ioctlbuffer = kzalloc(length, GFP_KERNEL); - if (!ioctlbuffer) + if (!ioctlbuffer) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; return -ENOMEM; - if ((pm8001_ha->fw_image->size <= 0) || - (pm8001_ha->fw_image->size > 4096)) { - ret = FAIL_FILE_SIZE; - goto out; } payload = (struct pm8001_ioctl_payload *)ioctlbuffer; memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, @@ -547,6 +548,10 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) payload->minor_function = 0x1; pm8001_ha->nvmd_completion = &completion; ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); + if (ret) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + goto out; + } wait_for_completion(&completion); out: kfree(ioctlbuffer); @@ -557,35 +562,31 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) { struct pm8001_ioctl_payload *payload; DECLARE_COMPLETION_ONSTACK(completion); - u8 *ioctlbuffer = NULL; - u32 length = 0; + u8 *ioctlbuffer; struct fw_control_info *fwControl; - u32 loopNumber, loopcount = 0; - u32 sizeRead = 0; u32 partitionSize, partitionSizeTmp; - u32 ret = 0; - u32 partitionNumber = 0; + u32 loopNumber, loopcount; 
struct pm8001_fw_image_header *image_hdr; + u32 sizeRead = 0; + u32 ret = 0; + u32 length = 1024 * 16 + sizeof(*payload) - 1; - length = 1024 * 16 + sizeof(*payload) - 1; + if (pm8001_ha->fw_image->size < 28) { + pm8001_ha->fw_status = FAIL_FILE_SIZE; + return -EFAULT; + } ioctlbuffer = kzalloc(length, GFP_KERNEL); - image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; - if (!ioctlbuffer) + if (!ioctlbuffer) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; return -ENOMEM; - if (pm8001_ha->fw_image->size < 28) { - ret = FAIL_FILE_SIZE; - goto out; } - + image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data; while (sizeRead < pm8001_ha->fw_image->size) { partitionSizeTmp = *(u32 *)((u8 *)&image_hdr->image_length + sizeRead); partitionSize = be32_to_cpu(partitionSizeTmp); - loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE; - if (loopcount % IOCTL_BUF_SIZE) - loopcount++; - if (loopcount == 0) - loopcount++; + loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN, + IOCTL_BUF_SIZE); for (loopNumber = 0; loopNumber < loopcount; loopNumber++) { payload = (struct pm8001_ioctl_payload *)ioctlbuffer; payload->length = 1024*16; @@ -617,18 +618,18 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) pm8001_ha->nvmd_completion = &completion; ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload); - if (ret) - break; + if (ret) { + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + goto out; + } wait_for_completion(&completion); if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) { - ret = fwControl->retcode; - break; + pm8001_ha->fw_status = fwControl->retcode; + ret = -EFAULT; + goto out; + } } } - if (ret) - break; - partitionNumber++; -} out: kfree(ioctlbuffer); return ret; @@ -643,22 +644,29 @@ static ssize_t pm8001_store_update_fw(struct device *cdev, char *cmd_ptr, *filename_ptr; int res, i; int flash_command = FLASH_CMD_NONE; - int err = 0; + int ret; + if (!capable(CAP_SYS_ADMIN)) return -EACCES; - cmd_ptr = kzalloc(count*2, GFP_KERNEL); + /* this test protects us from running two flash processes at once, + * so we should start with this test */ + if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) + return -EINPROGRESS; + pm8001_ha->fw_status = FLASH_IN_PROGRESS; + cmd_ptr = kzalloc(count*2, GFP_KERNEL); if (!cmd_ptr) { - err = FAIL_OUT_MEMORY; - goto out; + pm8001_ha->fw_status = FAIL_OUT_MEMORY; + return -ENOMEM; } filename_ptr = cmd_ptr + count; res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr); if (res != 2) { - err = FAIL_PARAMETERS; - goto out1; + pm8001_ha->fw_status = FAIL_PARAMETERS; + ret = -EINVAL; + goto out; } for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) { @@ -669,50 +677,38 @@ static ssize_t pm8001_store_update_fw(struct device *cdev, } } if (flash_command == FLASH_CMD_NONE) { - err = FAIL_PARAMETERS; - goto out1; + pm8001_ha->fw_status = FAIL_PARAMETERS; + ret = -EINVAL; + goto out; } - if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) { - err = FLASH_IN_PROGRESS; - goto out1; - } - err = request_firmware(&pm8001_ha->fw_image, + ret = request_firmware(&pm8001_ha->fw_image, filename_ptr, pm8001_ha->dev); - if (err) { + if (ret) { PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("Failed to load firmware image file %s," - " error %d\n", filename_ptr, err)); - err = FAIL_OPEN_BIOS_FILE; - goto out1; + pm8001_printk( + "Failed to load firmware image file %s, error %d\n", + filename_ptr, ret)); + pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE; + goto out; } - switch (flash_command) { - case FLASH_CMD_UPDATE: - pm8001_ha->fw_status = 
FLASH_IN_PROGRESS; - err = pm8001_update_flash(pm8001_ha); - break; - case FLASH_CMD_SET_NVMD: - pm8001_ha->fw_status = FLASH_IN_PROGRESS; - err = pm8001_set_nvmd(pm8001_ha); - break; - default: - pm8001_ha->fw_status = FAIL_PARAMETERS; - err = FAIL_PARAMETERS; - break; - } + if (FLASH_CMD_UPDATE == flash_command) + ret = pm8001_update_flash(pm8001_ha); + else + ret = pm8001_set_nvmd(pm8001_ha); + release_firmware(pm8001_ha->fw_image); -out1: - kfree(cmd_ptr); out: - pm8001_ha->fw_status = err; + kfree(cmd_ptr); - if (!err) - return count; - else - return -err; + if (ret) + return ret; + + pm8001_ha->fw_status = FLASH_OK; + return count; } static ssize_t pm8001_show_update_fw(struct device *cdev, diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 173831016f5f..dd12c6fe57a6 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4824,7 +4824,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { kfree(fw_control_context); - return rc; + return -EBUSY; } ccb = &pm8001_ha->ccb_info[tag]; ccb->fw_control_context = fw_control_context; @@ -4946,7 +4946,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { kfree(fw_control_context); - return rc; + return -EBUSY; } ccb = &pm8001_ha->ccb_info[tag]; ccb->fw_control_context = fw_control_context; diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index e49623a897a7..666bf5af06e2 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -748,34 +748,35 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) sizeof(pm8001_ha->msix_entries[0]); for (i = 0; i < max_entry ; i++) pm8001_ha->msix_entries[i].entry = i; - rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, + rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; - if (!rc) { - PM8001_INIT_DBG(pm8001_ha, pm8001_printk( - "pci_enable_msix request ret:%d no of intr %d\n", - rc, pm8001_ha->number_of_intr)); + if (rc) + return rc; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "pci_enable_msix_exact request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr)); - for (i = 0; i < number_of_intr; i++) { - snprintf(intr_drvname[i], sizeof(intr_drvname[0]), - DRV_NAME"%d", i); - pm8001_ha->irq_vector[i].irq_id = i; - pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; + for (i = 0; i < number_of_intr; i++) { + snprintf(intr_drvname[i], sizeof(intr_drvname[0]), + DRV_NAME"%d", i); + pm8001_ha->irq_vector[i].irq_id = i; + pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; - rc = request_irq(pm8001_ha->msix_entries[i].vector, - pm8001_interrupt_handler_msix, flag, - intr_drvname[i], &(pm8001_ha->irq_vector[i])); - if (rc) { - for (j = 0; j < i; j++) - free_irq( - pm8001_ha->msix_entries[j].vector, + rc = request_irq(pm8001_ha->msix_entries[i].vector, + pm8001_interrupt_handler_msix, flag, + intr_drvname[i], &(pm8001_ha->irq_vector[i])); + if (rc) { + for (j = 0; j < i; j++) { + free_irq(pm8001_ha->msix_entries[j].vector, &(pm8001_ha->irq_vector[i])); - pci_disable_msix(pm8001_ha->pdev); - break; } + pci_disable_msix(pm8001_ha->pdev); + break; } } + return rc; } #endif diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 34cea8291772..76570e6a547d 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ 
b/drivers/scsi/pm8001/pm8001_sas.c @@ -116,13 +116,12 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, u64 align_offset = 0; if (align) align_offset = (dma_addr_t)align - 1; - mem_virt_alloc = - pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle); + mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align, + &mem_dma_handle); if (!mem_virt_alloc) { pm8001_printk("memory allocation error\n"); return -1; } - memset((void *)mem_virt_alloc, 0, mem_size+align); *pphys_addr = mem_dma_handle; phys_align = (*pphys_addr + align_offset) & ~align_offset; *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 017f8b9554e5..6f3275d020a0 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4213,9 +4213,9 @@ static ssize_t pmcraid_store_log_level( { struct Scsi_Host *shost; struct pmcraid_instance *pinstance; - unsigned long val; + u8 val; - if (strict_strtoul(buf, 10, &val)) + if (kstrtou8(buf, 10, &val)) return -EINVAL; /* log-level should be from 0 to 2 */ if (val > 2) diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 23d607218ae8..113e6c9826a1 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -1,7 +1,7 @@ config SCSI_QLA_FC tristate "QLogic QLA2XXX Fibre Channel Support" depends on PCI && SCSI - select SCSI_FC_ATTRS + depends on SCSI_FC_ATTRS select FW_LOADER ---help--- This qla2xxx driver supports all QLogic Fibre Channel @@ -31,7 +31,7 @@ config SCSI_QLA_FC config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" depends on SCSI_QLA_FC && TARGET_CORE - select LIBFC + depends on LIBFC select BTREE default n ---help--- diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 6f12f859b11d..4180d6d9fe78 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -334,6 +334,12 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) /* Allocate memory for saving the template */ md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size, &md_tmp_dma, GFP_KERNEL); + if (!md_tmp) { + ql4_printk(KERN_INFO, ha, + "scsi%ld: Failed to allocate DMA memory\n", + ha->host_no); + return; + } /* Request template */ status = qla4xxx_get_minidump_template(ha, md_tmp_dma); diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index fdfae79924ac..c291fdff1b33 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -1620,8 +1620,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, goto exit_get_chap; } - strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); - strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); exit_get_chap: @@ -1663,8 +1663,8 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, else chap_table->flags |= BIT_7; /* local */ chap_table->secret_len = strlen(password); - strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN); - strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN); + strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1); + strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1); chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); if (is_qla40XX(ha)) { @@ -1742,8 +1742,8 
@@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, goto exit_unlock_uni_chap; } - strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); - strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN); + strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); + strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN); rval = QLA_SUCCESS; @@ -2295,7 +2295,7 @@ int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param) if (param == SET_DRVR_VERSION) { mbox_cmd[1] = SET_DRVR_VERSION; strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION, - MAX_DRVR_VER_LEN); + MAX_DRVR_VER_LEN - 1); } else { ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n", __func__, param); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 9dbdb4be2d8f..7c3365864242 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -4221,7 +4221,7 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha) for (i = 0; i < QLA_MSIX_ENTRIES; i++) entries[i].entry = qla4_8xxx_msix_entries[i].entry; - ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries)); + ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries)); if (ret) { ql4_printk(KERN_WARNING, ha, "MSI-X: Failed to enable support -- %d/%d\n", diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index c5d9564d455c..199fcf79a051 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -756,9 +756,9 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, continue; chap_rec->chap_tbl_idx = i; - strncpy(chap_rec->username, chap_table->name, + strlcpy(chap_rec->username, chap_table->name, ISCSI_CHAP_AUTH_NAME_MAX_LEN); - strncpy(chap_rec->password, chap_table->secret, + strlcpy(chap_rec->password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); chap_rec->password_length = chap_table->secret_len; @@ -1050,6 +1050,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) if (!ql_iscsi_stats) { ql4_printk(KERN_ERR, ha, "Unable to allocate memory for iscsi stats\n"); + ret = -ENOMEM; goto exit_host_stats; } @@ -1058,6 +1059,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) if (ret != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, "Unable to retrieve iscsi stats\n"); + ret = -EIO; goto exit_host_stats; } host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); @@ -6027,8 +6029,8 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, if (!(chap_table->flags & BIT_6)) /* Not BIDI */ continue; - strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); - strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); ret = 0; break; } @@ -6258,8 +6260,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, tddb->tpgt = sess->tpgt; tddb->port = conn->persistent_port; - strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); - strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); + strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); + strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); } static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, @@ -7764,7 +7766,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, goto exit_ddb_logout; } - strncpy(flash_tddb->iscsi_name, fnode_sess->targetname, + 
strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, ISCSI_NAME_SIZE); if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index df3306019a7e..d81f3cc43ff1 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -377,6 +377,10 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost) pool->slab_flags |= SLAB_CACHE_DMA; pool->gfp_mask = __GFP_DMA; } + + if (hostt->cmd_size) + hostt->cmd_pool = pool; + return pool; } @@ -421,8 +425,10 @@ out: out_free_slab: kmem_cache_destroy(pool->cmd_slab); out_free_pool: - if (hostt->cmd_size) + if (hostt->cmd_size) { scsi_free_host_cmd_pool(pool); + hostt->cmd_pool = NULL; + } goto out; } @@ -444,8 +450,10 @@ static void scsi_put_host_cmd_pool(struct Scsi_Host *shost) if (!--pool->users) { kmem_cache_destroy(pool->cmd_slab); kmem_cache_destroy(pool->sense_slab); - if (hostt->cmd_size) + if (hostt->cmd_size) { scsi_free_host_cmd_pool(pool); + hostt->cmd_pool = NULL; + } } mutex_unlock(&host_cmd_pool_mutex); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9c44392b748f..aaea4b98af16 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -733,12 +733,13 @@ static bool scsi_end_request(struct request *req, int error, } else { unsigned long flags; + if (bidi_bytes) + scsi_release_bidi_buffers(cmd); + spin_lock_irqsave(q->queue_lock, flags); blk_finish_request(req, error); spin_unlock_irqrestore(q->queue_lock, flags); - if (bidi_bytes) - scsi_release_bidi_buffers(cmd); scsi_release_buffers(cmd); scsi_next_command(cmd); } @@ -1774,7 +1775,7 @@ static void scsi_request_fn(struct request_queue *q) blk_requeue_request(q, req); atomic_dec(&sdev->device_busy); out_delay: - if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev)) + if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev)) blk_delay_queue(q, SCSI_QUEUE_DELAY); } @@ -1808,7 +1809,6 @@ static int scsi_mq_prep_fn(struct request *req) cmd->tag = req->tag; - req->cmd = req->__cmd; cmd->cmnd = req->cmd; cmd->prot_op = SCSI_PROT_NORMAL; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 406b3038bbad..8b4105a22ac2 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -910,9 +910,9 @@ sdev_store_queue_ramp_up_period(struct device *dev, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); - unsigned long period; + unsigned int period; - if (strict_strtoul(buf, 10, &period)) + if (kstrtouint(buf, 10, &period)) return -EINVAL; sdev->queue_ramp_up_period = msecs_to_jiffies(period); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b481e62a12cc..67d43e35693d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3429,7 +3429,7 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) char *buf; if (!transport->get_host_stats) - return -EINVAL; + return -ENOSYS; priv = iscsi_if_transport_lookup(transport); if (!priv) @@ -3467,6 +3467,10 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) memset(buf, 0, host_stats_size); err = transport->get_host_stats(shost, buf, host_stats_size); + if (err) { + kfree_skb(skbhost_stats); + goto exit_host_stats; + } actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 43fea2219f83..ae45bd99baed 100644 --- 
a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -472,7 +472,8 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport) if (delay > 0) queue_delayed_work(system_long_wq, &rport->reconnect_work, 1UL * delay * HZ); - if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { + if ((fast_io_fail_tmo >= 0 || dev_loss_tmo >= 0) && + srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), rport->state); scsi_target_block(&shost->shost_gendev); diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 4e76fe863fc4..d8dcf36aed11 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -1006,7 +1006,7 @@ static int port_detect \ sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue); if (sh[j]->max_id > 8 || sh[j]->max_lun > 8) - printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n", + printk("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n", BN(j), sh[j]->max_id, sh[j]->max_lun); for (i = 0; i <= sh[j]->max_channel; i++) @@ -1285,7 +1285,7 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct cpp->cpp_index = i; SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index; - if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%llu.\n", + if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%u.\n", BN(j), i, SCpnt->device->channel, SCpnt->device->id, (u8)SCpnt->device->lun); diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index c007a7a69c28..afaabe2aeac8 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -185,7 +185,7 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = { .runtime_idle = ufshcd_pci_runtime_idle, }; -static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { +static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { } /* terminate list */ }; diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index 788ed9b59b4e..114203f32843 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile @@ -1,8 +1,7 @@ # # Makefile for the SuperH specific drivers. # -obj-$(CONFIG_SUPERH) += intc/ -obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/ +obj-$(CONFIG_SH_INTC) += intc/ ifneq ($(CONFIG_COMMON_CLK),y) obj-$(CONFIG_HAVE_CLK) += clk/ endif diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig index 60228fae943f..6a1b05ddc8c9 100644 --- a/drivers/sh/intc/Kconfig +++ b/drivers/sh/intc/Kconfig @@ -1,7 +1,9 @@ config SH_INTC - def_bool y + bool select IRQ_DOMAIN +if SH_INTC + comment "Interrupt controller options" config INTC_USERIMASK @@ -37,3 +39,5 @@ config INTC_MAPPING_DEBUG between system IRQs and the per-controller id tables. If in doubt, say N. 
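A note on the qla4xxx hunks earlier in this section: copies of CHAP names and secrets out of firmware tables move from strncpy(), which does not guarantee NUL termination when the source fills the buffer, to strlcpy(), which always terminates, and the remaining strncpy() calls are shortened by one byte for the same reason. The same series also replaces the deprecated strict_strtoul() sysfs parsers with kstrtou8()/kstrtouint() in pmcraid and scsi_sysfs. A minimal sketch of the copy pattern, with hypothetical buffer names:

/* Illustrative only; names and sizes are placeholders. */
#include <linux/string.h>

#define EXAMPLE_NAME_LEN 256

/* dst must point at a buffer of at least EXAMPLE_NAME_LEN bytes */
static void example_copy_name(char *dst, const char *fw_name)
{
	/*
	 * strncpy(dst, fw_name, EXAMPLE_NAME_LEN) may leave dst without a
	 * terminating NUL when fw_name occupies the whole buffer;
	 * strlcpy() always NUL-terminates within the given size.
	 */
	strlcpy(dst, fw_name, EXAMPLE_NAME_LEN);
}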
+ +endif diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 0f7c44793b29..3b1b95d932d1 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_ARCH_QCOM) += qcom/ +obj-$(CONFIG_ARCH_TEGRA) += tegra/ diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c index 447458e696a9..7e1f120f2b32 100644 --- a/drivers/soc/qcom/qcom_gsbi.c +++ b/drivers/soc/qcom/qcom_gsbi.c @@ -22,44 +22,63 @@ #define GSBI_CTRL_REG 0x0000 #define GSBI_PROTOCOL_SHIFT 4 +struct gsbi_info { + struct clk *hclk; + u32 mode; + u32 crci; +}; + static int gsbi_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct resource *res; void __iomem *base; - struct clk *hclk; - u32 mode, crci = 0; + struct gsbi_info *gsbi; + + gsbi = devm_kzalloc(&pdev->dev, sizeof(*gsbi), GFP_KERNEL); + + if (!gsbi) + return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); - if (of_property_read_u32(node, "qcom,mode", &mode)) { + if (of_property_read_u32(node, "qcom,mode", &gsbi->mode)) { dev_err(&pdev->dev, "missing mode configuration\n"); return -EINVAL; } /* not required, so default to 0 if not present */ - of_property_read_u32(node, "qcom,crci", &crci); + of_property_read_u32(node, "qcom,crci", &gsbi->crci); - dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci); + dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", + gsbi->mode, gsbi->crci); + gsbi->hclk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(gsbi->hclk)) + return PTR_ERR(gsbi->hclk); - hclk = devm_clk_get(&pdev->dev, "iface"); - if (IS_ERR(hclk)) - return PTR_ERR(hclk); + clk_prepare_enable(gsbi->hclk); - clk_prepare_enable(hclk); - - writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci, + writel_relaxed((gsbi->mode << GSBI_PROTOCOL_SHIFT) | gsbi->crci, base + GSBI_CTRL_REG); /* make sure the gsbi control write is not reordered */ wmb(); - clk_disable_unprepare(hclk); + platform_set_drvdata(pdev, gsbi); + + return of_platform_populate(node, NULL, NULL, &pdev->dev); +} + +static int gsbi_remove(struct platform_device *pdev) +{ + struct gsbi_info *gsbi = platform_get_drvdata(pdev); + + clk_disable_unprepare(gsbi->hclk); - return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + return 0; } static const struct of_device_id gsbi_dt_match[] = { @@ -76,6 +95,7 @@ static struct platform_driver gsbi_driver = { .of_match_table = gsbi_dt_match, }, .probe = gsbi_probe, + .remove = gsbi_remove, }; module_platform_driver(gsbi_driver); diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile new file mode 100644 index 000000000000..cdaad9d53a05 --- /dev/null +++ b/drivers/soc/tegra/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_ARCH_TEGRA) += fuse/ + +obj-$(CONFIG_ARCH_TEGRA) += common.o +obj-$(CONFIG_ARCH_TEGRA) += pmc.o diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c new file mode 100644 index 000000000000..a71cb74f3674 --- /dev/null +++ b/drivers/soc/tegra/common.c @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
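Back in the qcom_gsbi rework above: the interface clock and configuration move into a devm_kzalloc()'d per-device structure saved with platform_set_drvdata(), so the new remove() callback can find the clock and disable it. A stripped-down sketch of that probe/remove pairing, with hypothetical names:

/* Illustrative only; "example" names are hypothetical. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_data {
	struct clk *hclk;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->hclk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(data->hclk))
		return PTR_ERR(data->hclk);

	ret = clk_prepare_enable(data->hclk);	/* kept on until remove() */
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct example_data *data = platform_get_drvdata(pdev);

	clk_disable_unprepare(data->hclk);
	return 0;
}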
+ */ + +#include <linux/of.h> + +#include <soc/tegra/common.h> + +static const struct of_device_id tegra_machine_match[] = { + { .compatible = "nvidia,tegra20", }, + { .compatible = "nvidia,tegra30", }, + { .compatible = "nvidia,tegra114", }, + { .compatible = "nvidia,tegra124", }, + { } +}; + +bool soc_is_tegra(void) +{ + struct device_node *root; + + root = of_find_node_by_path("/"); + if (!root) + return false; + + return of_match_node(tegra_machine_match, root) != NULL; +} diff --git a/drivers/soc/tegra/fuse/Makefile b/drivers/soc/tegra/fuse/Makefile new file mode 100644 index 000000000000..3af357da91f3 --- /dev/null +++ b/drivers/soc/tegra/fuse/Makefile @@ -0,0 +1,8 @@ +obj-y += fuse-tegra.o +obj-y += fuse-tegra30.o +obj-y += tegra-apbmisc.o +obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += fuse-tegra20.o +obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += speedo-tegra20.o +obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += speedo-tegra30.o +obj-$(CONFIG_ARCH_TEGRA_114_SOC) += speedo-tegra114.o +obj-$(CONFIG_ARCH_TEGRA_124_SOC) += speedo-tegra124.o diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c new file mode 100644 index 000000000000..11a5043959dc --- /dev/null +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include <linux/device.h> +#include <linux/kobject.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> + +#include <soc/tegra/common.h> +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +static u32 (*fuse_readl)(const unsigned int offset); +static int fuse_size; +struct tegra_sku_info tegra_sku_info; + +static const char *tegra_revision_name[TEGRA_REVISION_MAX] = { + [TEGRA_REVISION_UNKNOWN] = "unknown", + [TEGRA_REVISION_A01] = "A01", + [TEGRA_REVISION_A02] = "A02", + [TEGRA_REVISION_A03] = "A03", + [TEGRA_REVISION_A03p] = "A03 prime", + [TEGRA_REVISION_A04] = "A04", +}; + +static u8 fuse_readb(const unsigned int offset) +{ + u32 val; + + val = fuse_readl(round_down(offset, 4)); + val >>= (offset % 4) * 8; + val &= 0xff; + + return val; +} + +static ssize_t fuse_read(struct file *fd, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t size) +{ + int i; + + if (pos < 0 || pos >= fuse_size) + return 0; + + if (size > fuse_size - pos) + size = fuse_size - pos; + + for (i = 0; i < size; i++) + buf[i] = fuse_readb(pos + i); + + return i; +} + +static struct bin_attribute fuse_bin_attr = { + .attr = { .name = "fuse", .mode = S_IRUGO, }, + .read = fuse_read, +}; + +static const struct of_device_id car_match[] __initconst = { + { .compatible = "nvidia,tegra20-car", }, + { .compatible = "nvidia,tegra30-car", }, + { .compatible = "nvidia,tegra114-car", }, + { .compatible = "nvidia,tegra124-car", }, + {}, +}; + +static void tegra_enable_fuse_clk(void __iomem *base) +{ + u32 reg; + + reg = readl_relaxed(base + 0x48); + reg |= 1 << 28; + writel(reg, base + 0x48); + + /* + * Enable FUSE clock. This needs to be hardcoded because the clock + * subsystem is not active during early boot. + */ + reg = readl(base + 0x14); + reg |= 1 << 7; + writel(reg, base + 0x14); +} + +int tegra_fuse_readl(unsigned long offset, u32 *value) +{ + if (!fuse_readl) + return -EPROBE_DEFER; + + *value = fuse_readl(offset); + + return 0; +} +EXPORT_SYMBOL(tegra_fuse_readl); + +int tegra_fuse_create_sysfs(struct device *dev, int size, + u32 (*readl)(const unsigned int offset)) +{ + if (fuse_size) + return -ENODEV; + + fuse_bin_attr.size = size; + fuse_bin_attr.read = fuse_read; + + fuse_size = size; + fuse_readl = readl; + + return device_create_bin_file(dev, &fuse_bin_attr); +} + +static int __init tegra_init_fuse(void) +{ + struct device_node *np; + void __iomem *car_base; + + if (!soc_is_tegra()) + return 0; + + tegra_init_apbmisc(); + + np = of_find_matching_node(NULL, car_match); + car_base = of_iomap(np, 0); + if (car_base) { + tegra_enable_fuse_clk(car_base); + iounmap(car_base); + } else { + pr_err("Could not enable fuse clk. 
ioremap tegra car failed.\n"); + return -ENXIO; + } + + if (tegra_get_chip_id() == TEGRA20) + tegra20_init_fuse_early(); + else + tegra30_init_fuse_early(); + + pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", + tegra_revision_name[tegra_sku_info.revision], + tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id, + tegra_sku_info.core_process_id); + pr_debug("Tegra CPU Speedo ID %d, Soc Speedo ID %d\n", + tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id); + + return 0; +} +early_initcall(tegra_init_fuse); diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c new file mode 100644 index 000000000000..7cb63ab6aac2 --- /dev/null +++ b/drivers/soc/tegra/fuse/fuse-tegra20.c @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Based on drivers/misc/eeprom/sunxi_sid.c + */ + +#include <linux/device.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/random.h> + +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +#define FUSE_BEGIN 0x100 +#define FUSE_SIZE 0x1f8 +#define FUSE_UID_LOW 0x08 +#define FUSE_UID_HIGH 0x0c + +static phys_addr_t fuse_phys; +static struct clk *fuse_clk; +static void __iomem __initdata *fuse_base; + +static DEFINE_MUTEX(apb_dma_lock); +static DECLARE_COMPLETION(apb_dma_wait); +static struct dma_chan *apb_dma_chan; +static struct dma_slave_config dma_sconfig; +static u32 *apb_buffer; +static dma_addr_t apb_buffer_phys; + +static void apb_dma_complete(void *args) +{ + complete(&apb_dma_wait); +} + +static u32 tegra20_fuse_readl(const unsigned int offset) +{ + int ret; + u32 val = 0; + struct dma_async_tx_descriptor *dma_desc; + + mutex_lock(&apb_dma_lock); + + dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset; + ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig); + if (ret) + goto out; + + dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys, + sizeof(u32), DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!dma_desc) + goto out; + + dma_desc->callback = apb_dma_complete; + dma_desc->callback_param = NULL; + + reinit_completion(&apb_dma_wait); + + clk_prepare_enable(fuse_clk); + + dmaengine_submit(dma_desc); + dma_async_issue_pending(apb_dma_chan); + ret = wait_for_completion_timeout(&apb_dma_wait, msecs_to_jiffies(50)); + + if (WARN(ret == 0, "apb read dma timed out")) + dmaengine_terminate_all(apb_dma_chan); + else + val = *apb_buffer; + + clk_disable_unprepare(fuse_clk); +out: + mutex_unlock(&apb_dma_lock); + + return val; +} + +static const struct of_device_id tegra20_fuse_of_match[] = { + { .compatible = "nvidia,tegra20-efuse" }, + {}, +}; + +static int apb_dma_init(void) +{ + 
dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + apb_dma_chan = dma_request_channel(mask, NULL, NULL); + if (!apb_dma_chan) + return -EPROBE_DEFER; + + apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys, + GFP_KERNEL); + if (!apb_buffer) { + dma_release_channel(apb_dma_chan); + return -ENOMEM; + } + + dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_sconfig.src_maxburst = 1; + dma_sconfig.dst_maxburst = 1; + + return 0; +} + +static int tegra20_fuse_probe(struct platform_device *pdev) +{ + struct resource *res; + int err; + + fuse_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(fuse_clk)) { + dev_err(&pdev->dev, "missing clock"); + return PTR_ERR(fuse_clk); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + fuse_phys = res->start; + + err = apb_dma_init(); + if (err) + return err; + + if (tegra_fuse_create_sysfs(&pdev->dev, FUSE_SIZE, tegra20_fuse_readl)) + return -ENODEV; + + dev_dbg(&pdev->dev, "loaded\n"); + + return 0; +} + +static struct platform_driver tegra20_fuse_driver = { + .probe = tegra20_fuse_probe, + .driver = { + .name = "tegra20_fuse", + .owner = THIS_MODULE, + .of_match_table = tegra20_fuse_of_match, + } +}; + +static int __init tegra20_fuse_init(void) +{ + return platform_driver_register(&tegra20_fuse_driver); +} +postcore_initcall(tegra20_fuse_init); + +/* Early boot code. This code is called before the devices are created */ + +u32 __init tegra20_fuse_early(const unsigned int offset) +{ + return readl_relaxed(fuse_base + FUSE_BEGIN + offset); +} + +bool __init tegra20_spare_fuse_early(int spare_bit) +{ + u32 offset = spare_bit * 4; + bool value; + + value = tegra20_fuse_early(offset + 0x100); + + return value; +} + +static void __init tegra20_fuse_add_randomness(void) +{ + u32 randomness[7]; + + randomness[0] = tegra_sku_info.sku_id; + randomness[1] = tegra_read_straps(); + randomness[2] = tegra_read_chipid(); + randomness[3] = tegra_sku_info.cpu_process_id << 16; + randomness[3] |= tegra_sku_info.core_process_id; + randomness[4] = tegra_sku_info.cpu_speedo_id << 16; + randomness[4] |= tegra_sku_info.soc_speedo_id; + randomness[5] = tegra20_fuse_early(FUSE_UID_LOW); + randomness[6] = tegra20_fuse_early(FUSE_UID_HIGH); + + add_device_randomness(randomness, sizeof(randomness)); +} + +void __init tegra20_init_fuse_early(void) +{ + fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE); + + tegra_init_revision(); + tegra20_init_speedo_data(&tegra_sku_info); + tegra20_fuse_add_randomness(); + + iounmap(fuse_base); +} diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c new file mode 100644 index 000000000000..5999cf34ab70 --- /dev/null +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
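tegra_fuse_create_sysfs() above publishes the fuse array as a read-only binary sysfs attribute named "fuse", served byte-wise by fuse_read(). As a usage sketch only, a user-space program could read a word out of it; the device path below is an assumption and differs per board and SoC:

/* Illustrative user-space sketch; the sysfs path is a guess. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path depends on how the efuse device is named on the board. */
	int fd = open("/sys/devices/platform/7000f800.efuse/fuse", O_RDONLY);
	uint32_t word;

	if (fd < 0)
		return 1;
	/* read one 32-bit word at offset 0x14 within the fuse array */
	if (pread(fd, &word, sizeof(word), 0x14) == (ssize_t)sizeof(word))
		printf("fuse word @0x14: 0x%08x\n", (unsigned int)word);
	close(fd);
	return 0;
}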
+ * + */ + +#include <linux/device.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/random.h> + +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +#define FUSE_BEGIN 0x100 + +/* Tegra30 and later */ +#define FUSE_VENDOR_CODE 0x100 +#define FUSE_FAB_CODE 0x104 +#define FUSE_LOT_CODE_0 0x108 +#define FUSE_LOT_CODE_1 0x10c +#define FUSE_WAFER_ID 0x110 +#define FUSE_X_COORDINATE 0x114 +#define FUSE_Y_COORDINATE 0x118 + +#define FUSE_HAS_REVISION_INFO BIT(0) + +enum speedo_idx { + SPEEDO_TEGRA30 = 0, + SPEEDO_TEGRA114, + SPEEDO_TEGRA124, +}; + +struct tegra_fuse_info { + int size; + int spare_bit; + enum speedo_idx speedo_idx; +}; + +static void __iomem *fuse_base; +static struct clk *fuse_clk; +static struct tegra_fuse_info *fuse_info; + +u32 tegra30_fuse_readl(const unsigned int offset) +{ + u32 val; + + /* + * early in the boot, the fuse clock will be enabled by + * tegra_init_fuse() + */ + + if (fuse_clk) + clk_prepare_enable(fuse_clk); + + val = readl_relaxed(fuse_base + FUSE_BEGIN + offset); + + if (fuse_clk) + clk_disable_unprepare(fuse_clk); + + return val; +} + +static struct tegra_fuse_info tegra30_info = { + .size = 0x2a4, + .spare_bit = 0x144, + .speedo_idx = SPEEDO_TEGRA30, +}; + +static struct tegra_fuse_info tegra114_info = { + .size = 0x2a0, + .speedo_idx = SPEEDO_TEGRA114, +}; + +static struct tegra_fuse_info tegra124_info = { + .size = 0x300, + .speedo_idx = SPEEDO_TEGRA124, +}; + +static const struct of_device_id tegra30_fuse_of_match[] = { + { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_info }, + { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_info }, + { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_info }, + {}, +}; + +static int tegra30_fuse_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_dev_id; + + of_dev_id = of_match_device(tegra30_fuse_of_match, &pdev->dev); + if (!of_dev_id) + return -ENODEV; + + fuse_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(fuse_clk)) { + dev_err(&pdev->dev, "missing clock"); + return PTR_ERR(fuse_clk); + } + + platform_set_drvdata(pdev, NULL); + + if (tegra_fuse_create_sysfs(&pdev->dev, fuse_info->size, + tegra30_fuse_readl)) + return -ENODEV; + + dev_dbg(&pdev->dev, "loaded\n"); + + return 0; +} + +static struct platform_driver tegra30_fuse_driver = { + .probe = tegra30_fuse_probe, + .driver = { + .name = "tegra_fuse", + .owner = THIS_MODULE, + .of_match_table = tegra30_fuse_of_match, + } +}; + +static int __init tegra30_fuse_init(void) +{ + return platform_driver_register(&tegra30_fuse_driver); +} +postcore_initcall(tegra30_fuse_init); + +/* Early boot code. 
This code is called before the devices are created */ + +typedef void (*speedo_f)(struct tegra_sku_info *sku_info); + +static speedo_f __initdata speedo_tbl[] = { + [SPEEDO_TEGRA30] = tegra30_init_speedo_data, + [SPEEDO_TEGRA114] = tegra114_init_speedo_data, + [SPEEDO_TEGRA124] = tegra124_init_speedo_data, +}; + +static void __init tegra30_fuse_add_randomness(void) +{ + u32 randomness[12]; + + randomness[0] = tegra_sku_info.sku_id; + randomness[1] = tegra_read_straps(); + randomness[2] = tegra_read_chipid(); + randomness[3] = tegra_sku_info.cpu_process_id << 16; + randomness[3] |= tegra_sku_info.core_process_id; + randomness[4] = tegra_sku_info.cpu_speedo_id << 16; + randomness[4] |= tegra_sku_info.soc_speedo_id; + randomness[5] = tegra30_fuse_readl(FUSE_VENDOR_CODE); + randomness[6] = tegra30_fuse_readl(FUSE_FAB_CODE); + randomness[7] = tegra30_fuse_readl(FUSE_LOT_CODE_0); + randomness[8] = tegra30_fuse_readl(FUSE_LOT_CODE_1); + randomness[9] = tegra30_fuse_readl(FUSE_WAFER_ID); + randomness[10] = tegra30_fuse_readl(FUSE_X_COORDINATE); + randomness[11] = tegra30_fuse_readl(FUSE_Y_COORDINATE); + + add_device_randomness(randomness, sizeof(randomness)); +} + +static void __init legacy_fuse_init(void) +{ + switch (tegra_get_chip_id()) { + case TEGRA30: + fuse_info = &tegra30_info; + break; + case TEGRA114: + fuse_info = &tegra114_info; + break; + case TEGRA124: + fuse_info = &tegra124_info; + break; + default: + return; + } + + fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE); +} + +bool __init tegra30_spare_fuse(int spare_bit) +{ + u32 offset = fuse_info->spare_bit + spare_bit * 4; + + return tegra30_fuse_readl(offset) & 1; +} + +void __init tegra30_init_fuse_early(void) +{ + struct device_node *np; + const struct of_device_id *of_match; + + np = of_find_matching_node_and_match(NULL, tegra30_fuse_of_match, + &of_match); + if (np) { + fuse_base = of_iomap(np, 0); + fuse_info = (struct tegra_fuse_info *)of_match->data; + } else + legacy_fuse_init(); + + if (!fuse_base) { + pr_warn("fuse DT node missing and unknown chip id: 0x%02x\n", + tegra_get_chip_id()); + return; + } + + tegra_init_revision(); + speedo_tbl[fuse_info->speedo_idx](&tegra_sku_info); + tegra30_fuse_add_randomness(); +} diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h new file mode 100644 index 000000000000..3a398bf3572c --- /dev/null +++ b/drivers/soc/tegra/fuse/fuse.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2010 Google, Inc. + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + * + * Author: + * Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
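tegra30_init_fuse_early() above picks the per-chip tegra_fuse_info (array size, spare-bit base, speedo table index) out of the .data field of the matched of_device_id, falling back to a chip-id switch when no DT node is present. A generic sketch of that per-variant .data pattern, with hypothetical compatibles and fields:

/* Illustrative only; compatibles, fields and names are placeholders. */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_variant {
	int fuse_size;
};

static const struct example_variant example_chip_a = { .fuse_size = 0x2a4 };
static const struct example_variant example_chip_b = { .fuse_size = 0x300 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &example_chip_a },
	{ .compatible = "vendor,chip-b", .data = &example_chip_b },
	{ },
};

static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	const struct example_variant *variant;

	id = of_match_device(example_of_match, &pdev->dev);
	if (!id)
		return -ENODEV;

	variant = id->data;	/* per-compatible configuration */
	dev_info(&pdev->dev, "fuse array is %#x bytes\n", variant->fuse_size);
	return 0;
}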
+ * + */ + +#ifndef __DRIVERS_MISC_TEGRA_FUSE_H +#define __DRIVERS_MISC_TEGRA_FUSE_H + +#define TEGRA_FUSE_BASE 0x7000f800 +#define TEGRA_FUSE_SIZE 0x400 + +int tegra_fuse_create_sysfs(struct device *dev, int size, + u32 (*readl)(const unsigned int offset)); + +bool tegra30_spare_fuse(int bit); +u32 tegra30_fuse_readl(const unsigned int offset); +void tegra30_init_fuse_early(void); +void tegra_init_revision(void); +void tegra_init_apbmisc(void); + +#ifdef CONFIG_ARCH_TEGRA_2x_SOC +void tegra20_init_speedo_data(struct tegra_sku_info *sku_info); +bool tegra20_spare_fuse_early(int spare_bit); +void tegra20_init_fuse_early(void); +u32 tegra20_fuse_early(const unsigned int offset); +#else +static inline void tegra20_init_speedo_data(struct tegra_sku_info *sku_info) {} +static inline bool tegra20_spare_fuse_early(int spare_bit) +{ + return false; +} +static inline void tegra20_init_fuse_early(void) {} +static inline u32 tegra20_fuse_early(const unsigned int offset) +{ + return 0; +} +#endif + + +#ifdef CONFIG_ARCH_TEGRA_3x_SOC +void tegra30_init_speedo_data(struct tegra_sku_info *sku_info); +#else +static inline void tegra30_init_speedo_data(struct tegra_sku_info *sku_info) {} +#endif + +#ifdef CONFIG_ARCH_TEGRA_114_SOC +void tegra114_init_speedo_data(struct tegra_sku_info *sku_info); +#else +static inline void tegra114_init_speedo_data(struct tegra_sku_info *sku_info) {} +#endif + +#ifdef CONFIG_ARCH_TEGRA_124_SOC +void tegra124_init_speedo_data(struct tegra_sku_info *sku_info); +#else +static inline void tegra124_init_speedo_data(struct tegra_sku_info *sku_info) {} +#endif + +#endif diff --git a/drivers/soc/tegra/fuse/speedo-tegra114.c b/drivers/soc/tegra/fuse/speedo-tegra114.c new file mode 100644 index 000000000000..2a6ca036f09f --- /dev/null +++ b/drivers/soc/tegra/fuse/speedo-tegra114.c @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
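fuse.h above declares the chip-specific speedo and early-init helpers only when the corresponding CONFIG_ARCH_TEGRA_* option is enabled, and supplies empty static inline stubs otherwise, so common callers such as tegra_init_fuse() need no #ifdefs of their own. A condensed sketch of that idiom, with a hypothetical option and helpers:

/* Illustrative only; CONFIG_EXAMPLE_CHIP and these helpers are hypothetical. */
#include <linux/types.h>

#ifdef CONFIG_EXAMPLE_CHIP
void example_chip_init_early(void);
u32 example_chip_read(unsigned int offset);
#else
static inline void example_chip_init_early(void) {}
static inline u32 example_chip_read(unsigned int offset)
{
	return 0;	/* safe default when the chip support is not built */
}
#endif

Callers compile unchanged either way; when the option is off, the stub calls collapse to nothing at the call site.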
+ */ + +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/kernel.h> + +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +#define CORE_PROCESS_CORNERS 2 +#define CPU_PROCESS_CORNERS 2 + +enum { + THRESHOLD_INDEX_0, + THRESHOLD_INDEX_1, + THRESHOLD_INDEX_COUNT, +}; + +static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = { + {1123, UINT_MAX}, + {0, UINT_MAX}, +}; + +static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = { + {1695, UINT_MAX}, + {0, UINT_MAX}, +}; + +static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info, + int *threshold) +{ + u32 tmp; + u32 sku = sku_info->sku_id; + enum tegra_revision rev = sku_info->revision; + + switch (sku) { + case 0x00: + case 0x10: + case 0x05: + case 0x06: + sku_info->cpu_speedo_id = 1; + sku_info->soc_speedo_id = 0; + *threshold = THRESHOLD_INDEX_0; + break; + + case 0x03: + case 0x04: + sku_info->cpu_speedo_id = 2; + sku_info->soc_speedo_id = 1; + *threshold = THRESHOLD_INDEX_1; + break; + + default: + pr_err("Tegra Unknown SKU %d\n", sku); + sku_info->cpu_speedo_id = 0; + sku_info->soc_speedo_id = 0; + *threshold = THRESHOLD_INDEX_0; + break; + } + + if (rev == TEGRA_REVISION_A01) { + tmp = tegra30_fuse_readl(0x270) << 1; + tmp |= tegra30_fuse_readl(0x26c); + if (!tmp) + sku_info->cpu_speedo_id = 0; + } +} + +void __init tegra114_init_speedo_data(struct tegra_sku_info *sku_info) +{ + u32 cpu_speedo_val; + u32 core_speedo_val; + int threshold; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != + THRESHOLD_INDEX_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != + THRESHOLD_INDEX_COUNT); + + rev_sku_to_speedo_ids(sku_info, &threshold); + + cpu_speedo_val = tegra30_fuse_readl(0x12c) + 1024; + core_speedo_val = tegra30_fuse_readl(0x134); + + for (i = 0; i < CPU_PROCESS_CORNERS; i++) + if (cpu_speedo_val < cpu_process_speedos[threshold][i]) + break; + sku_info->cpu_process_id = i; + + for (i = 0; i < CORE_PROCESS_CORNERS; i++) + if (core_speedo_val < core_process_speedos[threshold][i]) + break; + sku_info->core_process_id = i; +} diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c new file mode 100644 index 000000000000..46362387d974 --- /dev/null +++ b/drivers/soc/tegra/fuse/speedo-tegra124.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
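The loops that close tegra114_init_speedo_data() above bin the raw speedo readings: the process ID is the index of the first threshold the measured value is still below, with UINT_MAX as the catch-all last corner. A worked example under that reading of the code, with hypothetical names:

/* Worked example only; mirrors the loops above, names are hypothetical. */
#include <linux/kernel.h>

static int example_speedo_bin(u32 value, const u32 *corners, int ncorners)
{
	int i;

	for (i = 0; i < ncorners; i++)
		if (value < corners[i])
			break;
	return i;
}

/*
 * With the tegra114 CPU row {1695, UINT_MAX}: a reading of 1500 stops at
 * index 0 (1500 < 1695), while 1800 falls through to index 1, so slower
 * parts end up with the lower process ID.
 */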
+ */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/bug.h> + +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +#define CPU_PROCESS_CORNERS 2 +#define GPU_PROCESS_CORNERS 2 +#define CORE_PROCESS_CORNERS 2 + +#define FUSE_CPU_SPEEDO_0 0x14 +#define FUSE_CPU_SPEEDO_1 0x2c +#define FUSE_CPU_SPEEDO_2 0x30 +#define FUSE_SOC_SPEEDO_0 0x34 +#define FUSE_SOC_SPEEDO_1 0x38 +#define FUSE_SOC_SPEEDO_2 0x3c +#define FUSE_CPU_IDDQ 0x18 +#define FUSE_SOC_IDDQ 0x40 +#define FUSE_GPU_IDDQ 0x128 +#define FUSE_FT_REV 0x28 + +enum { + THRESHOLD_INDEX_0, + THRESHOLD_INDEX_1, + THRESHOLD_INDEX_COUNT, +}; + +static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = { + {2190, UINT_MAX}, + {0, UINT_MAX}, +}; + +static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = { + {1965, UINT_MAX}, + {0, UINT_MAX}, +}; + +static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = { + {2101, UINT_MAX}, + {0, UINT_MAX}, +}; + +static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info, + int *threshold) +{ + int sku = sku_info->sku_id; + + /* Assign to default */ + sku_info->cpu_speedo_id = 0; + sku_info->soc_speedo_id = 0; + sku_info->gpu_speedo_id = 0; + *threshold = THRESHOLD_INDEX_0; + + switch (sku) { + case 0x00: /* Eng sku */ + case 0x0F: + case 0x23: + /* Using the default */ + break; + case 0x83: + sku_info->cpu_speedo_id = 2; + break; + + case 0x1F: + case 0x87: + case 0x27: + sku_info->cpu_speedo_id = 2; + sku_info->soc_speedo_id = 0; + sku_info->gpu_speedo_id = 1; + *threshold = THRESHOLD_INDEX_0; + break; + case 0x81: + case 0x21: + case 0x07: + sku_info->cpu_speedo_id = 1; + sku_info->soc_speedo_id = 1; + sku_info->gpu_speedo_id = 1; + *threshold = THRESHOLD_INDEX_1; + break; + case 0x49: + case 0x4A: + case 0x48: + sku_info->cpu_speedo_id = 4; + sku_info->soc_speedo_id = 2; + sku_info->gpu_speedo_id = 3; + *threshold = THRESHOLD_INDEX_1; + break; + default: + pr_err("Tegra Unknown SKU %d\n", sku); + /* Using the default for the error case */ + break; + } +} + +void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info) +{ + int i, threshold, cpu_speedo_0_value, soc_speedo_0_value; + int cpu_iddq_value, gpu_iddq_value, soc_iddq_value; + + BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != + THRESHOLD_INDEX_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) != + THRESHOLD_INDEX_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != + THRESHOLD_INDEX_COUNT); + + cpu_speedo_0_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_0); + + /* GPU Speedo is stored in CPU_SPEEDO_2 */ + sku_info->gpu_speedo_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_2); + + soc_speedo_0_value = tegra30_fuse_readl(FUSE_SOC_SPEEDO_0); + + cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ); + soc_iddq_value = tegra30_fuse_readl(FUSE_SOC_IDDQ); + gpu_iddq_value = tegra30_fuse_readl(FUSE_GPU_IDDQ); + + sku_info->cpu_speedo_value = cpu_speedo_0_value; + + if (sku_info->cpu_speedo_value == 0) { + pr_warn("Tegra Warning: Speedo value not fused.\n"); + WARN_ON(1); + return; + } + + rev_sku_to_speedo_ids(sku_info, &threshold); + + sku_info->cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ); + + for (i = 0; i < GPU_PROCESS_CORNERS; i++) + if (sku_info->gpu_speedo_value < + gpu_process_speedos[threshold][i]) + break; + sku_info->gpu_process_id = i; + + for (i = 0; i < CPU_PROCESS_CORNERS; i++) + if (sku_info->cpu_speedo_value < + cpu_process_speedos[threshold][i]) + break; + sku_info->cpu_process_id = i; + + for (i = 0; i < CORE_PROCESS_CORNERS; i++) 
+ if (soc_speedo_0_value < + core_process_speedos[threshold][i]) + break; + sku_info->core_process_id = i; + + pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n", + sku_info->gpu_speedo_id, sku_info->gpu_speedo_value); +} diff --git a/drivers/soc/tegra/fuse/speedo-tegra20.c b/drivers/soc/tegra/fuse/speedo-tegra20.c new file mode 100644 index 000000000000..eff1b63f330d --- /dev/null +++ b/drivers/soc/tegra/fuse/speedo-tegra20.c @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/kernel.h> + +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +#define CPU_SPEEDO_LSBIT 20 +#define CPU_SPEEDO_MSBIT 29 +#define CPU_SPEEDO_REDUND_LSBIT 30 +#define CPU_SPEEDO_REDUND_MSBIT 39 +#define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT) + +#define CORE_SPEEDO_LSBIT 40 +#define CORE_SPEEDO_MSBIT 47 +#define CORE_SPEEDO_REDUND_LSBIT 48 +#define CORE_SPEEDO_REDUND_MSBIT 55 +#define CORE_SPEEDO_REDUND_OFFS (CORE_SPEEDO_REDUND_MSBIT - CORE_SPEEDO_MSBIT) + +#define SPEEDO_MULT 4 + +#define PROCESS_CORNERS_NUM 4 + +#define SPEEDO_ID_SELECT_0(rev) ((rev) <= 2) +#define SPEEDO_ID_SELECT_1(sku) \ + (((sku) != 20) && ((sku) != 23) && ((sku) != 24) && \ + ((sku) != 27) && ((sku) != 28)) + +enum { + SPEEDO_ID_0, + SPEEDO_ID_1, + SPEEDO_ID_2, + SPEEDO_ID_COUNT, +}; + +static const u32 __initconst cpu_process_speedos[][PROCESS_CORNERS_NUM] = { + {315, 366, 420, UINT_MAX}, + {303, 368, 419, UINT_MAX}, + {316, 331, 383, UINT_MAX}, +}; + +static const u32 __initconst core_process_speedos[][PROCESS_CORNERS_NUM] = { + {165, 195, 224, UINT_MAX}, + {165, 195, 224, UINT_MAX}, + {165, 195, 224, UINT_MAX}, +}; + +void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info) +{ + u32 reg; + u32 val; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != SPEEDO_ID_COUNT); + + if (SPEEDO_ID_SELECT_0(sku_info->revision)) + sku_info->soc_speedo_id = SPEEDO_ID_0; + else if (SPEEDO_ID_SELECT_1(sku_info->sku_id)) + sku_info->soc_speedo_id = SPEEDO_ID_1; + else + sku_info->soc_speedo_id = SPEEDO_ID_2; + + val = 0; + for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) { + reg = tegra20_spare_fuse_early(i) | + tegra20_spare_fuse_early(i + CPU_SPEEDO_REDUND_OFFS); + val = (val << 1) | (reg & 0x1); + } + val = val * SPEEDO_MULT; + pr_debug("Tegra CPU speedo value %u\n", val); + + for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) { + if (val <= cpu_process_speedos[sku_info->soc_speedo_id][i]) + break; + } + sku_info->cpu_process_id = i; + + val = 0; + for (i = CORE_SPEEDO_MSBIT; i >= CORE_SPEEDO_LSBIT; i--) { + reg = tegra20_spare_fuse_early(i) | + tegra20_spare_fuse_early(i + CORE_SPEEDO_REDUND_OFFS); + val = (val << 1) | (reg & 0x1); + } + val = val * SPEEDO_MULT; + pr_debug("Core speedo value %u\n", val); + + for (i = 0; i < 
(PROCESS_CORNERS_NUM - 1); i++) { + if (val <= core_process_speedos[sku_info->soc_speedo_id][i]) + break; + } + sku_info->core_process_id = i; +} diff --git a/drivers/soc/tegra/fuse/speedo-tegra30.c b/drivers/soc/tegra/fuse/speedo-tegra30.c new file mode 100644 index 000000000000..b17f0dcdfebe --- /dev/null +++ b/drivers/soc/tegra/fuse/speedo-tegra30.c @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/kernel.h> + +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +#define CORE_PROCESS_CORNERS 1 +#define CPU_PROCESS_CORNERS 6 + +#define FUSE_SPEEDO_CALIB_0 0x14 +#define FUSE_PACKAGE_INFO 0XFC +#define FUSE_TEST_PROG_VER 0X28 + +#define G_SPEEDO_BIT_MINUS1 58 +#define G_SPEEDO_BIT_MINUS1_R 59 +#define G_SPEEDO_BIT_MINUS2 60 +#define G_SPEEDO_BIT_MINUS2_R 61 +#define LP_SPEEDO_BIT_MINUS1 62 +#define LP_SPEEDO_BIT_MINUS1_R 63 +#define LP_SPEEDO_BIT_MINUS2 64 +#define LP_SPEEDO_BIT_MINUS2_R 65 + +enum { + THRESHOLD_INDEX_0, + THRESHOLD_INDEX_1, + THRESHOLD_INDEX_2, + THRESHOLD_INDEX_3, + THRESHOLD_INDEX_4, + THRESHOLD_INDEX_5, + THRESHOLD_INDEX_6, + THRESHOLD_INDEX_7, + THRESHOLD_INDEX_8, + THRESHOLD_INDEX_9, + THRESHOLD_INDEX_10, + THRESHOLD_INDEX_11, + THRESHOLD_INDEX_COUNT, +}; + +static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = { + {180}, + {170}, + {195}, + {180}, + {168}, + {192}, + {180}, + {170}, + {195}, + {180}, + {180}, + {180}, +}; + +static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = { + {306, 338, 360, 376, UINT_MAX}, + {295, 336, 358, 375, UINT_MAX}, + {325, 325, 358, 375, UINT_MAX}, + {325, 325, 358, 375, UINT_MAX}, + {292, 324, 348, 364, UINT_MAX}, + {324, 324, 348, 364, UINT_MAX}, + {324, 324, 348, 364, UINT_MAX}, + {295, 336, 358, 375, UINT_MAX}, + {358, 358, 358, 358, 397, UINT_MAX}, + {364, 364, 364, 364, 397, UINT_MAX}, + {295, 336, 358, 375, 391, UINT_MAX}, + {295, 336, 358, 375, 391, UINT_MAX}, +}; + +static int threshold_index __initdata; + +static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp) +{ + u32 reg; + int ate_ver; + int bit_minus1; + int bit_minus2; + + reg = tegra30_fuse_readl(FUSE_SPEEDO_CALIB_0); + + *speedo_lp = (reg & 0xFFFF) * 4; + *speedo_g = ((reg >> 16) & 0xFFFF) * 4; + + ate_ver = tegra30_fuse_readl(FUSE_TEST_PROG_VER); + pr_debug("Tegra ATE prog ver %d.%d\n", ate_ver/10, ate_ver%10); + + if (ate_ver >= 26) { + bit_minus1 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1); + bit_minus1 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1_R); + bit_minus2 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2); + bit_minus2 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2_R); + *speedo_lp |= (bit_minus1 << 1) | bit_minus2; + + bit_minus1 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1); + bit_minus1 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1_R); + bit_minus2 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2); + bit_minus2 |= 
tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2_R); + *speedo_g |= (bit_minus1 << 1) | bit_minus2; + } else { + *speedo_lp |= 0x3; + *speedo_g |= 0x3; + } +} + +static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info) +{ + int package_id = tegra30_fuse_readl(FUSE_PACKAGE_INFO) & 0x0F; + + switch (sku_info->revision) { + case TEGRA_REVISION_A01: + sku_info->cpu_speedo_id = 0; + sku_info->soc_speedo_id = 0; + threshold_index = THRESHOLD_INDEX_0; + break; + case TEGRA_REVISION_A02: + case TEGRA_REVISION_A03: + switch (sku_info->sku_id) { + case 0x87: + case 0x82: + sku_info->cpu_speedo_id = 1; + sku_info->soc_speedo_id = 1; + threshold_index = THRESHOLD_INDEX_1; + break; + case 0x81: + switch (package_id) { + case 1: + sku_info->cpu_speedo_id = 2; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_2; + break; + case 2: + sku_info->cpu_speedo_id = 4; + sku_info->soc_speedo_id = 1; + threshold_index = THRESHOLD_INDEX_7; + break; + default: + pr_err("Tegra Unknown pkg %d\n", package_id); + break; + } + break; + case 0x80: + switch (package_id) { + case 1: + sku_info->cpu_speedo_id = 5; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_8; + break; + case 2: + sku_info->cpu_speedo_id = 6; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_9; + break; + default: + pr_err("Tegra Unknown pkg %d\n", package_id); + break; + } + break; + case 0x83: + switch (package_id) { + case 1: + sku_info->cpu_speedo_id = 7; + sku_info->soc_speedo_id = 1; + threshold_index = THRESHOLD_INDEX_10; + break; + case 2: + sku_info->cpu_speedo_id = 3; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_3; + break; + default: + pr_err("Tegra Unknown pkg %d\n", package_id); + break; + } + break; + case 0x8F: + sku_info->cpu_speedo_id = 8; + sku_info->soc_speedo_id = 1; + threshold_index = THRESHOLD_INDEX_11; + break; + case 0x08: + sku_info->cpu_speedo_id = 1; + sku_info->soc_speedo_id = 1; + threshold_index = THRESHOLD_INDEX_4; + break; + case 0x02: + sku_info->cpu_speedo_id = 2; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_5; + break; + case 0x04: + sku_info->cpu_speedo_id = 3; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_6; + break; + case 0: + switch (package_id) { + case 1: + sku_info->cpu_speedo_id = 2; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_2; + break; + case 2: + sku_info->cpu_speedo_id = 3; + sku_info->soc_speedo_id = 2; + threshold_index = THRESHOLD_INDEX_3; + break; + default: + pr_err("Tegra Unknown pkg %d\n", package_id); + break; + } + break; + default: + pr_warn("Tegra Unknown SKU %d\n", sku_info->sku_id); + sku_info->cpu_speedo_id = 0; + sku_info->soc_speedo_id = 0; + threshold_index = THRESHOLD_INDEX_0; + break; + } + break; + default: + pr_warn("Tegra Unknown chip rev %d\n", sku_info->revision); + sku_info->cpu_speedo_id = 0; + sku_info->soc_speedo_id = 0; + threshold_index = THRESHOLD_INDEX_0; + break; + } +} + +void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info) +{ + u32 cpu_speedo_val; + u32 core_speedo_val; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != + THRESHOLD_INDEX_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != + THRESHOLD_INDEX_COUNT); + + + rev_sku_to_speedo_ids(sku_info); + fuse_speedo_calib(&cpu_speedo_val, &core_speedo_val); + pr_debug("Tegra CPU speedo value %u\n", cpu_speedo_val); + pr_debug("Tegra Core speedo value %u\n", core_speedo_val); + + for (i = 0; i < CPU_PROCESS_CORNERS; i++) { + if 
(cpu_speedo_val < cpu_process_speedos[threshold_index][i]) + break; + } + sku_info->cpu_process_id = i - 1; + + if (sku_info->cpu_process_id == -1) { + pr_warn("Tegra CPU speedo value %3d out of range", + cpu_speedo_val); + sku_info->cpu_process_id = 0; + sku_info->cpu_speedo_id = 1; + } + + for (i = 0; i < CORE_PROCESS_CORNERS; i++) { + if (core_speedo_val < core_process_speedos[threshold_index][i]) + break; + } + sku_info->core_process_id = i - 1; + + if (sku_info->core_process_id == -1) { + pr_warn("Tegra CORE speedo value %3d out of range", + core_speedo_val); + sku_info->core_process_id = 0; + sku_info->soc_speedo_id = 1; + } +} diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c new file mode 100644 index 000000000000..3bf5aba4caaa --- /dev/null +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> + +#include <soc/tegra/fuse.h> + +#include "fuse.h" + +#define APBMISC_BASE 0x70000800 +#define APBMISC_SIZE 0x64 +#define FUSE_SKU_INFO 0x10 + +static void __iomem *apbmisc_base; +static void __iomem *strapping_base; + +u32 tegra_read_chipid(void) +{ + return readl_relaxed(apbmisc_base + 4); +} + +u8 tegra_get_chip_id(void) +{ + if (!apbmisc_base) { + WARN(1, "Tegra Chip ID not yet available\n"); + return 0; + } + + return (tegra_read_chipid() >> 8) & 0xff; +} + +u32 tegra_read_straps(void) +{ + if (strapping_base) + return readl_relaxed(strapping_base); + else + return 0; +} + +static const struct of_device_id apbmisc_match[] __initconst = { + { .compatible = "nvidia,tegra20-apbmisc", }, + {}, +}; + +void __init tegra_init_revision(void) +{ + u32 id, chip_id, minor_rev; + int rev; + + id = tegra_read_chipid(); + chip_id = (id >> 8) & 0xff; + minor_rev = (id >> 16) & 0xf; + + switch (minor_rev) { + case 1: + rev = TEGRA_REVISION_A01; + break; + case 2: + rev = TEGRA_REVISION_A02; + break; + case 3: + if (chip_id == TEGRA20 && (tegra20_spare_fuse_early(18) || + tegra20_spare_fuse_early(19))) + rev = TEGRA_REVISION_A03p; + else + rev = TEGRA_REVISION_A03; + break; + case 4: + rev = TEGRA_REVISION_A04; + break; + default: + rev = TEGRA_REVISION_UNKNOWN; + } + + tegra_sku_info.revision = rev; + + if (chip_id == TEGRA20) + tegra_sku_info.sku_id = tegra20_fuse_early(FUSE_SKU_INFO); + else + tegra_sku_info.sku_id = tegra30_fuse_readl(FUSE_SKU_INFO); +} + +void __init tegra_init_apbmisc(void) +{ + struct device_node *np; + + np = of_find_matching_node(NULL, apbmisc_match); + apbmisc_base = of_iomap(np, 0); + if (!apbmisc_base) { + pr_warn("ioremap tegra apbmisc failed. 
using %08x instead\n", + APBMISC_BASE); + apbmisc_base = ioremap(APBMISC_BASE, APBMISC_SIZE); + } + + strapping_base = of_iomap(np, 1); + if (!strapping_base) + pr_err("ioremap tegra strapping_base failed\n"); +} diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c new file mode 100644 index 000000000000..a2c0ceb95f8f --- /dev/null +++ b/drivers/soc/tegra/pmc.c @@ -0,0 +1,957 @@ +/* + * drivers/soc/tegra/pmc.c + * + * Copyright (c) 2010 Google, Inc + * + * Author: + * Colin Cross <ccross@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/clk/tegra.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/reset.h> +#include <linux/seq_file.h> +#include <linux/spinlock.h> + +#include <soc/tegra/common.h> +#include <soc/tegra/fuse.h> +#include <soc/tegra/pmc.h> + +#define PMC_CNTRL 0x0 +#define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */ +#define PMC_CNTRL_SYSCLK_OE (1 << 11) /* system clock enable */ +#define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) /* LP0 when CPU pwr gated */ +#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */ +#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */ +#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */ + +#define DPD_SAMPLE 0x020 +#define DPD_SAMPLE_ENABLE (1 << 0) +#define DPD_SAMPLE_DISABLE (0 << 0) + +#define PWRGATE_TOGGLE 0x30 +#define PWRGATE_TOGGLE_START (1 << 8) + +#define REMOVE_CLAMPING 0x34 + +#define PWRGATE_STATUS 0x38 + +#define PMC_SCRATCH0 0x50 +#define PMC_SCRATCH0_MODE_RECOVERY (1 << 31) +#define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30) +#define PMC_SCRATCH0_MODE_RCM (1 << 1) +#define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \ + PMC_SCRATCH0_MODE_BOOTLOADER | \ + PMC_SCRATCH0_MODE_RCM) + +#define PMC_CPUPWRGOOD_TIMER 0xc8 +#define PMC_CPUPWROFF_TIMER 0xcc + +#define PMC_SCRATCH41 0x140 + +#define IO_DPD_REQ 0x1b8 +#define IO_DPD_REQ_CODE_IDLE (0 << 30) +#define IO_DPD_REQ_CODE_OFF (1 << 30) +#define IO_DPD_REQ_CODE_ON (2 << 30) +#define IO_DPD_REQ_CODE_MASK (3 << 30) + +#define IO_DPD_STATUS 0x1bc +#define IO_DPD2_REQ 0x1c0 +#define IO_DPD2_STATUS 0x1c4 +#define SEL_DPD_TIM 0x1c8 + +#define GPU_RG_CNTRL 0x2d4 + +struct tegra_pmc_soc { + unsigned int num_powergates; + const char *const *powergates; + unsigned int num_cpu_powergates; + const u8 *cpu_powergates; +}; + +/** + * struct tegra_pmc - NVIDIA Tegra PMC + * @base: pointer to I/O remapped register region + * @clk: pointer to pclk clock + * @rate: currently configured rate of pclk + * @suspend_mode: lowest suspend mode available + * @cpu_good_time: CPU power good time (in microseconds) + * @cpu_off_time: CPU power off time (in microsecends) + * @core_osc_time: core power good OSC time (in microseconds) + * @core_pmu_time: core power good PMU time (in microseconds) + * 
@core_off_time: core power off time (in microseconds) + * @corereq_high: core power request is active-high + * @sysclkreq_high: system clock request is active-high + * @combined_req: combined power request for CPU & core + * @cpu_pwr_good_en: CPU power good signal is enabled + * @lp0_vec_phys: physical base address of the LP0 warm boot code + * @lp0_vec_size: size of the LP0 warm boot code + * @powergates_lock: mutex for power gate register access + */ +struct tegra_pmc { + void __iomem *base; + struct clk *clk; + + const struct tegra_pmc_soc *soc; + + unsigned long rate; + + enum tegra_suspend_mode suspend_mode; + u32 cpu_good_time; + u32 cpu_off_time; + u32 core_osc_time; + u32 core_pmu_time; + u32 core_off_time; + bool corereq_high; + bool sysclkreq_high; + bool combined_req; + bool cpu_pwr_good_en; + u32 lp0_vec_phys; + u32 lp0_vec_size; + + struct mutex powergates_lock; +}; + +static struct tegra_pmc *pmc = &(struct tegra_pmc) { + .base = NULL, + .suspend_mode = TEGRA_SUSPEND_NONE, +}; + +static u32 tegra_pmc_readl(unsigned long offset) +{ + return readl(pmc->base + offset); +} + +static void tegra_pmc_writel(u32 value, unsigned long offset) +{ + writel(value, pmc->base + offset); +} + +/** + * tegra_powergate_set() - set the state of a partition + * @id: partition ID + * @new_state: new state of the partition + */ +static int tegra_powergate_set(int id, bool new_state) +{ + bool status; + + mutex_lock(&pmc->powergates_lock); + + status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id); + + if (status == new_state) { + mutex_unlock(&pmc->powergates_lock); + return 0; + } + + tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); + + mutex_unlock(&pmc->powergates_lock); + + return 0; +} + +/** + * tegra_powergate_power_on() - power on partition + * @id: partition ID + */ +int tegra_powergate_power_on(int id) +{ + if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates) + return -EINVAL; + + return tegra_powergate_set(id, true); +} + +/** + * tegra_powergate_power_off() - power off partition + * @id: partition ID + */ +int tegra_powergate_power_off(int id) +{ + if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates) + return -EINVAL; + + return tegra_powergate_set(id, false); +} +EXPORT_SYMBOL(tegra_powergate_power_off); + +/** + * tegra_powergate_is_powered() - check if partition is powered + * @id: partition ID + */ +int tegra_powergate_is_powered(int id) +{ + u32 status; + + if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates) + return -EINVAL; + + status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id); + return !!status; +} + +/** + * tegra_powergate_remove_clamping() - remove power clamps for partition + * @id: partition ID + */ +int tegra_powergate_remove_clamping(int id) +{ + u32 mask; + + if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates) + return -EINVAL; + + /* + * The Tegra124 GPU has a separate register (with different semantics) + * to remove clamps. 
+ */ + if (tegra_get_chip_id() == TEGRA124) { + if (id == TEGRA_POWERGATE_3D) { + tegra_pmc_writel(0, GPU_RG_CNTRL); + return 0; + } + } + + /* + * Tegra 2 has a bug where PCIE and VDE clamping masks are + * swapped relatively to the partition ids + */ + if (id == TEGRA_POWERGATE_VDEC) + mask = (1 << TEGRA_POWERGATE_PCIE); + else if (id == TEGRA_POWERGATE_PCIE) + mask = (1 << TEGRA_POWERGATE_VDEC); + else + mask = (1 << id); + + tegra_pmc_writel(mask, REMOVE_CLAMPING); + + return 0; +} +EXPORT_SYMBOL(tegra_powergate_remove_clamping); + +/** + * tegra_powergate_sequence_power_up() - power up partition + * @id: partition ID + * @clk: clock for partition + * @rst: reset for partition + * + * Must be called with clk disabled, and returns with clk enabled. + */ +int tegra_powergate_sequence_power_up(int id, struct clk *clk, + struct reset_control *rst) +{ + int ret; + + reset_control_assert(rst); + + ret = tegra_powergate_power_on(id); + if (ret) + goto err_power; + + ret = clk_prepare_enable(clk); + if (ret) + goto err_clk; + + usleep_range(10, 20); + + ret = tegra_powergate_remove_clamping(id); + if (ret) + goto err_clamp; + + usleep_range(10, 20); + reset_control_deassert(rst); + + return 0; + +err_clamp: + clk_disable_unprepare(clk); +err_clk: + tegra_powergate_power_off(id); +err_power: + return ret; +} +EXPORT_SYMBOL(tegra_powergate_sequence_power_up); + +#ifdef CONFIG_SMP +/** + * tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID + * @cpuid: CPU partition ID + * + * Returns the partition ID corresponding to the CPU partition ID or a + * negative error code on failure. + */ +static int tegra_get_cpu_powergate_id(int cpuid) +{ + if (pmc->soc && cpuid > 0 && cpuid < pmc->soc->num_cpu_powergates) + return pmc->soc->cpu_powergates[cpuid]; + + return -EINVAL; +} + +/** + * tegra_pmc_cpu_is_powered() - check if CPU partition is powered + * @cpuid: CPU partition ID + */ +bool tegra_pmc_cpu_is_powered(int cpuid) +{ + int id; + + id = tegra_get_cpu_powergate_id(cpuid); + if (id < 0) + return false; + + return tegra_powergate_is_powered(id); +} + +/** + * tegra_pmc_cpu_power_on() - power on CPU partition + * @cpuid: CPU partition ID + */ +int tegra_pmc_cpu_power_on(int cpuid) +{ + int id; + + id = tegra_get_cpu_powergate_id(cpuid); + if (id < 0) + return id; + + return tegra_powergate_set(id, true); +} + +/** + * tegra_pmc_cpu_remove_clamping() - remove power clamps for CPU partition + * @cpuid: CPU partition ID + */ +int tegra_pmc_cpu_remove_clamping(int cpuid) +{ + int id; + + id = tegra_get_cpu_powergate_id(cpuid); + if (id < 0) + return id; + + return tegra_powergate_remove_clamping(id); +} +#endif /* CONFIG_SMP */ + +/** + * tegra_pmc_restart() - reboot the system + * @mode: which mode to reboot in + * @cmd: reboot command + */ +void tegra_pmc_restart(enum reboot_mode mode, const char *cmd) +{ + u32 value; + + value = tegra_pmc_readl(PMC_SCRATCH0); + value &= ~PMC_SCRATCH0_MODE_MASK; + + if (cmd) { + if (strcmp(cmd, "recovery") == 0) + value |= PMC_SCRATCH0_MODE_RECOVERY; + + if (strcmp(cmd, "bootloader") == 0) + value |= PMC_SCRATCH0_MODE_BOOTLOADER; + + if (strcmp(cmd, "forced-recovery") == 0) + value |= PMC_SCRATCH0_MODE_RCM; + } + + tegra_pmc_writel(value, PMC_SCRATCH0); + + value = tegra_pmc_readl(0); + value |= 0x10; + tegra_pmc_writel(value, 0); +} + +static int powergate_show(struct seq_file *s, void *data) +{ + unsigned int i; + + seq_printf(s, " powergate powered\n"); + seq_printf(s, "------------------\n"); + + for (i = 0; i < pmc->soc->num_powergates; i++) { + if 
(!pmc->soc->powergates[i]) + continue; + + seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i], + tegra_powergate_is_powered(i) ? "yes" : "no"); + } + + return 0; +} + +static int powergate_open(struct inode *inode, struct file *file) +{ + return single_open(file, powergate_show, inode->i_private); +} + +static const struct file_operations powergate_fops = { + .open = powergate_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int tegra_powergate_debugfs_init(void) +{ + struct dentry *d; + + d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL, + &powergate_fops); + if (!d) + return -ENOMEM; + + return 0; +} + +static int tegra_io_rail_prepare(int id, unsigned long *request, + unsigned long *status, unsigned int *bit) +{ + unsigned long rate, value; + struct clk *clk; + + *bit = id % 32; + + /* + * There are two sets of 30 bits to select IO rails, but bits 30 and + * 31 are control bits rather than IO rail selection bits. + */ + if (id > 63 || *bit == 30 || *bit == 31) + return -EINVAL; + + if (id < 32) { + *status = IO_DPD_STATUS; + *request = IO_DPD_REQ; + } else { + *status = IO_DPD2_STATUS; + *request = IO_DPD2_REQ; + } + + clk = clk_get_sys(NULL, "pclk"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + rate = clk_get_rate(clk); + clk_put(clk); + + tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE); + + /* must be at least 200 ns, in APB (PCLK) clock cycles */ + value = DIV_ROUND_UP(1000000000, rate); + value = DIV_ROUND_UP(200, value); + tegra_pmc_writel(value, SEL_DPD_TIM); + + return 0; +} + +static int tegra_io_rail_poll(unsigned long offset, unsigned long mask, + unsigned long val, unsigned long timeout) +{ + unsigned long value; + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_after(timeout, jiffies)) { + value = tegra_pmc_readl(offset); + if ((value & mask) == val) + return 0; + + usleep_range(250, 1000); + } + + return -ETIMEDOUT; +} + +static void tegra_io_rail_unprepare(void) +{ + tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE); +} + +int tegra_io_rail_power_on(int id) +{ + unsigned long request, status, value; + unsigned int bit, mask; + int err; + + err = tegra_io_rail_prepare(id, &request, &status, &bit); + if (err < 0) + return err; + + mask = 1 << bit; + + value = tegra_pmc_readl(request); + value |= mask; + value &= ~IO_DPD_REQ_CODE_MASK; + value |= IO_DPD_REQ_CODE_OFF; + tegra_pmc_writel(value, request); + + err = tegra_io_rail_poll(status, mask, 0, 250); + if (err < 0) + return err; + + tegra_io_rail_unprepare(); + + return 0; +} +EXPORT_SYMBOL(tegra_io_rail_power_on); + +int tegra_io_rail_power_off(int id) +{ + unsigned long request, status, value; + unsigned int bit, mask; + int err; + + err = tegra_io_rail_prepare(id, &request, &status, &bit); + if (err < 0) + return err; + + mask = 1 << bit; + + value = tegra_pmc_readl(request); + value |= mask; + value &= ~IO_DPD_REQ_CODE_MASK; + value |= IO_DPD_REQ_CODE_ON; + tegra_pmc_writel(value, request); + + err = tegra_io_rail_poll(status, mask, mask, 250); + if (err < 0) + return err; + + tegra_io_rail_unprepare(); + + return 0; +} +EXPORT_SYMBOL(tegra_io_rail_power_off); + +#ifdef CONFIG_PM_SLEEP +enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void) +{ + return pmc->suspend_mode; +} + +void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode) +{ + if (mode < TEGRA_SUSPEND_NONE || mode >= TEGRA_MAX_SUSPEND_MODE) + return; + + pmc->suspend_mode = mode; +} + +void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode) +{ + unsigned long long rate = 0; + 
u32 value; + + switch (mode) { + case TEGRA_SUSPEND_LP1: + rate = 32768; + break; + + case TEGRA_SUSPEND_LP2: + rate = clk_get_rate(pmc->clk); + break; + + default: + break; + } + + if (WARN_ON_ONCE(rate == 0)) + rate = 100000000; + + if (rate != pmc->rate) { + u64 ticks; + + ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1; + do_div(ticks, USEC_PER_SEC); + tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER); + + ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1; + do_div(ticks, USEC_PER_SEC); + tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER); + + wmb(); + + pmc->rate = rate; + } + + value = tegra_pmc_readl(PMC_CNTRL); + value &= ~PMC_CNTRL_SIDE_EFFECT_LP0; + value |= PMC_CNTRL_CPU_PWRREQ_OE; + tegra_pmc_writel(value, PMC_CNTRL); +} +#endif + +static int tegra_pmc_parse_dt(struct tegra_pmc *pmc, struct device_node *np) +{ + u32 value, values[2]; + + if (of_property_read_u32(np, "nvidia,suspend-mode", &value)) { + } else { + switch (value) { + case 0: + pmc->suspend_mode = TEGRA_SUSPEND_LP0; + break; + + case 1: + pmc->suspend_mode = TEGRA_SUSPEND_LP1; + break; + + case 2: + pmc->suspend_mode = TEGRA_SUSPEND_LP2; + break; + + default: + pmc->suspend_mode = TEGRA_SUSPEND_NONE; + break; + } + } + + pmc->suspend_mode = tegra_pm_validate_suspend_mode(pmc->suspend_mode); + + if (of_property_read_u32(np, "nvidia,cpu-pwr-good-time", &value)) + pmc->suspend_mode = TEGRA_SUSPEND_NONE; + + pmc->cpu_good_time = value; + + if (of_property_read_u32(np, "nvidia,cpu-pwr-off-time", &value)) + pmc->suspend_mode = TEGRA_SUSPEND_NONE; + + pmc->cpu_off_time = value; + + if (of_property_read_u32_array(np, "nvidia,core-pwr-good-time", + values, ARRAY_SIZE(values))) + pmc->suspend_mode = TEGRA_SUSPEND_NONE; + + pmc->core_osc_time = values[0]; + pmc->core_pmu_time = values[1]; + + if (of_property_read_u32(np, "nvidia,core-pwr-off-time", &value)) + pmc->suspend_mode = TEGRA_SUSPEND_NONE; + + pmc->core_off_time = value; + + pmc->corereq_high = of_property_read_bool(np, + "nvidia,core-power-req-active-high"); + + pmc->sysclkreq_high = of_property_read_bool(np, + "nvidia,sys-clock-req-active-high"); + + pmc->combined_req = of_property_read_bool(np, + "nvidia,combined-power-req"); + + pmc->cpu_pwr_good_en = of_property_read_bool(np, + "nvidia,cpu-pwr-good-en"); + + if (of_property_read_u32_array(np, "nvidia,lp0-vec", values, + ARRAY_SIZE(values))) + if (pmc->suspend_mode == TEGRA_SUSPEND_LP0) + pmc->suspend_mode = TEGRA_SUSPEND_LP1; + + pmc->lp0_vec_phys = values[0]; + pmc->lp0_vec_size = values[1]; + + return 0; +} + +static void tegra_pmc_init(struct tegra_pmc *pmc) +{ + u32 value; + + /* Always enable CPU power request */ + value = tegra_pmc_readl(PMC_CNTRL); + value |= PMC_CNTRL_CPU_PWRREQ_OE; + tegra_pmc_writel(value, PMC_CNTRL); + + value = tegra_pmc_readl(PMC_CNTRL); + + if (pmc->sysclkreq_high) + value &= ~PMC_CNTRL_SYSCLK_POLARITY; + else + value |= PMC_CNTRL_SYSCLK_POLARITY; + + /* configure the output polarity while the request is tristated */ + tegra_pmc_writel(value, PMC_CNTRL); + + /* now enable the request */ + value = tegra_pmc_readl(PMC_CNTRL); + value |= PMC_CNTRL_SYSCLK_OE; + tegra_pmc_writel(value, PMC_CNTRL); +} + +static int tegra_pmc_probe(struct platform_device *pdev) +{ + void __iomem *base = pmc->base; + struct resource *res; + int err; + + err = tegra_pmc_parse_dt(pmc, pdev->dev.of_node); + if (err < 0) + return err; + + /* take over the memory region from the early initialization */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pmc->base = devm_ioremap_resource(&pdev->dev, res); + 
if (IS_ERR(pmc->base)) + return PTR_ERR(pmc->base); + + iounmap(base); + + pmc->clk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(pmc->clk)) { + err = PTR_ERR(pmc->clk); + dev_err(&pdev->dev, "failed to get pclk: %d\n", err); + return err; + } + + tegra_pmc_init(pmc); + + if (IS_ENABLED(CONFIG_DEBUG_FS)) { + err = tegra_powergate_debugfs_init(); + if (err < 0) + return err; + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int tegra_pmc_suspend(struct device *dev) +{ + tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41); + + return 0; +} + +static int tegra_pmc_resume(struct device *dev) +{ + tegra_pmc_writel(0x0, PMC_SCRATCH41); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume); + +static const char * const tegra20_powergates[] = { + [TEGRA_POWERGATE_CPU] = "cpu", + [TEGRA_POWERGATE_3D] = "3d", + [TEGRA_POWERGATE_VENC] = "venc", + [TEGRA_POWERGATE_VDEC] = "vdec", + [TEGRA_POWERGATE_PCIE] = "pcie", + [TEGRA_POWERGATE_L2] = "l2", + [TEGRA_POWERGATE_MPE] = "mpe", +}; + +static const struct tegra_pmc_soc tegra20_pmc_soc = { + .num_powergates = ARRAY_SIZE(tegra20_powergates), + .powergates = tegra20_powergates, + .num_cpu_powergates = 0, + .cpu_powergates = NULL, +}; + +static const char * const tegra30_powergates[] = { + [TEGRA_POWERGATE_CPU] = "cpu0", + [TEGRA_POWERGATE_3D] = "3d0", + [TEGRA_POWERGATE_VENC] = "venc", + [TEGRA_POWERGATE_VDEC] = "vdec", + [TEGRA_POWERGATE_PCIE] = "pcie", + [TEGRA_POWERGATE_L2] = "l2", + [TEGRA_POWERGATE_MPE] = "mpe", + [TEGRA_POWERGATE_HEG] = "heg", + [TEGRA_POWERGATE_SATA] = "sata", + [TEGRA_POWERGATE_CPU1] = "cpu1", + [TEGRA_POWERGATE_CPU2] = "cpu2", + [TEGRA_POWERGATE_CPU3] = "cpu3", + [TEGRA_POWERGATE_CELP] = "celp", + [TEGRA_POWERGATE_3D1] = "3d1", +}; + +static const u8 tegra30_cpu_powergates[] = { + TEGRA_POWERGATE_CPU, + TEGRA_POWERGATE_CPU1, + TEGRA_POWERGATE_CPU2, + TEGRA_POWERGATE_CPU3, +}; + +static const struct tegra_pmc_soc tegra30_pmc_soc = { + .num_powergates = ARRAY_SIZE(tegra30_powergates), + .powergates = tegra30_powergates, + .num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates), + .cpu_powergates = tegra30_cpu_powergates, +}; + +static const char * const tegra114_powergates[] = { + [TEGRA_POWERGATE_CPU] = "crail", + [TEGRA_POWERGATE_3D] = "3d", + [TEGRA_POWERGATE_VENC] = "venc", + [TEGRA_POWERGATE_VDEC] = "vdec", + [TEGRA_POWERGATE_MPE] = "mpe", + [TEGRA_POWERGATE_HEG] = "heg", + [TEGRA_POWERGATE_CPU1] = "cpu1", + [TEGRA_POWERGATE_CPU2] = "cpu2", + [TEGRA_POWERGATE_CPU3] = "cpu3", + [TEGRA_POWERGATE_CELP] = "celp", + [TEGRA_POWERGATE_CPU0] = "cpu0", + [TEGRA_POWERGATE_C0NC] = "c0nc", + [TEGRA_POWERGATE_C1NC] = "c1nc", + [TEGRA_POWERGATE_DIS] = "dis", + [TEGRA_POWERGATE_DISB] = "disb", + [TEGRA_POWERGATE_XUSBA] = "xusba", + [TEGRA_POWERGATE_XUSBB] = "xusbb", + [TEGRA_POWERGATE_XUSBC] = "xusbc", +}; + +static const u8 tegra114_cpu_powergates[] = { + TEGRA_POWERGATE_CPU0, + TEGRA_POWERGATE_CPU1, + TEGRA_POWERGATE_CPU2, + TEGRA_POWERGATE_CPU3, +}; + +static const struct tegra_pmc_soc tegra114_pmc_soc = { + .num_powergates = ARRAY_SIZE(tegra114_powergates), + .powergates = tegra114_powergates, + .num_cpu_powergates = ARRAY_SIZE(tegra114_cpu_powergates), + .cpu_powergates = tegra114_cpu_powergates, +}; + +static const char * const tegra124_powergates[] = { + [TEGRA_POWERGATE_CPU] = "crail", + [TEGRA_POWERGATE_3D] = "3d", + [TEGRA_POWERGATE_VENC] = "venc", + [TEGRA_POWERGATE_PCIE] = "pcie", + [TEGRA_POWERGATE_VDEC] = "vdec", + [TEGRA_POWERGATE_L2] = "l2", + 
[TEGRA_POWERGATE_MPE] = "mpe", + [TEGRA_POWERGATE_HEG] = "heg", + [TEGRA_POWERGATE_SATA] = "sata", + [TEGRA_POWERGATE_CPU1] = "cpu1", + [TEGRA_POWERGATE_CPU2] = "cpu2", + [TEGRA_POWERGATE_CPU3] = "cpu3", + [TEGRA_POWERGATE_CELP] = "celp", + [TEGRA_POWERGATE_CPU0] = "cpu0", + [TEGRA_POWERGATE_C0NC] = "c0nc", + [TEGRA_POWERGATE_C1NC] = "c1nc", + [TEGRA_POWERGATE_SOR] = "sor", + [TEGRA_POWERGATE_DIS] = "dis", + [TEGRA_POWERGATE_DISB] = "disb", + [TEGRA_POWERGATE_XUSBA] = "xusba", + [TEGRA_POWERGATE_XUSBB] = "xusbb", + [TEGRA_POWERGATE_XUSBC] = "xusbc", + [TEGRA_POWERGATE_VIC] = "vic", + [TEGRA_POWERGATE_IRAM] = "iram", +}; + +static const u8 tegra124_cpu_powergates[] = { + TEGRA_POWERGATE_CPU0, + TEGRA_POWERGATE_CPU1, + TEGRA_POWERGATE_CPU2, + TEGRA_POWERGATE_CPU3, +}; + +static const struct tegra_pmc_soc tegra124_pmc_soc = { + .num_powergates = ARRAY_SIZE(tegra124_powergates), + .powergates = tegra124_powergates, + .num_cpu_powergates = ARRAY_SIZE(tegra124_cpu_powergates), + .cpu_powergates = tegra124_cpu_powergates, +}; + +static const struct of_device_id tegra_pmc_match[] = { + { .compatible = "nvidia,tegra124-pmc", .data = &tegra124_pmc_soc }, + { .compatible = "nvidia,tegra114-pmc", .data = &tegra114_pmc_soc }, + { .compatible = "nvidia,tegra30-pmc", .data = &tegra30_pmc_soc }, + { .compatible = "nvidia,tegra20-pmc", .data = &tegra20_pmc_soc }, + { } +}; + +static struct platform_driver tegra_pmc_driver = { + .driver = { + .name = "tegra-pmc", + .suppress_bind_attrs = true, + .of_match_table = tegra_pmc_match, + .pm = &tegra_pmc_pm_ops, + }, + .probe = tegra_pmc_probe, +}; +module_platform_driver(tegra_pmc_driver); + +/* + * Early initialization to allow access to registers in the very early boot + * process. + */ +static int __init tegra_pmc_early_init(void) +{ + const struct of_device_id *match; + struct device_node *np; + struct resource regs; + bool invert; + u32 value; + + if (!soc_is_tegra()) + return 0; + + np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match); + if (!np) { + pr_warn("PMC device node not found, disabling powergating\n"); + + regs.start = 0x7000e400; + regs.end = 0x7000e7ff; + regs.flags = IORESOURCE_MEM; + + pr_warn("Using memory region %pR\n", ®s); + } else { + pmc->soc = match->data; + } + + if (of_address_to_resource(np, 0, ®s) < 0) { + pr_err("failed to get PMC registers\n"); + return -ENXIO; + } + + pmc->base = ioremap_nocache(regs.start, resource_size(®s)); + if (!pmc->base) { + pr_err("failed to map PMC registers\n"); + return -ENXIO; + } + + mutex_init(&pmc->powergates_lock); + + invert = of_property_read_bool(np, "nvidia,invert-interrupt"); + + value = tegra_pmc_readl(PMC_CNTRL); + + if (invert) + value |= PMC_CNTRL_INTR_POLARITY; + else + value &= ~PMC_CNTRL_INTR_POLARITY; + + tegra_pmc_writel(value, PMC_CNTRL); + + return 0; +} +early_initcall(tegra_pmc_early_init); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 62e2242ad7e0..84e7c9e6ccef 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -69,6 +69,7 @@ config SPI_ATH79 config SPI_ATMEL tristate "Atmel SPI Controller" + depends on HAS_DMA depends on (ARCH_AT91 || AVR32 || COMPILE_TEST) help This selects a driver for the Atmel SPI Controller, present on @@ -112,6 +113,14 @@ config SPI_AU1550 If you say yes to this option, support will be included for the PSC SPI controller found on Au1550, Au1200 and Au1300 series. 
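
The tegra_powergate_sequence_power_up() helper added in the PMC driver above wraps the reset-assert, power-on, clock-enable and unclamp ordering into a single call that must be entered with the clock disabled and returns with it enabled. As a rough usage sketch for a hypothetical consumer driver (the partition choice, device pointer and error handling are illustrative assumptions, not part of the patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

static int example_venc_power_up(struct device *dev, struct clk *clk,
				 struct reset_control *rst)
{
	int err;

	/*
	 * Asserts the reset, ungates power, enables the clock, removes the
	 * clamps and deasserts the reset; clk must be disabled on entry and
	 * is left enabled on success.
	 */
	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VENC, clk, rst);
	if (err < 0) {
		dev_err(dev, "failed to power up VENC partition: %d\n", err);
		return err;
	}

	/* ... program the now-powered hardware ... */

	return 0;
}
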
+config SPI_BCM53XX + tristate "Broadcom BCM53xx SPI controller" + depends on ARCH_BCM_5301X + depends on BCMA_POSSIBLE + select BCMA + help + Enable support for the SPI controller on Broadcom BCM53xx ARM SoCs. + config SPI_BCM63XX tristate "Broadcom BCM63xx SPI controller" depends on BCM63XX @@ -185,6 +194,7 @@ config SPI_EFM32 config SPI_EP93XX tristate "Cirrus Logic EP93xx SPI controller" + depends on HAS_DMA depends on ARCH_EP93XX || COMPILE_TEST help This enables using the Cirrus EP93xx SPI controller in master @@ -314,6 +324,7 @@ config SPI_OMAP_UWIRE config SPI_OMAP24XX tristate "McSPI driver for OMAP" + depends on HAS_DMA depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH depends on ARCH_OMAP2PLUS || COMPILE_TEST help @@ -380,7 +391,7 @@ config SPI_PXA2XX additional documentation can be found a Documentation/spi/pxa2xx. config SPI_PXA2XX_PCI - def_tristate SPI_PXA2XX && PCI + def_tristate SPI_PXA2XX && PCI && COMMON_CLK config SPI_ROCKCHIP tristate "Rockchip SPI controller driver" @@ -500,7 +511,7 @@ config SPI_MXS config SPI_TEGRA114 tristate "NVIDIA Tegra114 SPI Controller" depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST - depends on RESET_CONTROLLER + depends on RESET_CONTROLLER && HAS_DMA help SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller is different than the older SoCs SPI controller and also register interface @@ -518,7 +529,7 @@ config SPI_TEGRA20_SFLASH config SPI_TEGRA20_SLINK tristate "Nvidia Tegra20/Tegra30 SLINK Controller" depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST - depends on RESET_CONTROLLER + depends on RESET_CONTROLLER && HAS_DMA help SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface. @@ -591,7 +602,7 @@ config SPI_DW_PCI depends on SPI_DESIGNWARE && PCI config SPI_DW_MID_DMA - bool "DMA support for DW SPI controller on Intel Moorestown platform" + bool "DMA support for DW SPI controller on Intel MID platform" depends on SPI_DW_PCI && INTEL_MID_DMAC config SPI_DW_MMIO diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 762da0741148..78f24ca36fcf 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o obj-$(CONFIG_SPI_ATH79) += spi-ath79.o obj-$(CONFIG_SPI_AU1550) += spi-au1550.o obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o +obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c index fb61464348a1..f40b34cdf2fc 100644 --- a/drivers/spi/spi-au1550.c +++ b/drivers/spi/spi-au1550.c @@ -141,13 +141,13 @@ static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw) PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO | PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO | PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD; - au_sync(); + wmb(); /* drain writebuffer */ hw->regs->psc_spievent = PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO | PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD; - au_sync(); + wmb(); /* drain writebuffer */ } static void au1550_spi_reset_fifos(struct au1550_spi *hw) @@ -155,10 +155,10 @@ static void au1550_spi_reset_fifos(struct au1550_spi *hw) u32 pcr; hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC; - au_sync(); + wmb(); /* drain writebuffer */ do { pcr = hw->regs->psc_spipcr; - au_sync(); + wmb(); /* drain writebuffer */ } while (pcr != 0); } @@ -188,9 
+188,9 @@ static void au1550_spi_chipsel(struct spi_device *spi, int value) au1550_spi_bits_handlers_set(hw, spi->bits_per_word); cfg = hw->regs->psc_spicfg; - au_sync(); + wmb(); /* drain writebuffer */ hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; - au_sync(); + wmb(); /* drain writebuffer */ if (spi->mode & SPI_CPOL) cfg |= PSC_SPICFG_BI; @@ -218,10 +218,10 @@ static void au1550_spi_chipsel(struct spi_device *spi, int value) cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz); hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE; - au_sync(); + wmb(); /* drain writebuffer */ do { stat = hw->regs->psc_spistat; - au_sync(); + wmb(); /* drain writebuffer */ } while ((stat & PSC_SPISTAT_DR) == 0); if (hw->pdata->activate_cs) @@ -252,9 +252,9 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) au1550_spi_bits_handlers_set(hw, spi->bits_per_word); cfg = hw->regs->psc_spicfg; - au_sync(); + wmb(); /* drain writebuffer */ hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE; - au_sync(); + wmb(); /* drain writebuffer */ if (hw->usedma && bpw <= 8) cfg &= ~PSC_SPICFG_DD_DISABLE; @@ -268,12 +268,12 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) cfg |= au1550_spi_baudcfg(hw, hz); hw->regs->psc_spicfg = cfg; - au_sync(); + wmb(); /* drain writebuffer */ if (cfg & PSC_SPICFG_DE_ENABLE) { do { stat = hw->regs->psc_spistat; - au_sync(); + wmb(); /* drain writebuffer */ } while ((stat & PSC_SPISTAT_DR) == 0); } @@ -396,11 +396,11 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t) /* by default enable nearly all events interrupt */ hw->regs->psc_spimsk = PSC_SPIMSK_SD; - au_sync(); + wmb(); /* drain writebuffer */ /* start the transfer */ hw->regs->psc_spipcr = PSC_SPIPCR_MS; - au_sync(); + wmb(); /* drain writebuffer */ wait_for_completion(&hw->master_done); @@ -429,7 +429,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) stat = hw->regs->psc_spistat; evnt = hw->regs->psc_spievent; - au_sync(); + wmb(); /* drain writebuffer */ if ((stat & PSC_SPISTAT_DI) == 0) { dev_err(hw->dev, "Unexpected IRQ!\n"); return IRQ_NONE; @@ -484,7 +484,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \ { \ u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \ - au_sync(); \ + wmb(); /* drain writebuffer */ \ if (hw->rx) { \ *(u##size *)hw->rx = (u##size)fifoword; \ hw->rx += (size) / 8; \ @@ -504,7 +504,7 @@ static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \ if (hw->tx_count >= hw->len) \ fifoword |= PSC_SPITXRX_LC; \ hw->regs->psc_spitxrx = fifoword; \ - au_sync(); \ + wmb(); /* drain writebuffer */ \ } AU1550_SPI_RX_WORD(8,0xff) @@ -539,18 +539,18 @@ static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t) } stat = hw->regs->psc_spistat; - au_sync(); + wmb(); /* drain writebuffer */ if (stat & PSC_SPISTAT_TF) break; } /* enable event interrupts */ hw->regs->psc_spimsk = mask; - au_sync(); + wmb(); /* drain writebuffer */ /* start the transfer */ hw->regs->psc_spipcr = PSC_SPIPCR_MS; - au_sync(); + wmb(); /* drain writebuffer */ wait_for_completion(&hw->master_done); @@ -564,7 +564,7 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) stat = hw->regs->psc_spistat; evnt = hw->regs->psc_spievent; - au_sync(); + wmb(); /* drain writebuffer */ if ((stat & PSC_SPISTAT_DI) == 0) { dev_err(hw->dev, "Unexpected IRQ!\n"); return IRQ_NONE; @@ -594,7 +594,7 @@ static 
irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) do { busy = 0; stat = hw->regs->psc_spistat; - au_sync(); + wmb(); /* drain writebuffer */ /* * Take care to not let the Rx FIFO overflow. @@ -615,7 +615,7 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) } while (busy); hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR; - au_sync(); + wmb(); /* drain writebuffer */ /* * Restart the SPI transmission in case of a transmit underflow. @@ -634,9 +634,9 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw) */ if (evnt & PSC_SPIEVNT_TU) { hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD; - au_sync(); + wmb(); /* drain writebuffer */ hw->regs->psc_spipcr = PSC_SPIPCR_MS; - au_sync(); + wmb(); /* drain writebuffer */ } if (hw->rx_count >= hw->len) { @@ -690,19 +690,19 @@ static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw) /* set up the PSC for SPI mode */ hw->regs->psc_ctrl = PSC_CTRL_DISABLE; - au_sync(); + wmb(); /* drain writebuffer */ hw->regs->psc_sel = PSC_SEL_PS_SPIMODE; - au_sync(); + wmb(); /* drain writebuffer */ hw->regs->psc_spicfg = 0; - au_sync(); + wmb(); /* drain writebuffer */ hw->regs->psc_ctrl = PSC_CTRL_ENABLE; - au_sync(); + wmb(); /* drain writebuffer */ do { stat = hw->regs->psc_spistat; - au_sync(); + wmb(); /* drain writebuffer */ } while ((stat & PSC_SPISTAT_SR) == 0); @@ -717,16 +717,16 @@ static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw) #endif hw->regs->psc_spicfg = cfg; - au_sync(); + wmb(); /* drain writebuffer */ au1550_spi_mask_ack_all(hw); hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE; - au_sync(); + wmb(); /* drain writebuffer */ do { stat = hw->regs->psc_spistat; - au_sync(); + wmb(); /* drain writebuffer */ } while ((stat & PSC_SPISTAT_DR) == 0); au1550_spi_reset_fifos(hw); @@ -945,7 +945,7 @@ static int au1550_spi_remove(struct platform_device *pdev) spi_bitbang_stop(&hw->bitbang); free_irq(hw->irq, hw); iounmap((void __iomem *)hw->regs); - release_mem_region(r->start, sizeof(psc_spi_t)); + release_mem_region(hw->ioarea->start, sizeof(psc_spi_t)); if (hw->usedma) { au1550_spi_dma_rxtmp_free(hw); diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c new file mode 100644 index 000000000000..17b34cbadc03 --- /dev/null +++ b/drivers/spi/spi-bcm53xx.c @@ -0,0 +1,299 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/bcma/bcma.h> +#include <linux/spi/spi.h> + +#include "spi-bcm53xx.h" + +#define BCM53XXSPI_MAX_SPI_BAUD 13500000 /* 216 MHz? */ + +/* The longest observed required wait was 19 ms */ +#define BCM53XXSPI_SPE_TIMEOUT_MS 80 + +struct bcm53xxspi { + struct bcma_device *core; + struct spi_master *master; + + size_t read_offset; +}; + +static inline u32 bcm53xxspi_read(struct bcm53xxspi *b53spi, u16 offset) +{ + return bcma_read32(b53spi->core, offset); +} + +static inline void bcm53xxspi_write(struct bcm53xxspi *b53spi, u16 offset, + u32 value) +{ + bcma_write32(b53spi->core, offset, value); +} + +static inline unsigned int bcm53xxspi_calc_timeout(size_t len) +{ + /* Do some magic calculation based on length and buad. Add 10% and 1. 
*/ + return (len * 9000 / BCM53XXSPI_MAX_SPI_BAUD * 110 / 100) + 1; +} + +static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms) +{ + unsigned long deadline; + u32 tmp; + + /* SPE bit has to be 0 before we read MSPI STATUS */ + deadline = jiffies + BCM53XXSPI_SPE_TIMEOUT_MS * HZ / 1000; + do { + tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2); + if (!(tmp & B53SPI_MSPI_SPCR2_SPE)) + break; + udelay(5); + } while (!time_after_eq(jiffies, deadline)); + + if (tmp & B53SPI_MSPI_SPCR2_SPE) + goto spi_timeout; + + /* Check status */ + deadline = jiffies + timeout_ms * HZ / 1000; + do { + tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS); + if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) { + bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0); + return 0; + } + + cpu_relax(); + udelay(100); + } while (!time_after_eq(jiffies, deadline)); + +spi_timeout: + bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0); + + pr_err("Timeout waiting for SPI to be ready!\n"); + + return -EBUSY; +} + +static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf, + size_t len, bool cont) +{ + u32 tmp; + int i; + + for (i = 0; i < len; i++) { + /* Transmit Register File MSB */ + bcm53xxspi_write(b53spi, B53SPI_MSPI_TXRAM + 4 * (i * 2), + (unsigned int)w_buf[i]); + } + + for (i = 0; i < len; i++) { + tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL | + B53SPI_CDRAM_PCS_DSCK; + if (!cont && i == len - 1) + tmp &= ~B53SPI_CDRAM_CONT; + tmp &= ~0x1; + /* Command Register File */ + bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp); + } + + /* Set queue pointers */ + bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0); + bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1); + + if (cont) + bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1); + + /* Start SPI transfer */ + tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2); + tmp |= B53SPI_MSPI_SPCR2_SPE; + if (cont) + tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD; + bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp); + + /* Wait for SPI to finish */ + bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len)); + + if (!cont) + bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0); + + b53spi->read_offset = len; +} + +static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf, + size_t len, bool cont) +{ + u32 tmp; + int i; + + for (i = 0; i < b53spi->read_offset + len; i++) { + tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL | + B53SPI_CDRAM_PCS_DSCK; + if (!cont && i == b53spi->read_offset + len - 1) + tmp &= ~B53SPI_CDRAM_CONT; + tmp &= ~0x1; + /* Command Register File */ + bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp); + } + + /* Set queue pointers */ + bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0); + bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, + b53spi->read_offset + len - 1); + + if (cont) + bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1); + + /* Start SPI transfer */ + tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2); + tmp |= B53SPI_MSPI_SPCR2_SPE; + if (cont) + tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD; + bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp); + + /* Wait for SPI to finish */ + bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len)); + + if (!cont) + bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0); + + for (i = 0; i < len; ++i) { + int offset = b53spi->read_offset + i; + + /* Data stored in the transmit register file LSB */ + r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2)); + } + + b53spi->read_offset = 0; +} + +static int bcm53xxspi_transfer_one(struct spi_master 
*master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct bcm53xxspi *b53spi = spi_master_get_devdata(master); + u8 *buf; + size_t left; + + if (t->tx_buf) { + buf = (u8 *)t->tx_buf; + left = t->len; + while (left) { + size_t to_write = min_t(size_t, 16, left); + bool cont = left - to_write > 0; + + bcm53xxspi_buf_write(b53spi, buf, to_write, cont); + left -= to_write; + buf += to_write; + } + } + + if (t->rx_buf) { + buf = (u8 *)t->rx_buf; + left = t->len; + while (left) { + size_t to_read = min_t(size_t, 16 - b53spi->read_offset, + left); + bool cont = left - to_read > 0; + + bcm53xxspi_buf_read(b53spi, buf, to_read, cont); + left -= to_read; + buf += to_read; + } + } + + return 0; +} + +/************************************************** + * BCMA + **************************************************/ + +static struct spi_board_info bcm53xx_info = { + .modalias = "bcm53xxspiflash", +}; + +static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS), + BCMA_CORETABLE_END +}; +MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl); + +static int bcm53xxspi_bcma_probe(struct bcma_device *core) +{ + struct bcm53xxspi *b53spi; + struct spi_master *master; + int err; + + if (core->bus->drv_cc.core->id.rev != 42) { + pr_err("SPI on SoC with unsupported ChipCommon rev\n"); + return -ENOTSUPP; + } + + master = spi_alloc_master(&core->dev, sizeof(*b53spi)); + if (!master) + return -ENOMEM; + + b53spi = spi_master_get_devdata(master); + b53spi->master = master; + b53spi->core = core; + + master->transfer_one = bcm53xxspi_transfer_one; + + bcma_set_drvdata(core, b53spi); + + err = devm_spi_register_master(&core->dev, master); + if (err) { + spi_master_put(master); + bcma_set_drvdata(core, NULL); + goto out; + } + + /* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */ + spi_new_device(master, &bcm53xx_info); + +out: + return err; +} + +static void bcm53xxspi_bcma_remove(struct bcma_device *core) +{ + struct bcm53xxspi *b53spi = bcma_get_drvdata(core); + + spi_unregister_master(b53spi->master); +} + +static struct bcma_driver bcm53xxspi_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = bcm53xxspi_bcma_tbl, + .probe = bcm53xxspi_bcma_probe, + .remove = bcm53xxspi_bcma_remove, +}; + +/************************************************** + * Init & exit + **************************************************/ + +static int __init bcm53xxspi_module_init(void) +{ + int err = 0; + + err = bcma_driver_register(&bcm53xxspi_bcma_driver); + if (err) + pr_err("Failed to register bcma driver: %d\n", err); + + return err; +} + +static void __exit bcm53xxspi_module_exit(void) +{ + bcma_driver_unregister(&bcm53xxspi_bcma_driver); +} + +module_init(bcm53xxspi_module_init); +module_exit(bcm53xxspi_module_exit); + +MODULE_DESCRIPTION("Broadcom BCM53xx SPI Controller driver"); +MODULE_AUTHOR("RafaÅ‚ MiÅ‚ecki <zajec5@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-bcm53xx.h b/drivers/spi/spi-bcm53xx.h new file mode 100644 index 000000000000..73575dfe6916 --- /dev/null +++ b/drivers/spi/spi-bcm53xx.h @@ -0,0 +1,72 @@ +#ifndef SPI_BCM53XX_H +#define SPI_BCM53XX_H + +#define B53SPI_BSPI_REVISION_ID 0x000 +#define B53SPI_BSPI_SCRATCH 0x004 +#define B53SPI_BSPI_MAST_N_BOOT_CTRL 0x008 +#define B53SPI_BSPI_BUSY_STATUS 0x00c +#define B53SPI_BSPI_INTR_STATUS 0x010 +#define B53SPI_BSPI_B0_STATUS 0x014 +#define B53SPI_BSPI_B0_CTRL 0x018 +#define B53SPI_BSPI_B1_STATUS 0x01c +#define B53SPI_BSPI_B1_CTRL 0x020 
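
For reference, the timeout heuristic in bcm53xxspi_calc_timeout() above can be restated with a worked example. The sketch below only re-derives the arithmetic, assuming the 13.5 MHz BCM53XXSPI_MAX_SPI_BAUD value from the driver and C integer division; the interpretation of the 9000 factor is an assumption.

#include <linux/types.h>

/*
 * Restates the driver's heuristic: roughly 9 bit-times per byte scaled to
 * milliseconds, plus a 10% margin and a trailing +1 so that short transfers
 * never end up with a zero timeout.
 */
static inline unsigned int example_calc_timeout_ms(size_t len)
{
	/*
	 * len = 4096 bytes:
	 *   4096 * 9000         = 36864000
	 *   36864000 / 13500000 = 2      (about 2.7 ms, truncated)
	 *   2 * 110 / 100       = 2      (10% margin, truncated again)
	 *   2 + 1               = 3 ms
	 * len = 16 bytes truncates to 0 and relies on the final "+ 1".
	 */
	return (len * 9000 / 13500000 * 110 / 100) + 1;
}
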
+#define B53SPI_BSPI_STRAP_OVERRIDE_CTRL 0x024 +#define B53SPI_BSPI_FLEX_MODE_ENABLE 0x028 +#define B53SPI_BSPI_BITS_PER_CYCLE 0x02c +#define B53SPI_BSPI_BITS_PER_PHASE 0x030 +#define B53SPI_BSPI_CMD_AND_MODE_BYTE 0x034 +#define B53SPI_BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038 +#define B53SPI_BSPI_BSPI_XOR_VALUE 0x03c +#define B53SPI_BSPI_BSPI_XOR_ENABLE 0x040 +#define B53SPI_BSPI_BSPI_PIO_MODE_ENABLE 0x044 +#define B53SPI_BSPI_BSPI_PIO_IODIR 0x048 +#define B53SPI_BSPI_BSPI_PIO_DATA 0x04c + +/* RAF */ +#define B53SPI_RAF_START_ADDR 0x100 +#define B53SPI_RAF_NUM_WORDS 0x104 +#define B53SPI_RAF_CTRL 0x108 +#define B53SPI_RAF_FULLNESS 0x10c +#define B53SPI_RAF_WATERMARK 0x110 +#define B53SPI_RAF_STATUS 0x114 +#define B53SPI_RAF_READ_DATA 0x118 +#define B53SPI_RAF_WORD_CNT 0x11c +#define B53SPI_RAF_CURR_ADDR 0x120 + +/* MSPI */ +#define B53SPI_MSPI_SPCR0_LSB 0x200 +#define B53SPI_MSPI_SPCR0_MSB 0x204 +#define B53SPI_MSPI_SPCR1_LSB 0x208 +#define B53SPI_MSPI_SPCR1_MSB 0x20c +#define B53SPI_MSPI_NEWQP 0x210 +#define B53SPI_MSPI_ENDQP 0x214 +#define B53SPI_MSPI_SPCR2 0x218 +#define B53SPI_MSPI_SPCR2_SPE 0x00000040 +#define B53SPI_MSPI_SPCR2_CONT_AFTER_CMD 0x00000080 +#define B53SPI_MSPI_MSPI_STATUS 0x220 +#define B53SPI_MSPI_MSPI_STATUS_SPIF 0x00000001 +#define B53SPI_MSPI_CPTQP 0x224 +#define B53SPI_MSPI_TXRAM 0x240 /* 32 registers, up to 0x2b8 */ +#define B53SPI_MSPI_RXRAM 0x2c0 /* 32 registers, up to 0x33c */ +#define B53SPI_MSPI_CDRAM 0x340 /* 16 registers, up to 0x37c */ +#define B53SPI_CDRAM_PCS_PCS0 0x00000001 +#define B53SPI_CDRAM_PCS_PCS1 0x00000002 +#define B53SPI_CDRAM_PCS_PCS2 0x00000004 +#define B53SPI_CDRAM_PCS_PCS3 0x00000008 +#define B53SPI_CDRAM_PCS_DISABLE_ALL 0x0000000f +#define B53SPI_CDRAM_PCS_DSCK 0x00000010 +#define B53SPI_CDRAM_BITSE 0x00000040 +#define B53SPI_CDRAM_CONT 0x00000080 +#define B53SPI_MSPI_WRITE_LOCK 0x380 +#define B53SPI_MSPI_DISABLE_FLUSH_GEN 0x384 + +/* Interrupt */ +#define B53SPI_INTR_RAF_LR_FULLNESS_REACHED 0x3a0 +#define B53SPI_INTR_RAF_LR_TRUNCATED 0x3a4 +#define B53SPI_INTR_RAF_LR_IMPATIENT 0x3a8 +#define B53SPI_INTR_RAF_LR_SESSION_DONE 0x3ac +#define B53SPI_INTR_RAF_LR_OVERREAD 0x3b0 +#define B53SPI_INTR_MSPI_DONE 0x3b4 +#define B53SPI_INTR_MSPI_HALT_SET_TRANSACTION_DONE 0x3b8 + +#endif /* SPI_BCM53XX_H */ diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 562ff83debd9..7b811e38c7ad 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -677,7 +677,6 @@ static struct platform_driver cdns_spi_driver = { .remove = cdns_spi_remove, .driver = { .name = CDNS_SPI_NAME, - .owner = THIS_MODULE, .of_match_table = cdns_spi_of_match, .pm = &cdns_spi_dev_pm_ops, }, diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c index ce538dad526b..181cf2262006 100644 --- a/drivers/spi/spi-clps711x.c +++ b/drivers/spi/spi-clps711x.c @@ -30,7 +30,6 @@ struct spi_clps711x_data { void __iomem *syncio; struct regmap *syscon; - struct regmap *syscon1; struct clk *spi_clk; u8 *tx_buf; @@ -47,27 +46,6 @@ static int spi_clps711x_setup(struct spi_device *spi) return 0; } -static void spi_clps711x_setup_xfer(struct spi_device *spi, - struct spi_transfer *xfer) -{ - struct spi_master *master = spi->master; - struct spi_clps711x_data *hw = spi_master_get_devdata(master); - - /* Setup SPI frequency divider */ - if (xfer->speed_hz >= master->max_speed_hz) - regmap_update_bits(hw->syscon1, SYSCON_OFFSET, - SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(3)); - else if (xfer->speed_hz >= (master->max_speed_hz / 2)) - 
regmap_update_bits(hw->syscon1, SYSCON_OFFSET, - SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(2)); - else if (xfer->speed_hz >= (master->max_speed_hz / 8)) - regmap_update_bits(hw->syscon1, SYSCON_OFFSET, - SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(1)); - else - regmap_update_bits(hw->syscon1, SYSCON_OFFSET, - SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(0)); -} - static int spi_clps711x_prepare_message(struct spi_master *master, struct spi_message *msg) { @@ -87,7 +65,7 @@ static int spi_clps711x_transfer_one(struct spi_master *master, struct spi_clps711x_data *hw = spi_master_get_devdata(master); u8 data; - spi_clps711x_setup_xfer(spi, xfer); + clk_set_rate(hw->spi_clk, xfer->speed_hz ? : spi->max_speed_hz); hw->len = xfer->len; hw->bpw = xfer->bits_per_word; @@ -176,13 +154,11 @@ static int spi_clps711x_probe(struct platform_device *pdev) } } - hw->spi_clk = devm_clk_get(&pdev->dev, "spi"); + hw->spi_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hw->spi_clk)) { - dev_err(&pdev->dev, "Can't get clocks\n"); ret = PTR_ERR(hw->spi_clk); goto err_out; } - master->max_speed_hz = clk_get_rate(hw->spi_clk); hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3"); if (IS_ERR(hw->syscon)) { @@ -190,12 +166,6 @@ static int spi_clps711x_probe(struct platform_device *pdev) goto err_out; } - hw->syscon1 = syscon_regmap_lookup_by_pdevname("syscon.1"); - if (IS_ERR(hw->syscon1)) { - ret = PTR_ERR(hw->syscon1); - goto err_out; - } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hw->syncio = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hw->syncio)) { diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 276a3884fb3c..63700ab7bd9f 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -65,6 +65,7 @@ /* SPIDAT1 (upper 16 bit defines) */ #define SPIDAT1_CSHOLD_MASK BIT(12) +#define SPIDAT1_WDEL BIT(10) /* SPIGCR1 */ #define SPIGCR1_CLKMOD_MASK BIT(1) @@ -167,8 +168,10 @@ static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) { u32 data = 0; + if (dspi->tx) { const u8 *tx = dspi->tx; + data = *tx++; dspi->tx = tx; } @@ -178,8 +181,10 @@ static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) { u32 data = 0; + if (dspi->tx) { const u16 *tx = dspi->tx; + data = *tx++; dspi->tx = tx; } @@ -209,6 +214,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) { struct davinci_spi *dspi; struct davinci_spi_platform_data *pdata; + struct davinci_spi_config *spicfg = spi->controller_data; u8 chip_sel = spi->chip_select; u16 spidat1 = CS_DEFAULT; bool gpio_chipsel = false; @@ -223,6 +229,10 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) gpio = spi->cs_gpio; } + /* program delay transfers if tx_delay is non zero */ + if (spicfg->wdelay) + spidat1 |= SPIDAT1_WDEL; + /* * Board specific chip select logic decides the polarity and cs * line for the controller @@ -237,9 +247,9 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) spidat1 |= SPIDAT1_CSHOLD_MASK; spidat1 &= ~(0x1 << chip_sel); } - - iowrite16(spidat1, dspi->base + SPIDAT1 + 2); } + + iowrite16(spidat1, dspi->base + SPIDAT1 + 2); } /** @@ -285,7 +295,7 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, int prescale; dspi = spi_master_get_devdata(spi->master); - spicfg = (struct davinci_spi_config *)spi->controller_data; + spicfg = spi->controller_data; if (!spicfg) spicfg = &davinci_spi_default_cfg; @@ -333,6 
+343,14 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, spifmt |= SPIFMT_PHASE_MASK; /* + * Assume wdelay is used only on SPI peripherals that has this field + * in SPIFMTn register and when it's configured from board file or DT. + */ + if (spicfg->wdelay) + spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) + & SPIFMT_WDELAY_MASK); + + /* * Version 1 hardware supports two basic SPI modes: * - Standard SPI mode uses 4 pins, with chipselect * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) @@ -349,9 +367,6 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, u32 delay = 0; - spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) - & SPIFMT_WDELAY_MASK); - if (spicfg->odd_parity) spifmt |= SPIFMT_ODD_PARITY_MASK; @@ -383,6 +398,26 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, return 0; } +static int davinci_spi_of_setup(struct spi_device *spi) +{ + struct davinci_spi_config *spicfg = spi->controller_data; + struct device_node *np = spi->dev.of_node; + u32 prop; + + if (spicfg == NULL && np) { + spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL); + if (!spicfg) + return -ENOMEM; + *spicfg = davinci_spi_default_cfg; + /* override with dt configured values */ + if (!of_property_read_u32(np, "ti,spi-wdelay", &prop)) + spicfg->wdelay = (u8)prop; + spi->controller_data = spicfg; + } + + return 0; +} + /** * davinci_spi_setup - This functions will set default transfer method * @spi: spi device on which data transfer to be done @@ -397,36 +432,33 @@ static int davinci_spi_setup(struct spi_device *spi) struct spi_master *master = spi->master; struct device_node *np = spi->dev.of_node; bool internal_cs = true; - unsigned long flags = GPIOF_DIR_OUT; dspi = spi_master_get_devdata(spi->master); pdata = &dspi->pdata; - flags |= (spi->mode & SPI_CS_HIGH) ? 
GPIOF_INIT_LOW : GPIOF_INIT_HIGH; - if (!(spi->mode & SPI_NO_CS)) { if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) { - retval = gpio_request_one(spi->cs_gpio, - flags, dev_name(&spi->dev)); + retval = gpio_direction_output( + spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); internal_cs = false; } else if (pdata->chip_sel && spi->chip_select < pdata->num_chipselect && pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) { spi->cs_gpio = pdata->chip_sel[spi->chip_select]; - retval = gpio_request_one(spi->cs_gpio, - flags, dev_name(&spi->dev)); + retval = gpio_direction_output( + spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); internal_cs = false; } - } - if (retval) { - dev_err(&spi->dev, "GPIO %d setup failed (%d)\n", - spi->cs_gpio, retval); - return retval; - } + if (retval) { + dev_err(&spi->dev, "GPIO %d setup failed (%d)\n", + spi->cs_gpio, retval); + return retval; + } - if (internal_cs) - set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); + if (internal_cs) + set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); + } if (spi->mode & SPI_READY) set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); @@ -436,13 +468,16 @@ static int davinci_spi_setup(struct spi_device *spi) else clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); - return retval; + return davinci_spi_of_setup(spi); } static void davinci_spi_cleanup(struct spi_device *spi) { - if (spi->cs_gpio >= 0) - gpio_free(spi->cs_gpio); + struct davinci_spi_config *spicfg = spi->controller_data; + + spi->controller_data = NULL; + if (spi->dev.of_node) + kfree(spicfg); } static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status) @@ -967,6 +1002,27 @@ static int davinci_spi_probe(struct platform_device *pdev) if (dspi->version == SPI_VERSION_2) dspi->bitbang.flags |= SPI_READY; + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < pdata->num_chipselect; i++) { + int cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto free_clk; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto free_clk; + } + } + } + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (r) dma_rx_chan = r->start; @@ -985,8 +1041,8 @@ static int davinci_spi_probe(struct platform_device *pdev) goto free_clk; dev_info(&pdev->dev, "DMA: supported\n"); - dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, " - "event queue: %d\n", &dma_rx_chan, &dma_tx_chan, + dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n", + &dma_rx_chan, &dma_tx_chan, pdata->dma_event_q); } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 6d207afec8cb..46c6d58e1fda 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -1,7 +1,7 @@ /* * Special handling for DW core on Intel MID platform * - * Copyright (c) 2009, Intel Corporation. + * Copyright (c) 2009, 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -11,10 +11,6 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include <linux/dma-mapping.h> @@ -39,22 +35,25 @@ static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) { struct dw_spi *dws = param; - return dws->dmac && (&dws->dmac->dev == chan->device->dev); + return dws->dma_dev == chan->device->dev; } static int mid_spi_dma_init(struct dw_spi *dws) { struct mid_dma *dw_dma = dws->dma_priv; + struct pci_dev *dma_dev; struct intel_mid_dma_slave *rxs, *txs; dma_cap_mask_t mask; /* * Get pci device for DMA controller, currently it could only - * be the DMA controller of either Moorestown or Medfield + * be the DMA controller of Medfield */ - dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL); - if (!dws->dmac) - dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); + dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); + if (!dma_dev) + return -ENODEV; + + dws->dma_dev = &dma_dev->dev; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -83,13 +82,18 @@ static int mid_spi_dma_init(struct dw_spi *dws) free_rxchan: dma_release_channel(dws->rxchan); err_exit: - return -1; - + return -EBUSY; } static void mid_spi_dma_exit(struct dw_spi *dws) { + if (!dws->dma_inited) + return; + + dmaengine_terminate_all(dws->txchan); dma_release_channel(dws->txchan); + + dmaengine_terminate_all(dws->rxchan); dma_release_channel(dws->rxchan); } @@ -109,8 +113,7 @@ static void dw_spi_dma_done(void *arg) static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) { - struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL; - struct dma_chan *txchan, *rxchan; + struct dma_async_tx_descriptor *txdesc, *rxdesc; struct dma_slave_config txconf, rxconf; u16 dma_ctrl = 0; @@ -120,37 +123,34 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) dw_writew(dws, DW_SPI_DMARDLR, 0xf); dw_writew(dws, DW_SPI_DMATDLR, 0x10); if (dws->tx_dma) - dma_ctrl |= 0x2; + dma_ctrl |= SPI_DMA_TDMAE; if (dws->rx_dma) - dma_ctrl |= 0x1; + dma_ctrl |= SPI_DMA_RDMAE; dw_writew(dws, DW_SPI_DMACR, dma_ctrl); spi_enable_chip(dws, 1); } dws->dma_chan_done = 0; - txchan = dws->txchan; - rxchan = dws->rxchan; /* 2. 
Prepare the TX dma transfer */ txconf.direction = DMA_MEM_TO_DEV; txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = LNW_DMA_MSIZE_16; txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + txconf.dst_addr_width = dws->dma_width; txconf.device_fc = false; - txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, - (unsigned long) &txconf); + dmaengine_slave_config(dws->txchan, &txconf); memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); dws->tx_sgl.dma_address = dws->tx_dma; dws->tx_sgl.length = dws->len; - txdesc = dmaengine_prep_slave_sg(txchan, + txdesc = dmaengine_prep_slave_sg(dws->txchan, &dws->tx_sgl, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT); + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); txdesc->callback = dw_spi_dma_done; txdesc->callback_param = dws; @@ -159,27 +159,30 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = LNW_DMA_MSIZE_16; rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + rxconf.src_addr_width = dws->dma_width; rxconf.device_fc = false; - rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, - (unsigned long) &rxconf); + dmaengine_slave_config(dws->rxchan, &rxconf); memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); dws->rx_sgl.dma_address = dws->rx_dma; dws->rx_sgl.length = dws->len; - rxdesc = dmaengine_prep_slave_sg(rxchan, + rxdesc = dmaengine_prep_slave_sg(dws->rxchan, &dws->rx_sgl, 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT); + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); rxdesc->callback = dw_spi_dma_done; rxdesc->callback_param = dws; /* rx must be started before tx due to spi instinct */ - rxdesc->tx_submit(rxdesc); - txdesc->tx_submit(txdesc); + dmaengine_submit(rxdesc); + dma_async_issue_pending(dws->rxchan); + + dmaengine_submit(txdesc); + dma_async_issue_pending(dws->txchan); + return 0; } @@ -190,7 +193,7 @@ static struct dw_spi_dma_ops mid_dma_ops = { }; #endif -/* Some specific info for SPI0 controller on Moorestown */ +/* Some specific info for SPI0 controller on Intel MID */ /* HW info for MRST CLk Control Unit, one 32b reg */ #define MRST_SPI_CLK_BASE 100000000 /* 100m */ diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 3f3dc1226edf..ba68da12cdf0 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c @@ -1,7 +1,7 @@ /* * PCI interface driver for DW SPI Core * - * Copyright (c) 2009, Intel Corporation. + * Copyright (c) 2009, 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -11,10 +11,6 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include <linux/interrupt.h> @@ -32,17 +28,22 @@ struct dw_spi_pci { struct dw_spi dws; }; -static int spi_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +struct spi_pci_desc { + int (*setup)(struct dw_spi *); +}; + +static struct spi_pci_desc spi_pci_mid_desc = { + .setup = dw_spi_mid_init, +}; + +static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct dw_spi_pci *dwpci; struct dw_spi *dws; + struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data; int pci_bar = 0; int ret; - dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n", - pdev->vendor, pdev->device); - ret = pcim_enable_device(pdev); if (ret) return ret; @@ -58,20 +59,22 @@ static int spi_pci_probe(struct pci_dev *pdev, /* Get basic io resource and map it */ dws->paddr = pci_resource_start(pdev, pci_bar); - ret = pcim_iomap_regions(pdev, 1, dev_name(&pdev->dev)); + ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev)); if (ret) return ret; + dws->regs = pcim_iomap_table(pdev)[pci_bar]; + dws->bus_num = 0; dws->num_cs = 4; dws->irq = pdev->irq; /* - * Specific handling for Intel MID paltforms, like dma setup, + * Specific handling for platforms, like dma setup, * clock rate, FIFO depth. */ - if (pdev->device == 0x0800) { - ret = dw_spi_mid_init(dws); + if (desc && desc->setup) { + ret = desc->setup(dws); if (ret) return ret; } @@ -83,6 +86,9 @@ static int spi_pci_probe(struct pci_dev *pdev, /* PCI hook and SPI hook use the same drv data */ pci_set_drvdata(pdev, dwpci); + dev_info(&pdev->dev, "found PCI SPI controller(ID: %04x:%04x)\n", + pdev->vendor, pdev->device); + return 0; } @@ -93,41 +99,29 @@ static void spi_pci_remove(struct pci_dev *pdev) dw_spi_remove_host(&dwpci->dws); } -#ifdef CONFIG_PM -static int spi_suspend(struct pci_dev *pdev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int spi_suspend(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); - int ret; - ret = dw_spi_suspend_host(&dwpci->dws); - if (ret) - return ret; - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return ret; + return dw_spi_suspend_host(&dwpci->dws); } -static int spi_resume(struct pci_dev *pdev) +static int spi_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); - int ret; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) - return ret; return dw_spi_resume_host(&dwpci->dws); } -#else -#define spi_suspend NULL -#define spi_resume NULL #endif +static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume); + static const struct pci_device_id pci_ids[] = { /* Intel MID platform SPI controller 0 */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, + { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc}, {}, }; @@ -136,8 +130,9 @@ static struct pci_driver dw_spi_driver = { .id_table = pci_ids, .probe = spi_pci_probe, .remove = spi_pci_remove, - .suspend = spi_suspend, - .resume = spi_resume, + .driver = { + .pm = &dw_spi_pm_ops, + }, }; module_pci_driver(dw_spi_driver); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 29f33143b795..729215885250 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -11,10 +11,6 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details.
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/dma-mapping.h> @@ -59,22 +55,20 @@ struct chip_data { #ifdef CONFIG_DEBUG_FS #define SPI_REGS_BUFSIZE 1024 -static ssize_t spi_show_regs(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) { - struct dw_spi *dws; + struct dw_spi *dws = file->private_data; char *buf; u32 len = 0; ssize_t ret; - dws = file->private_data; - buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL); if (!buf) return 0; len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, - "MRST SPI0 registers:\n"); + "%s registers:\n", dev_name(&dws->master->dev)); len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, "=================================\n"); len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, @@ -110,42 +104,41 @@ static ssize_t spi_show_regs(struct file *file, char __user *user_buf, len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, "=================================\n"); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret; } -static const struct file_operations mrst_spi_regs_ops = { +static const struct file_operations dw_spi_regs_ops = { .owner = THIS_MODULE, .open = simple_open, - .read = spi_show_regs, + .read = dw_spi_show_regs, .llseek = default_llseek, }; -static int mrst_spi_debugfs_init(struct dw_spi *dws) +static int dw_spi_debugfs_init(struct dw_spi *dws) { - dws->debugfs = debugfs_create_dir("mrst_spi", NULL); + dws->debugfs = debugfs_create_dir("dw_spi", NULL); if (!dws->debugfs) return -ENOMEM; debugfs_create_file("registers", S_IFREG | S_IRUGO, - dws->debugfs, (void *)dws, &mrst_spi_regs_ops); + dws->debugfs, (void *)dws, &dw_spi_regs_ops); return 0; } -static void mrst_spi_debugfs_remove(struct dw_spi *dws) +static void dw_spi_debugfs_remove(struct dw_spi *dws) { - if (dws->debugfs) - debugfs_remove_recursive(dws->debugfs); + debugfs_remove_recursive(dws->debugfs); } #else -static inline int mrst_spi_debugfs_init(struct dw_spi *dws) +static inline int dw_spi_debugfs_init(struct dw_spi *dws) { return 0; } -static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) +static inline void dw_spi_debugfs_remove(struct dw_spi *dws) { } #endif /* CONFIG_DEBUG_FS */ @@ -177,7 +170,7 @@ static inline u32 rx_max(struct dw_spi *dws) { u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; - return min(rx_left, (u32)dw_readw(dws, DW_SPI_RXFLR)); + return min_t(u32, rx_left, dw_readw(dws, DW_SPI_RXFLR)); } static void dw_writer(struct dw_spi *dws) @@ -228,8 +221,9 @@ static void *next_transfer(struct dw_spi *dws) struct spi_transfer, transfer_list); return RUNNING_STATE; - } else - return DONE_STATE; + } + + return DONE_STATE; } /* @@ -271,7 +265,7 @@ static void giveback(struct dw_spi *dws) transfer_list); if (!last_transfer->cs_change) - spi_chip_sel(dws, dws->cur_msg->spi, 0); + spi_chip_sel(dws, msg->spi, 0); spi_finalize_current_message(dws->master); } @@ -396,7 +390,7 @@ static void pump_transfers(unsigned long data) goto early_exit; } - /* Delay if requested at end of transfer*/ + /* Delay if requested at end of transfer */ if (message->state == RUNNING_STATE) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, @@ -471,10 +465,12 @@ static 
void pump_transfers(unsigned long data) */ if (!dws->dma_mapped && !chip->poll_mode) { int templen = dws->len / dws->n_bytes; + txint_level = dws->fifo_len / 2; txint_level = (templen > txint_level) ? txint_level : templen; - imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI; + imask |= SPI_INT_TXEI | SPI_INT_TXOI | + SPI_INT_RXUI | SPI_INT_RXOI; dws->transfer_handler = interrupt_transfer; } @@ -515,7 +511,6 @@ static void pump_transfers(unsigned long data) early_exit: giveback(dws); - return; } static int dw_spi_transfer_one_message(struct spi_master *master, @@ -524,7 +519,7 @@ static int dw_spi_transfer_one_message(struct spi_master *master, struct dw_spi *dws = spi_master_get_devdata(master); dws->cur_msg = msg; - /* Initial message state*/ + /* Initial message state */ dws->cur_msg->state = START_STATE; dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, struct spi_transfer, @@ -547,8 +542,7 @@ static int dw_spi_setup(struct spi_device *spi) /* Only alloc on first setup */ chip = spi_get_ctldata(spi); if (!chip) { - chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data), - GFP_KERNEL); + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM; spi_set_ctldata(spi, chip); @@ -596,6 +590,9 @@ static int dw_spi_setup(struct spi_device *spi) | (spi->mode << SPI_MODE_OFFSET) | (chip->tmode << SPI_TMOD_OFFSET); + if (spi->mode & SPI_LOOP) + chip->cr0 |= 1 << SPI_SRL_OFFSET; + if (gpio_is_valid(spi->cs_gpio)) { ret = gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); @@ -606,6 +603,14 @@ static int dw_spi_setup(struct spi_device *spi) return 0; } +static void dw_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + /* Restart the controller, disable all interrupts, clean rx fifo */ static void spi_hw_init(struct dw_spi *dws) { @@ -619,6 +624,7 @@ static void spi_hw_init(struct dw_spi *dws) */ if (!dws->fifo_len) { u32 fifo; + for (fifo = 2; fifo <= 257; fifo++) { dw_writew(dws, DW_SPI_TXFLTR, fifo); if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) @@ -646,8 +652,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) dws->prev_chip = NULL; dws->dma_inited = 0; dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); - snprintf(dws->name, sizeof(dws->name), "dw_spi%d", - dws->bus_num); + snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, dws); @@ -656,11 +661,12 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) goto err_free_master; } - master->mode_bits = SPI_CPOL | SPI_CPHA; + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->bus_num = dws->bus_num; master->num_chipselect = dws->num_cs; master->setup = dw_spi_setup; + master->cleanup = dw_spi_cleanup; master->transfer_one_message = dw_spi_transfer_one_message; master->max_speed_hz = dws->max_freq; @@ -684,7 +690,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) goto err_dma_exit; } - mrst_spi_debugfs_init(dws); + dw_spi_debugfs_init(dws); return 0; err_dma_exit: @@ -701,7 +707,7 @@ void dw_spi_remove_host(struct dw_spi *dws) { if (!dws) return; - mrst_spi_debugfs_remove(dws); + dw_spi_debugfs_remove(dws); if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h index 6d2acad34f64..83a103a76481 100644 --- a/drivers/spi/spi-dw.h +++ 
b/drivers/spi/spi-dw.h @@ -74,6 +74,10 @@ #define SPI_INT_RXFI (1 << 4) #define SPI_INT_MSTI (1 << 5) +/* Bit fields in DMACR */ +#define SPI_DMA_RDMAE (1 << 0) +#define SPI_DMA_TDMAE (1 << 1) + /* TX RX interrupt level threshold, max can be 256 */ #define SPI_INT_THRESHOLD 32 @@ -140,7 +144,6 @@ struct dw_spi { dma_addr_t dma_addr; /* phy address of the Data register */ struct dw_spi_dma_ops *dma_ops; void *dma_priv; /* platform relate info */ - struct pci_dev *dmac; /* Bus interface info */ void *priv; @@ -217,11 +220,11 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) * Each SPI slave device to work with dw_api controller should * has such a structure claiming its working mode (PIO/DMA etc), * which can be save in the "controller_data" member of the - * struct spi_device + * struct spi_device. */ struct dw_spi_chip { - u8 poll_mode; /* 0 for contoller polling mode */ - u8 type; /* SPI/SSP/Micrwire */ + u8 poll_mode; /* 1 for controller polling mode */ + u8 type; /* SPI/SSP/MicroWire */ u8 enable_dma; void (*cs_control)(u32 command); }; diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index 2f675d32df0e..bf9728773247 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -266,6 +266,7 @@ static int ep93xx_spi_setup(struct spi_device *spi) if (chip->ops && chip->ops->setup) { int ret = chip->ops->setup(spi); + if (ret) { kfree(chip); return ret; diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index 54b06376f03c..c5dd20beee22 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c @@ -15,17 +15,17 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/spi/spi.h> -#include <linux/fsl_devices.h> -#include <linux/dma-mapping.h> -#include <linux/of_address.h> #include <asm/cpm.h> #include <asm/qe.h> +#include <linux/dma-mapping.h> +#include <linux/fsl_devices.h> +#include <linux/kernel.h> +#include <linux/of_address.h> +#include <linux/spi/spi.h> +#include <linux/types.h> -#include "spi-fsl-lib.h" #include "spi-fsl-cpm.h" +#include "spi-fsl-lib.h" #include "spi-fsl-spi.h" /* CPM1 and CPM2 are mutually exclusive. */ diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 5021ddf03f60..448216025ce8 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -13,22 +13,22 @@ * */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/errno.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/sched.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/err.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> -#include <linux/pm_runtime.h> -#include <linux/of.h> -#include <linux/of_device.h> #define DRIVER_NAME "fsl-dspi" @@ -493,9 +493,6 @@ static int dspi_probe(struct platform_device *pdev) } dspi_regmap_config.lock_arg = dspi; - dspi_regmap_config.val_format_endian = - of_property_read_bool(np, "big-endian") - ? 
REGMAP_ENDIAN_BIG : REGMAP_ENDIAN_DEFAULT; dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base, &dspi_regmap_config); if (IS_ERR(dspi->regmap)) { @@ -535,7 +532,6 @@ static int dspi_probe(struct platform_device *pdev) goto out_clk_put; } - pr_info(KERN_INFO "Freescale DSPI master initialized\n"); return ret; out_clk_put: diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 8ebd724e4c59..a7f94b6a9e70 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -8,19 +8,19 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ -#include <linux/module.h> #include <linux/delay.h> -#include <linux/irq.h> -#include <linux/spi/spi.h> -#include <linux/platform_device.h> +#include <linux/err.h> #include <linux/fsl_devices.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/module.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> -#include <linux/interrupt.h> -#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> #include <sysdev/fsl_soc.h> #include "spi-fsl-lib.h" @@ -452,16 +452,16 @@ static int fsl_espi_setup(struct spi_device *spi) int retval; u32 hw_mode; u32 loop_mode; - struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); if (!spi->max_speed_hz) return -EINVAL; if (!cs) { - cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL); + cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return -ENOMEM; - spi->controller_state = cs; + spi_set_ctldata(spi, cs); } mpc8xxx_spi = spi_master_get_devdata(spi->master); @@ -496,6 +496,14 @@ static int fsl_espi_setup(struct spi_device *spi) return 0; } +static void fsl_espi_cleanup(struct spi_device *spi) +{ + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); + + kfree(cs); + spi_set_ctldata(spi, NULL); +} + void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) { struct fsl_espi_reg *reg_base = mspi->reg_base; @@ -605,6 +613,7 @@ static struct spi_master * fsl_espi_probe(struct device *dev, master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); master->setup = fsl_espi_setup; + master->cleanup = fsl_espi_cleanup; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c index e0b773fc29cb..5ddb5b098e4e 100644 --- a/drivers/spi/spi-fsl-lib.c +++ b/drivers/spi/spi-fsl-lib.c @@ -16,10 +16,10 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ -#include <linux/kernel.h> -#include <linux/interrupt.h> -#include <linux/fsl_devices.h> #include <linux/dma-mapping.h> +#include <linux/fsl_devices.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> #include <linux/mm.h> #include <linux/of_platform.h> #include <linux/spi/spi.h> diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 9452f6740997..ed792880c9d6 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -19,25 +19,25 @@ * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. 
*/ -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/interrupt.h> #include <linux/delay.h> -#include <linux/irq.h> -#include <linux/spi/spi.h> -#include <linux/spi/spi_bitbang.h> -#include <linux/platform_device.h> -#include <linux/fsl_devices.h> #include <linux/dma-mapping.h> +#include <linux/fsl_devices.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/gpio.h> #include <linux/of_gpio.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/types.h> #include "spi-fsl-lib.h" #include "spi-fsl-cpm.h" @@ -425,16 +425,16 @@ static int fsl_spi_setup(struct spi_device *spi) struct fsl_spi_reg *reg_base; int retval; u32 hw_mode; - struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); if (!spi->max_speed_hz) return -EINVAL; if (!cs) { - cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL); + cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return -ENOMEM; - spi->controller_state = cs; + spi_set_ctldata(spi, cs); } mpc8xxx_spi = spi_master_get_devdata(spi->master); @@ -496,9 +496,13 @@ static int fsl_spi_setup(struct spi_device *spi) static void fsl_spi_cleanup(struct spi_device *spi) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio)) gpio_free(spi->cs_gpio); + + kfree(cs); + spi_set_ctldata(spi, NULL); } static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 5daff2054ae4..3637847b5370 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -21,6 +21,8 @@ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/interrupt.h> @@ -37,6 +39,7 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> +#include <linux/platform_data/dma-imx.h> #include <linux/platform_data/spi-imx.h> #define DRIVER_NAME "spi_imx" @@ -51,6 +54,9 @@ #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ +/* The maximum bytes that a sdma BD can transfer.*/ +#define MAX_SDMA_BD_BYTES (1 << 15) +#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000)) struct spi_imx_config { unsigned int speed_hz; unsigned int bpw; @@ -95,6 +101,16 @@ struct spi_imx_data { const void *tx_buf; unsigned int txfifo; /* number of words pushed in tx FIFO */ + /* DMA */ + unsigned int dma_is_inited; + unsigned int dma_finished; + bool usedma; + u32 rx_wml; + u32 tx_wml; + u32 rxt_wml; + struct completion dma_rx_completion; + struct completion dma_tx_completion; + const struct spi_imx_devtype_data *devtype_data; int chipselect[0]; }; @@ -181,9 +197,21 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, return 7; } +static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(master); + + if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) + && 
(transfer->len > spi_imx->tx_wml)) + return true; + return false; +} + #define MX51_ECSPI_CTRL 0x08 #define MX51_ECSPI_CTRL_ENABLE (1 << 0) #define MX51_ECSPI_CTRL_XCH (1 << 2) +#define MX51_ECSPI_CTRL_SMC (1 << 3) #define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4) #define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8 #define MX51_ECSPI_CTRL_PREDIV_OFFSET 12 @@ -201,6 +229,18 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, #define MX51_ECSPI_INT_TEEN (1 << 0) #define MX51_ECSPI_INT_RREN (1 << 3) +#define MX51_ECSPI_DMA 0x14 +#define MX51_ECSPI_DMA_TX_WML_OFFSET 0 +#define MX51_ECSPI_DMA_TX_WML_MASK 0x3F +#define MX51_ECSPI_DMA_RX_WML_OFFSET 16 +#define MX51_ECSPI_DMA_RX_WML_MASK (0x3F << 16) +#define MX51_ECSPI_DMA_RXT_WML_OFFSET 24 +#define MX51_ECSPI_DMA_RXT_WML_MASK (0x3F << 24) + +#define MX51_ECSPI_DMA_TEDEN_OFFSET 7 +#define MX51_ECSPI_DMA_RXDEN_OFFSET 23 +#define MX51_ECSPI_DMA_RXTDEN_OFFSET 31 + #define MX51_ECSPI_STAT 0x18 #define MX51_ECSPI_STAT_RR (1 << 3) @@ -257,17 +297,22 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx) { - u32 reg; - - reg = readl(spi_imx->base + MX51_ECSPI_CTRL); - reg |= MX51_ECSPI_CTRL_XCH; + u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL); + + if (!spi_imx->usedma) + reg |= MX51_ECSPI_CTRL_XCH; + else if (!spi_imx->dma_finished) + reg |= MX51_ECSPI_CTRL_SMC; + else + reg &= ~MX51_ECSPI_CTRL_SMC; writel(reg, spi_imx->base + MX51_ECSPI_CTRL); } static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { - u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0; + u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0; + u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg; u32 clk = config->speed_hz, delay; /* @@ -319,6 +364,30 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, else /* SCLK is _very_ slow */ usleep_range(delay, delay + 10); + /* + * Configure the DMA register: setup the watermark + * and enable DMA request. 
+ */ + if (spi_imx->dma_is_inited) { + dma = readl(spi_imx->base + MX51_ECSPI_DMA); + + spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2; + spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2; + spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2; + rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET; + tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET; + rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET; + dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK + & ~MX51_ECSPI_DMA_RX_WML_MASK + & ~MX51_ECSPI_DMA_RXT_WML_MASK) + | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg + |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET) + |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET) + |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET); + + writel(dma, spi_imx->base + MX51_ECSPI_DMA); + } + return 0; } @@ -730,7 +799,186 @@ static int spi_imx_setupxfer(struct spi_device *spi, return 0; } -static int spi_imx_transfer(struct spi_device *spi, +static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx) +{ + struct spi_master *master = spi_imx->bitbang.master; + + if (master->dma_rx) { + dma_release_channel(master->dma_rx); + master->dma_rx = NULL; + } + + if (master->dma_tx) { + dma_release_channel(master->dma_tx); + master->dma_tx = NULL; + } + + spi_imx->dma_is_inited = 0; +} + +static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, + struct spi_master *master, + const struct resource *res) +{ + struct dma_slave_config slave_config = {}; + int ret; + + /* Prepare for TX DMA: */ + master->dma_tx = dma_request_slave_channel(dev, "tx"); + if (!master->dma_tx) { + dev_err(dev, "cannot get the TX DMA channel!\n"); + ret = -EINVAL; + goto err; + } + + slave_config.direction = DMA_MEM_TO_DEV; + slave_config.dst_addr = res->start + MXC_CSPITXDATA; + slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2; + ret = dmaengine_slave_config(master->dma_tx, &slave_config); + if (ret) { + dev_err(dev, "error in TX dma configuration.\n"); + goto err; + } + + /* Prepare for RX : */ + master->dma_rx = dma_request_slave_channel(dev, "rx"); + if (!master->dma_rx) { + dev_dbg(dev, "cannot get the DMA channel.\n"); + ret = -EINVAL; + goto err; + } + + slave_config.direction = DMA_DEV_TO_MEM; + slave_config.src_addr = res->start + MXC_CSPIRXDATA; + slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2; + ret = dmaengine_slave_config(master->dma_rx, &slave_config); + if (ret) { + dev_err(dev, "error in RX dma configuration.\n"); + goto err; + } + + init_completion(&spi_imx->dma_rx_completion); + init_completion(&spi_imx->dma_tx_completion); + master->can_dma = spi_imx_can_dma; + master->max_dma_len = MAX_SDMA_BD_BYTES; + spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | + SPI_MASTER_MUST_TX; + spi_imx->dma_is_inited = 1; + + return 0; +err: + spi_imx_sdma_exit(spi_imx); + return ret; +} + +static void spi_imx_dma_rx_callback(void *cookie) +{ + struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; + + complete(&spi_imx->dma_rx_completion); +} + +static void spi_imx_dma_tx_callback(void *cookie) +{ + struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie; + + complete(&spi_imx->dma_tx_completion); +} + +static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, + struct spi_transfer *transfer) +{ + struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; + int ret; + u32 dma; + int left; + struct spi_master *master = spi_imx->bitbang.master; + struct sg_table *tx = 
&transfer->tx_sg, *rx = &transfer->rx_sg; + + if (tx) { + desc_tx = dmaengine_prep_slave_sg(master->dma_tx, + tx->sgl, tx->nents, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_tx) + goto no_dma; + + desc_tx->callback = spi_imx_dma_tx_callback; + desc_tx->callback_param = (void *)spi_imx; + dmaengine_submit(desc_tx); + } + + if (rx) { + desc_rx = dmaengine_prep_slave_sg(master->dma_rx, + rx->sgl, rx->nents, DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_rx) + goto no_dma; + + desc_rx->callback = spi_imx_dma_rx_callback; + desc_rx->callback_param = (void *)spi_imx; + dmaengine_submit(desc_rx); + } + + reinit_completion(&spi_imx->dma_rx_completion); + reinit_completion(&spi_imx->dma_tx_completion); + + /* Trigger the cspi module. */ + spi_imx->dma_finished = 0; + + dma = readl(spi_imx->base + MX51_ECSPI_DMA); + dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK); + /* Change RX_DMA_LENGTH trigger dma fetch tail data */ + left = transfer->len % spi_imx->rxt_wml; + if (left) + writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET), + spi_imx->base + MX51_ECSPI_DMA); + spi_imx->devtype_data->trigger(spi_imx); + + dma_async_issue_pending(master->dma_tx); + dma_async_issue_pending(master->dma_rx); + /* Wait SDMA to finish the data transfer.*/ + ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion, + IMX_DMA_TIMEOUT); + if (!ret) { + pr_warn("%s %s: I/O Error in DMA TX\n", + dev_driver_string(&master->dev), + dev_name(&master->dev)); + dmaengine_terminate_all(master->dma_tx); + } else { + ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion, + IMX_DMA_TIMEOUT); + if (!ret) { + pr_warn("%s %s: I/O Error in DMA RX\n", + dev_driver_string(&master->dev), + dev_name(&master->dev)); + spi_imx->devtype_data->reset(spi_imx); + dmaengine_terminate_all(master->dma_rx); + } + writel(dma | + spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET, + spi_imx->base + MX51_ECSPI_DMA); + } + + spi_imx->dma_finished = 1; + spi_imx->devtype_data->trigger(spi_imx); + + if (!ret) + ret = -ETIMEDOUT; + else if (ret > 0) + ret = transfer->len; + + return ret; + +no_dma: + pr_warn_once("%s %s: DMA not available, falling back to PIO\n", + dev_driver_string(&master->dev), + dev_name(&master->dev)); + return -EAGAIN; +} + +static int spi_imx_pio_transfer(struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -751,6 +999,24 @@ static int spi_imx_transfer(struct spi_device *spi, return transfer->len; } +static int spi_imx_transfer(struct spi_device *spi, + struct spi_transfer *transfer) +{ + int ret; + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + + if (spi_imx->bitbang.master->can_dma && + spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) { + spi_imx->usedma = true; + ret = spi_imx_dma_transfer(spi_imx, transfer); + if (ret != -EAGAIN) + return ret; + } + spi_imx->usedma = false; + + return spi_imx_pio_transfer(spi, transfer); +} + static int spi_imx_setup(struct spi_device *spi) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -911,6 +1177,13 @@ static int spi_imx_probe(struct platform_device *pdev) goto out_put_per; spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); + /* + * Only validated on i.mx6 now, can remove the constraint if validated on + * other chips.
+ */ + if (spi_imx->devtype_data == &imx51_ecspi_devtype_data + && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res)) + dev_err(&pdev->dev, "dma setup error,use pio instead\n"); spi_imx->devtype_data->reset(spi_imx); @@ -949,6 +1222,7 @@ static int spi_imx_remove(struct platform_device *pdev) writel(0, spi_imx->base + MXC_CSPICTRL); clk_unprepare(spi_imx->clk_ipg); clk_unprepare(spi_imx->clk_per); + spi_imx_sdma_exit(spi_imx); spi_master_put(master); return 0; diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 2884f0c2f5f0..51460878af04 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -85,7 +85,7 @@ static int mxs_spi_setup_transfer(struct spi_device *dev, mxs_ssp_set_clk_rate(ssp, hz); /* * Save requested rate, hz, rather than the actual rate, - * ssp->clk_rate. Otherwise we would set the rate every trasfer + * ssp->clk_rate. Otherwise we would set the rate every transfer * when the actual rate is not quite the same as requested rate. */ spi->sck = hz; @@ -154,12 +154,14 @@ static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set) static void mxs_ssp_dma_irq_callback(void *param) { struct mxs_spi *spi = param; + complete(&spi->c); } static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id) { struct mxs_ssp *ssp = dev_id; + dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n", __func__, __LINE__, readl(ssp->base + HW_SSP_CTRL1(ssp)), @@ -189,7 +191,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, if (!len) return -EINVAL; - dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL); + dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL); if (!dma_xfer) return -ENOMEM; diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index 5e91858f6f01..fb522765ce5a 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c @@ -70,10 +70,6 @@ #define SPI_STATUS_WE (1UL << 1) #define SPI_STATUS_RD (1UL << 0) -#define WRITE 0 -#define READ 1 - - /* use PIO for small transfers, avoiding DMA setup/teardown overhead and * cache operations; better heuristics consider wordsize and bitrate. 
*/ diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 68441fa448de..352eed7463ac 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -329,7 +329,8 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi, disable_fifo: if (t->rx_buf != NULL) chconf &= ~OMAP2_MCSPI_CHCONF_FFER; - else + + if (t->tx_buf != NULL) chconf &= ~OMAP2_MCSPI_CHCONF_FFET; mcspi_write_chconf0(spi, chconf); diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index c4675fa8b645..835cdda6f4f5 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/clk.h> #include <linux/sizes.h> #include <asm/unaligned.h> @@ -40,13 +41,27 @@ #define ORION_SPI_MODE_CPHA (1 << 12) #define ORION_SPI_IF_8_16_BIT_MODE (1 << 5) #define ORION_SPI_CLK_PRESCALE_MASK 0x1F +#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF #define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \ ORION_SPI_MODE_CPHA) +enum orion_spi_type { + ORION_SPI, + ARMADA_SPI, +}; + +struct orion_spi_dev { + enum orion_spi_type typ; + unsigned int min_divisor; + unsigned int max_divisor; + u32 prescale_mask; +}; + struct orion_spi { struct spi_master *master; void __iomem *base; struct clk *clk; + const struct orion_spi_dev *devdata; }; static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg) @@ -83,30 +98,66 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed) u32 prescale; u32 reg; struct orion_spi *orion_spi; + const struct orion_spi_dev *devdata; orion_spi = spi_master_get_devdata(spi->master); + devdata = orion_spi->devdata; tclk_hz = clk_get_rate(orion_spi->clk); - /* - * the supported rates are: 4,6,8...30 - * round up as we look for equal or less speed - */ - rate = DIV_ROUND_UP(tclk_hz, speed); - rate = roundup(rate, 2); + if (devdata->typ == ARMADA_SPI) { + unsigned int clk, spr, sppr, sppr2, err; + unsigned int best_spr, best_sppr, best_err; - /* check if requested speed is too small */ - if (rate > 30) - return -EINVAL; + best_err = speed; + best_spr = 0; + best_sppr = 0; - if (rate < 4) - rate = 4; + /* Iterate over the valid range looking for best fit */ + for (sppr = 0; sppr < 8; sppr++) { + sppr2 = 0x1 << sppr; + + spr = tclk_hz / sppr2; + spr = DIV_ROUND_UP(spr, speed); + if ((spr == 0) || (spr > 15)) + continue; + + clk = tclk_hz / (spr * sppr2); + err = speed - clk; + + if (err < best_err) { + best_spr = spr; + best_sppr = sppr; + best_err = err; + } + } - /* Convert the rate to SPI clock divisor value. */ - prescale = 0x10 + rate/2; + if ((best_sppr == 0) && (best_spr == 0)) + return -EINVAL; + + prescale = ((best_sppr & 0x6) << 5) | + ((best_sppr & 0x1) << 4) | best_spr; + } else { + /* + * the supported rates are: 4,6,8...30 + * round up as we look for equal or less speed + */ + rate = DIV_ROUND_UP(tclk_hz, speed); + rate = roundup(rate, 2); + + /* check if requested speed is too small */ + if (rate > 30) + return -EINVAL; + + if (rate < 4) + rate = 4; + + /* Convert the rate to SPI clock divisor value. 
*/ + prescale = 0x10 + rate/2; + } reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG)); - reg = ((reg & ~ORION_SPI_CLK_PRESCALE_MASK) | prescale); + reg = ((reg & ~devdata->prescale_mask) | prescale); writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG)); return 0; @@ -179,8 +230,8 @@ static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi) for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) { if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG))) return 1; - else - udelay(1); + + udelay(1); } return -1; @@ -342,8 +393,31 @@ static int orion_spi_reset(struct orion_spi *orion_spi) return 0; } +static const struct orion_spi_dev orion_spi_dev_data = { + .typ = ORION_SPI, + .min_divisor = 4, + .max_divisor = 30, + .prescale_mask = ORION_SPI_CLK_PRESCALE_MASK, +}; + +static const struct orion_spi_dev armada_spi_dev_data = { + .typ = ARMADA_SPI, + .min_divisor = 1, + .max_divisor = 1920, + .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK, +}; + +static const struct of_device_id orion_spi_of_match_table[] = { + { .compatible = "marvell,orion-spi", .data = &orion_spi_dev_data, }, + { .compatible = "marvell,armada-370-spi", .data = &armada_spi_dev_data, }, + {} +}; +MODULE_DEVICE_TABLE(of, orion_spi_of_match_table); + static int orion_spi_probe(struct platform_device *pdev) { + const struct of_device_id *of_id; + const struct orion_spi_dev *devdata; struct spi_master *master; struct orion_spi *spi; struct resource *r; @@ -360,6 +434,7 @@ static int orion_spi_probe(struct platform_device *pdev) master->bus_num = pdev->id; if (pdev->dev.of_node) { u32 cell_index; + if (!of_property_read_u32(pdev->dev.of_node, "cell-index", &cell_index)) master->bus_num = cell_index; @@ -378,6 +453,10 @@ static int orion_spi_probe(struct platform_device *pdev) spi = spi_master_get_devdata(master); spi->master = master; + of_id = of_match_device(orion_spi_of_match_table, &pdev->dev); + devdata = of_id->data; + spi->devdata = devdata; + spi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(spi->clk)) { status = PTR_ERR(spi->clk); @@ -389,8 +468,8 @@ static int orion_spi_probe(struct platform_device *pdev) goto out; tclk_hz = clk_get_rate(spi->clk); - master->max_speed_hz = DIV_ROUND_UP(tclk_hz, 4); - master->min_speed_hz = DIV_ROUND_UP(tclk_hz, 30); + master->max_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->min_divisor); + master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); spi->base = devm_ioremap_resource(&pdev->dev, r); @@ -469,12 +548,6 @@ static const struct dev_pm_ops orion_spi_pm_ops = { NULL) }; -static const struct of_device_id orion_spi_of_match_table[] = { - { .compatible = "marvell,orion-spi", }, - {} -}; -MODULE_DEVICE_TABLE(of, orion_spi_of_match_table); - static struct platform_driver orion_spi_driver = { .driver = { .name = DRIVER_NAME, diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 1189cfd96477..f35f723816ea 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -82,6 +82,7 @@ #define SSP_MIS(r) (r + 0x01C) #define SSP_ICR(r) (r + 0x020) #define SSP_DMACR(r) (r + 0x024) +#define SSP_CSR(r) (r + 0x030) /* vendor extension */ #define SSP_ITCR(r) (r + 0x080) #define SSP_ITIP(r) (r + 0x084) #define SSP_ITOP(r) (r + 0x088) @@ -198,6 +199,12 @@ #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) /* + * SSP Chip Select Control Register - SSP_CSR + * (vendor extension) + */ +#define SSP_CSR_CSVALUE_MASK (0x1FUL << 0) + +/* * SSP Integration Test control Register - SSP_ITCR */ #define 
SSP_ITCR_MASK_ITEN (0x1UL << 0) @@ -313,6 +320,7 @@ enum ssp_writing { * @extended_cr: 32 bit wide control register 0 with extra * features and extra features in CR1 as found in the ST variants * @pl023: supports a subset of the ST extensions called "PL023" + * @internal_cs_ctrl: supports chip select control register */ struct vendor_data { int fifodepth; @@ -321,6 +329,7 @@ struct vendor_data { bool extended_cr; bool pl023; bool loopback; + bool internal_cs_ctrl; }; /** @@ -440,9 +449,32 @@ static void null_cs_control(u32 command) pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); } +/** + * internal_cs_control - Control chip select signals via SSP_CSR. + * @pl022: SSP driver private data structure + * @command: select/deselect the chip + * + * Used on controller with internal chip select control via SSP_CSR register + * (vendor extension). Each of the 5 LSB in the register controls one chip + * select signal. + */ +static void internal_cs_control(struct pl022 *pl022, u32 command) +{ + u32 tmp; + + tmp = readw(SSP_CSR(pl022->virtbase)); + if (command == SSP_CHIP_SELECT) + tmp &= ~BIT(pl022->cur_cs); + else + tmp |= BIT(pl022->cur_cs); + writew(tmp, SSP_CSR(pl022->virtbase)); +} + static void pl022_cs_control(struct pl022 *pl022, u32 command) { - if (gpio_is_valid(pl022->cur_cs)) + if (pl022->vendor->internal_cs_ctrl) + internal_cs_control(pl022, command); + else if (gpio_is_valid(pl022->cur_cs)) gpio_set_value(pl022->cur_cs, command); else pl022->cur_chip->cs_control(command); @@ -2100,6 +2132,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) pl022->vendor = id->data; pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int), GFP_KERNEL); + if (!pl022->chipselects) { + status = -ENOMEM; + goto err_no_mem; + } /* * Bus Number Which has been Assigned to this SSP controller @@ -2118,6 +2154,9 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) if (platform_info->num_chipselect && platform_info->chipselects) { for (i = 0; i < num_cs; i++) pl022->chipselects[i] = platform_info->chipselects[i]; + } else if (pl022->vendor->internal_cs_ctrl) { + for (i = 0; i < num_cs; i++) + pl022->chipselects[i] = i; + } else if (IS_ENABLED(CONFIG_OF)) { for (i = 0; i < num_cs; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); @@ -2136,7 +2175,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) cs_gpio); else if (gpio_direction_output(cs_gpio, 1)) dev_err(&adev->dev, - "could set gpio %d as output\n", + "could not set gpio %d as output\n", cs_gpio); } } @@ -2241,6 +2280,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) amba_release_regions(adev); err_no_ioregion: err_no_gpio: + err_no_mem: spi_master_put(master); return status; } @@ -2347,6 +2387,7 @@ static struct vendor_data vendor_arm = { .extended_cr = false, .pl023 = false, .loopback = true, + .internal_cs_ctrl = false, }; static struct vendor_data vendor_st = { @@ -2356,6 +2397,7 @@ static struct vendor_data vendor_st = { .extended_cr = true, .pl023 = false, .loopback = true, + .internal_cs_ctrl = false, }; static struct vendor_data vendor_st_pl023 = { @@ -2365,6 +2407,17 @@ static struct vendor_data vendor_st_pl023 = { .extended_cr = true, .pl023 = true, .loopback = false, + .internal_cs_ctrl = false, +}; + +static struct vendor_data vendor_lsi = { + .fifodepth = 8, + .max_bpw = 16, + .unidir = false, + .extended_cr = false, + .pl023 = false, + .loopback = true, + .internal_cs_ctrl = true, }; static struct amba_id
pl022_ids[] = { @@ -2398,6 +2451,15 @@ static struct amba_id pl022_ids[] = { .mask = 0xffffffff, .data = &vendor_st_pl023, }, + { + /* + * PL022 variant that has a chip select control register which + * allows control of 5 output signals nCS[0:4]. + */ + .id = 0x000b6022, + .mask = 0x000fffff, + .data = &vendor_lsi, + }, { 0, 0 }, }; diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index c1865c92ccb9..536c863bebf1 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -7,6 +7,8 @@ #include <linux/of_device.h> #include <linux/module.h> #include <linux/spi/pxa2xx_spi.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> enum { PORT_CE4100, @@ -21,6 +23,7 @@ struct pxa_spi_info { int tx_chan_id; int rx_slave_id; int rx_chan_id; + unsigned long max_clk_rate; }; static struct pxa_spi_info spi_info_configs[] = { @@ -32,6 +35,7 @@ static struct pxa_spi_info spi_info_configs[] = { .tx_chan_id = -1, .rx_slave_id = -1, .rx_chan_id = -1, + .max_clk_rate = 3686400, }, [PORT_BYT] = { .type = LPSS_SSP, @@ -41,6 +45,7 @@ static struct pxa_spi_info spi_info_configs[] = { .tx_chan_id = 0, .rx_slave_id = 1, .rx_chan_id = 1, + .max_clk_rate = 50000000, }, }; @@ -53,6 +58,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, struct pxa2xx_spi_master spi_pdata; struct ssp_device *ssp; struct pxa_spi_info *c; + char buf[40]; ret = pcim_enable_device(dev); if (ret) @@ -84,6 +90,12 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn; ssp->type = c->type; + snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id); + ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL, + CLK_IS_ROOT, c->max_clk_rate); + if (IS_ERR(ssp->clk)) + return PTR_ERR(ssp->clk); + memset(&pi, 0, sizeof(pi)); pi.parent = &dev->dev; pi.name = "pxa2xx-spi"; @@ -92,8 +104,10 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, pi.size_data = sizeof(spi_pdata); pdev = platform_device_register_full(&pi); - if (IS_ERR(pdev)) + if (IS_ERR(pdev)) { + clk_unregister(ssp->clk); return PTR_ERR(pdev); + } pci_set_drvdata(dev, pdev); @@ -103,8 +117,12 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, static void pxa2xx_spi_pci_remove(struct pci_dev *dev) { struct platform_device *pdev = pci_get_drvdata(dev); + struct pxa2xx_spi_master *spi_pdata; + + spi_pdata = dev_get_platdata(&pdev->dev); platform_device_unregister(pdev); + clk_unregister(spi_pdata->ssp.clk); } static const struct pci_device_id pxa2xx_spi_pci_devices[] = { diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index fe792106bdc5..46f45ca2c694 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1074,6 +1074,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = { { "INT3430", 0 }, { "INT3431", 0 }, { "80860F0E", 0 }, + { "8086228E", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index c0743604b906..f96ea8a38d64 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -220,7 +220,7 @@ static inline void wait_for_idle(struct rockchip_spi *rs) do { if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)) return; - } while (time_before(jiffies, timeout)); + } while (!time_after(jiffies, timeout)); dev_warn(rs->dev, "spi controller is in busy state!\n"); } @@ -415,7 +415,7 @@ static void rockchip_spi_dma_txcb(void *data) spin_unlock_irqrestore(&rs->lock, flags); } -static int rockchip_spi_dma_transfer(struct
rockchip_spi *rs) +static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) { unsigned long flags; struct dma_slave_config rxconf, txconf; @@ -474,8 +474,6 @@ static int rockchip_spi_dma_transfer(struct rockchip_spi *rs) dmaengine_submit(txdesc); dma_async_issue_pending(rs->dma_tx.ch); } - - return 1; } static void rockchip_spi_config(struct rockchip_spi *rs) @@ -499,7 +497,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs) } /* div doesn't support odd number */ - div = rs->max_freq / rs->speed; + div = max_t(u32, rs->max_freq / rs->speed, 1); div = (div + 1) & 0xfffe; spi_enable_chip(rs, 0); @@ -529,7 +527,8 @@ static int rockchip_spi_transfer_one( int ret = 0; struct rockchip_spi *rs = spi_master_get_devdata(master); - WARN_ON((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)); + WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && + (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)); if (!xfer->tx_buf && !xfer->rx_buf) { dev_err(rs->dev, "No buffer for transfer\n"); @@ -556,16 +555,17 @@ static int rockchip_spi_transfer_one( else if (rs->rx) rs->tmode = CR0_XFM_RO; - if (master->can_dma && master->can_dma(master, spi, xfer)) + /* we need to prepare dma before the spi is enabled */ + if (master->can_dma && master->can_dma(master, spi, xfer)) { rs->use_dma = 1; - else + rockchip_spi_prepare_dma(rs); + } else { rs->use_dma = 0; + } rockchip_spi_config(rs); - if (rs->use_dma) - ret = rockchip_spi_dma_transfer(rs); - else + if (!rs->use_dma) ret = rockchip_spi_pio_transfer(rs); return ret; @@ -678,7 +678,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR); rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR); rs->dma_tx.direction = DMA_MEM_TO_DEV; - rs->dma_tx.direction = DMA_DEV_TO_MEM; + rs->dma_rx.direction = DMA_DEV_TO_MEM; master->can_dma = rockchip_spi_can_dma; master->dma_tx = rs->dma_tx.ch; diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index c850dfdfa9e3..54bb0faec155 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -87,7 +87,7 @@ /* RSPI on SH only */ #define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */ #define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 
4-wire) */ -/* QSPI on R-Car M2 only */ +/* QSPI on R-Car Gen2 only */ #define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */ #define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */ @@ -472,25 +472,52 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, dma_cookie_t cookie; int ret; - if (tx) { - desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, - tx->sgl, tx->nents, DMA_TO_DEVICE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc_tx) - goto no_dma; - - irq_mask |= SPCR_SPTIE; - } + /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, rx->sgl, rx->nents, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc_rx) - goto no_dma; + if (!desc_rx) { + ret = -EAGAIN; + goto no_dma_rx; + } + + desc_rx->callback = rspi_dma_complete; + desc_rx->callback_param = rspi; + cookie = dmaengine_submit(desc_rx); + if (dma_submit_error(cookie)) { + ret = cookie; + goto no_dma_rx; + } irq_mask |= SPCR_SPRIE; } + if (tx) { + desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, + tx->sgl, tx->nents, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_tx) { + ret = -EAGAIN; + goto no_dma_tx; + } + + if (rx) { + /* No callback */ + desc_tx->callback = NULL; + } else { + desc_tx->callback = rspi_dma_complete; + desc_tx->callback_param = rspi; + } + cookie = dmaengine_submit(desc_tx); + if (dma_submit_error(cookie)) { + ret = cookie; + goto no_dma_tx; + } + + irq_mask |= SPCR_SPTIE; + } + /* * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be * called. So, this driver disables the IRQ while DMA transfer. @@ -503,34 +530,24 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, rspi_enable_irq(rspi, irq_mask); rspi->dma_callbacked = 0; - if (rx) { - desc_rx->callback = rspi_dma_complete; - desc_rx->callback_param = rspi; - cookie = dmaengine_submit(desc_rx); - if (dma_submit_error(cookie)) - return cookie; + /* Now start DMA */ + if (rx) dma_async_issue_pending(rspi->master->dma_rx); - } - if (tx) { - if (rx) { - /* No callback */ - desc_tx->callback = NULL; - } else { - desc_tx->callback = rspi_dma_complete; - desc_tx->callback_param = rspi; - } - cookie = dmaengine_submit(desc_tx); - if (dma_submit_error(cookie)) - return cookie; + if (tx) dma_async_issue_pending(rspi->master->dma_tx); - } ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); if (ret > 0 && rspi->dma_callbacked) ret = 0; - else if (!ret) + else if (!ret) { + dev_err(&rspi->master->dev, "DMA timeout\n"); ret = -ETIMEDOUT; + if (tx) + dmaengine_terminate_all(rspi->master->dma_tx); + if (rx) + dmaengine_terminate_all(rspi->master->dma_rx); + } rspi_disable_irq(rspi, irq_mask); @@ -541,11 +558,16 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, return ret; -no_dma: - pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->master->dev), - dev_name(&rspi->master->dev)); - return -EAGAIN; +no_dma_tx: + if (rx) + dmaengine_terminate_all(rspi->master->dma_rx); +no_dma_rx: + if (ret == -EAGAIN) { + pr_warn_once("%s %s: DMA not available, falling back to PIO\n", + dev_driver_string(&rspi->master->dev), + dev_name(&rspi->master->dev)); + } + return ret; } static void rspi_receive_init(const struct rspi_data *rspi) @@ -887,20 +909,24 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - chan = dma_request_channel(mask, 
shdma_chan_filter, - (void *)(unsigned long)id); + chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, + (void *)(unsigned long)id, dev, + dir == DMA_MEM_TO_DEV ? "tx" : "rx"); if (!chan) { - dev_warn(dev, "dma_request_channel failed\n"); + dev_warn(dev, "dma_request_slave_channel_compat failed\n"); return NULL; } memset(&cfg, 0, sizeof(cfg)); cfg.slave_id = id; cfg.direction = dir; - if (dir == DMA_MEM_TO_DEV) + if (dir == DMA_MEM_TO_DEV) { cfg.dst_addr = port_addr; - else + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + } else { cfg.src_addr = port_addr; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + } ret = dmaengine_slave_config(chan, &cfg); if (ret) { @@ -916,22 +942,30 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master, const struct resource *res) { const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev); + unsigned int dma_tx_id, dma_rx_id; + + if (dev->of_node) { + /* In the OF case we will get the slave IDs from the DT */ + dma_tx_id = 0; + dma_rx_id = 0; + } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) { + dma_tx_id = rspi_pd->dma_tx_id; + dma_rx_id = rspi_pd->dma_rx_id; + } else { + /* The driver assumes no error. */ + return 0; + } - if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id) - return 0; /* The driver assumes no error. */ - - master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, - rspi_pd->dma_rx_id, + master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, res->start + RSPI_SPDR); - if (!master->dma_rx) + if (!master->dma_tx) return -ENODEV; - master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, - rspi_pd->dma_tx_id, + master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, res->start + RSPI_SPDR); - if (!master->dma_tx) { - dma_release_channel(master->dma_rx); - master->dma_rx = NULL; + if (!master->dma_rx) { + dma_release_channel(master->dma_tx); + master->dma_tx = NULL; return -ENODEV; } @@ -1024,12 +1058,11 @@ static int rspi_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, const char *suffix, void *dev_id) { - const char *base = dev_name(dev); - size_t len = strlen(base) + strlen(suffix) + 2; - char *name = devm_kzalloc(dev, len, GFP_KERNEL); + const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", + dev_name(dev), suffix); if (!name) return -ENOMEM; - snprintf(name, len, "%s:%s", base, suffix); + return devm_request_irq(dev, irq, handler, 0, name, dev_id); } @@ -1062,7 +1095,7 @@ static int rspi_probe(struct platform_device *pdev) master->num_chipselect = rspi_pd->num_chipselect; else master->num_chipselect = 2; /* default */ - }; + } /* ops parameter check */ if (!ops->set_config_register) { diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 1c36311935d7..480133ee1eb3 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -1317,19 +1317,6 @@ static struct s3c64xx_spi_port_config s3c6410_spi_port_config = { .tx_st_done = 21, }; -static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { - .fifo_lvl_mask = { 0x1ff, 0x7F }, - .rx_lvl_offset = 15, - .tx_st_done = 25, -}; - -static struct s3c64xx_spi_port_config s5pc100_spi_port_config = { - .fifo_lvl_mask = { 0x7f, 0x7F }, - .rx_lvl_offset = 13, - .tx_st_done = 21, - .high_speed = true, -}; - static struct s3c64xx_spi_port_config s5pv210_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, @@ -1362,12 +1349,6 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = { .name = "s3c6410-spi", 
.driver_data = (kernel_ulong_t)&s3c6410_spi_port_config, }, { - .name = "s5p64x0-spi", - .driver_data = (kernel_ulong_t)&s5p64x0_spi_port_config, - }, { - .name = "s5pc100-spi", - .driver_data = (kernel_ulong_t)&s5pc100_spi_port_config, - }, { .name = "s5pv210-spi", .driver_data = (kernel_ulong_t)&s5pv210_spi_port_config, }, { @@ -1384,9 +1365,6 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = { { .compatible = "samsung,s3c6410-spi", .data = (void *)&s3c6410_spi_port_config, }, - { .compatible = "samsung,s5pc100-spi", - .data = (void *)&s5pc100_spi_port_config, - }, { .compatible = "samsung,s5pv210-spi", .data = (void *)&s5pv210_spi_port_config, }, diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 2a4354dcd661..3f365402fcc0 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -636,17 +636,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, dma_cookie_t cookie; int ret; - if (tx) { - ier_bits |= IER_TDREQE | IER_TDMAE; - dma_sync_single_for_device(p->master->dma_tx->device->dev, - p->tx_dma_addr, len, DMA_TO_DEVICE); - desc_tx = dmaengine_prep_slave_single(p->master->dma_tx, - p->tx_dma_addr, len, DMA_TO_DEVICE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc_tx) - return -EAGAIN; - } - + /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { ier_bits |= IER_RDREQE | IER_RDMAE; desc_rx = dmaengine_prep_slave_single(p->master->dma_rx, @@ -654,30 +644,26 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) return -EAGAIN; - } - - /* 1 stage FIFO watermarks for DMA */ - sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1); - /* setup msiof transfer mode registers (32-bit words) */ - sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); - - sh_msiof_write(p, IER, ier_bits); - - reinit_completion(&p->done); - - if (rx) { desc_rx->callback = sh_msiof_dma_complete; desc_rx->callback_param = p; cookie = dmaengine_submit(desc_rx); - if (dma_submit_error(cookie)) { - ret = cookie; - goto stop_ier; - } - dma_async_issue_pending(p->master->dma_rx); + if (dma_submit_error(cookie)) + return cookie; } if (tx) { + ier_bits |= IER_TDREQE | IER_TDMAE; + dma_sync_single_for_device(p->master->dma_tx->device->dev, + p->tx_dma_addr, len, DMA_TO_DEVICE); + desc_tx = dmaengine_prep_slave_single(p->master->dma_tx, + p->tx_dma_addr, len, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_tx) { + ret = -EAGAIN; + goto no_dma_tx; + } + if (rx) { /* No callback */ desc_tx->callback = NULL; @@ -688,15 +674,30 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, cookie = dmaengine_submit(desc_tx); if (dma_submit_error(cookie)) { ret = cookie; - goto stop_rx; + goto no_dma_tx; } - dma_async_issue_pending(p->master->dma_tx); } + /* 1 stage FIFO watermarks for DMA */ + sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1); + + /* setup msiof transfer mode registers (32-bit words) */ + sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); + + sh_msiof_write(p, IER, ier_bits); + + reinit_completion(&p->done); + + /* Now start DMA */ + if (rx) + dma_async_issue_pending(p->master->dma_rx); + if (tx) + dma_async_issue_pending(p->master->dma_tx); + ret = sh_msiof_spi_start(p, rx); if (ret) { dev_err(&p->pdev->dev, "failed to start hardware\n"); - goto stop_tx; + goto stop_dma; } /* wait for tx fifo to be emptied / rx fifo to be filled */ @@ -726,13 +727,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, 
const void *tx, stop_reset: sh_msiof_reset_str(p); sh_msiof_spi_stop(p, rx); -stop_tx: +stop_dma: if (tx) dmaengine_terminate_all(p->master->dma_tx); -stop_rx: +no_dma_tx: if (rx) dmaengine_terminate_all(p->master->dma_rx); -stop_ier: sh_msiof_write(p, IER, 0); return ret; } @@ -928,6 +928,9 @@ static const struct of_device_id sh_msiof_match[] = { { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data }, + { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, + { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, + { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, {}, }; MODULE_DEVICE_TABLE(of, sh_msiof_match); @@ -972,20 +975,24 @@ static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - chan = dma_request_channel(mask, shdma_chan_filter, - (void *)(unsigned long)id); + chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, + (void *)(unsigned long)id, dev, + dir == DMA_MEM_TO_DEV ? "tx" : "rx"); if (!chan) { - dev_warn(dev, "dma_request_channel failed\n"); + dev_warn(dev, "dma_request_slave_channel_compat failed\n"); return NULL; } memset(&cfg, 0, sizeof(cfg)); cfg.slave_id = id; cfg.direction = dir; - if (dir == DMA_MEM_TO_DEV) + if (dir == DMA_MEM_TO_DEV) { cfg.dst_addr = port_addr; - else + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } else { cfg.src_addr = port_addr; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } ret = dmaengine_slave_config(chan, &cfg); if (ret) { @@ -1002,12 +1009,22 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) struct platform_device *pdev = p->pdev; struct device *dev = &pdev->dev; const struct sh_msiof_spi_info *info = dev_get_platdata(dev); + unsigned int dma_tx_id, dma_rx_id; const struct resource *res; struct spi_master *master; struct device *tx_dev, *rx_dev; - if (!info || !info->dma_tx_id || !info->dma_rx_id) - return 0; /* The driver assumes no error */ + if (dev->of_node) { + /* In the OF case we will get the slave IDs from the DT */ + dma_tx_id = 0; + dma_rx_id = 0; + } else if (info && info->dma_tx_id && info->dma_rx_id) { + dma_tx_id = info->dma_tx_id; + dma_rx_id = info->dma_rx_id; + } else { + /* The driver assumes no error */ + return 0; + } /* The DMA engine uses the second register set, if present */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -1016,13 +1033,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) master = p->master; master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - info->dma_tx_id, + dma_tx_id, res->start + TFDR); if (!master->dma_tx) return -ENODEV; master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - info->dma_rx_id, + dma_rx_id, res->start + RFDR); if (!master->dma_rx) goto free_tx_chan; @@ -1205,6 +1222,9 @@ static struct platform_device_id spi_driver_ids[] = { { "spi_sh_msiof", (kernel_ulong_t)&sh_data }, { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data }, { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data }, + { "spi_r8a7792_msiof", (kernel_ulong_t)&r8a779x_data }, + { "spi_r8a7793_msiof", (kernel_ulong_t)&r8a779x_data }, + { "spi_r8a7794_msiof", (kernel_ulong_t)&r8a779x_data }, {}, }; MODULE_DEVICE_TABLE(platform, spi_driver_ids); diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index 95ac276eaafe..39e2c0a55a28 100644 --- a/drivers/spi/spi-sirf.c +++ 
b/drivers/spi/spi-sirf.c @@ -62,15 +62,15 @@ #define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26) #define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26) #define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26) -#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28) -#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30) -#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31) +#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28) +#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30) +#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31) /* Interrupt Enable */ -#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0) -#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1) -#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2) -#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3) +#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0) +#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1) +#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2) +#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3) #define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4) #define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5) #define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6) @@ -79,7 +79,7 @@ #define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9) #define SIRFSOC_SPI_FRM_END_INT_EN BIT(10) -#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF +#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF /* Interrupt status */ #define SIRFSOC_SPI_RX_DONE BIT(0) @@ -170,8 +170,7 @@ struct sirfsoc_spi { * command model */ bool tx_by_cmd; - - int chipselect[0]; + bool hw_cs; }; static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi) @@ -304,7 +303,7 @@ static void spi_sirfsoc_dma_fini_callback(void *data) complete(dma_complete); } -static int spi_sirfsoc_cmd_transfer(struct spi_device *spi, +static void spi_sirfsoc_cmd_transfer(struct spi_device *spi, struct spi_transfer *t) { struct sirfsoc_spi *sspi; @@ -312,6 +311,8 @@ static int spi_sirfsoc_cmd_transfer(struct spi_device *spi, u32 cmd; sspi = spi_master_get_devdata(spi->master); + writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); + writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); memcpy(&cmd, sspi->tx, t->len); if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) cmd = cpu_to_be32(cmd) >> @@ -326,10 +327,9 @@ static int spi_sirfsoc_cmd_transfer(struct spi_device *spi, sspi->base + SIRFSOC_SPI_TX_RX_EN); if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { dev_err(&spi->dev, "cmd transfer timeout\n"); - return 0; + return; } - - return t->len; + sspi->left_rx_word -= t->len; } static void spi_sirfsoc_dma_transfer(struct spi_device *spi, @@ -438,7 +438,8 @@ static void spi_sirfsoc_pio_transfer(struct spi_device *spi, sspi->tx_word(sspi); writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN | - SIRFSOC_SPI_RX_OFLOW_INT_EN, + SIRFSOC_SPI_RX_OFLOW_INT_EN | + SIRFSOC_SPI_RX_IO_DMA_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN); writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN); @@ -484,7 +485,7 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value) { struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master); - if (sspi->chipselect[spi->chip_select] == 0) { + if (sspi->hw_cs) { u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL); switch (value) { case BITBANG_CS_ACTIVE: @@ -502,14 +503,13 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value) } writel(regval, sspi->base + SIRFSOC_SPI_CTRL); } else { - int gpio = sspi->chipselect[spi->chip_select]; switch (value) { case BITBANG_CS_ACTIVE: - gpio_direction_output(gpio, + gpio_direction_output(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 
1 : 0); break; case BITBANG_CS_INACTIVE: - gpio_direction_output(gpio, + gpio_direction_output(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); break; } @@ -603,8 +603,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) sspi->tx_by_cmd = false; } /* - * set spi controller in RISC chipselect mode, we are controlling CS by - * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE. + * it should never be set to hardware cs mode because in hardware cs mode, + * the cs signal can't be controlled by the driver. */ regval |= SIRFSOC_SPI_CS_IO_MODE; writel(regval, sspi->base + SIRFSOC_SPI_CTRL); @@ -627,9 +627,17 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) static int spi_sirfsoc_setup(struct spi_device *spi) { + struct sirfsoc_spi *sspi; + if (!spi->max_speed_hz) return -EINVAL; + sspi = spi_master_get_devdata(spi->master); + + if (spi->cs_gpio == -ENOENT) + sspi->hw_cs = true; + else + sspi->hw_cs = false; return spi_sirfsoc_setup_transfer(spi, NULL); } @@ -638,19 +646,10 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) struct sirfsoc_spi *sspi; struct spi_master *master; struct resource *mem_res; - int num_cs, cs_gpio, irq; - int i; - int ret; + int irq; + int i, ret; - ret = of_property_read_u32(pdev->dev.of_node, - "sirf,spi-num-chipselects", &num_cs); - if (ret < 0) { - dev_err(&pdev->dev, "Unable to get chip select number\n"); - goto err_cs; - } - - master = spi_alloc_master(&pdev->dev, - sizeof(*sspi) + sizeof(int) * num_cs); + master = spi_alloc_master(&pdev->dev, sizeof(*sspi)); if (!master) { dev_err(&pdev->dev, "Unable to allocate SPI master\n"); return -ENOMEM; @@ -658,32 +657,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, master); sspi = spi_master_get_devdata(master); - master->num_chipselect = num_cs; - - for (i = 0; i < master->num_chipselect; i++) { - cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i); - if (cs_gpio < 0) { - dev_err(&pdev->dev, "can't get cs gpio from DT\n"); - ret = -ENODEV; - goto free_master; - } - - sspi->chipselect[i] = cs_gpio; - if (cs_gpio == 0) - continue; /* use cs from spi controller */ - - ret = gpio_request(cs_gpio, DRIVER_NAME); - if (ret) { - while (i > 0) { - i--; - if (sspi->chipselect[i] > 0) - gpio_free(sspi->chipselect[i]); - } - dev_err(&pdev->dev, "fail to request cs gpios\n"); - goto free_master; - } - } - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sspi->base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(sspi->base)) { @@ -753,7 +726,21 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) ret = spi_bitbang_start(&sspi->bitbang); if (ret) goto free_dummypage; - + for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) { + if (master->cs_gpios[i] == -ENOENT) + continue; + if (!gpio_is_valid(master->cs_gpios[i])) { + dev_err(&pdev->dev, "no valid gpio\n"); + ret = -EINVAL; + goto free_dummypage; + } + ret = devm_gpio_request(&pdev->dev, + master->cs_gpios[i], DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "failed to request gpio\n"); + goto free_dummypage; + } + } dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num); return 0; @@ -768,7 +755,7 @@ free_rx_dma: dma_release_channel(sspi->rx_chan); free_master: spi_master_put(master); -err_cs: + return ret; } @@ -776,16 +763,11 @@ static int spi_sirfsoc_remove(struct platform_device *pdev) { struct spi_master *master; struct sirfsoc_spi *sspi; - int i; master = platform_get_drvdata(pdev); sspi = 
spi_master_get_devdata(master); spi_bitbang_stop(&sspi->bitbang); - for (i = 0; i < master->num_chipselect; i++) { - if (sspi->chipselect[i] > 0) - gpio_free(sspi->chipselect[i]); - } kfree(sspi->dummypage); clk_disable_unprepare(sspi->clk); clk_put(sspi->clk); diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index e4a85ada861d..795bcbc0131b 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -302,6 +302,7 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf( max_n_32bit = DIV_ROUND_UP(nbytes, 4); for (count = 0; count < max_n_32bit; count++) { u32 x = 0; + for (i = 0; (i < 4) && nbytes; i++, nbytes--) x |= (u32)(*tx_buf++) << (i * 8); tegra_spi_writel(tspi, x, SPI_TX_FIFO); @@ -312,6 +313,7 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf( nbytes = written_words * tspi->bytes_per_word; for (count = 0; count < max_n_32bit; count++) { u32 x = 0; + for (i = 0; nbytes && (i < tspi->bytes_per_word); i++, nbytes--) x |= (u32)(*tx_buf++) << (i * 8); @@ -338,6 +340,7 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf( len = tspi->curr_dma_words * tspi->bytes_per_word; for (count = 0; count < rx_full_count; count++) { u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO); + for (i = 0; len && (i < 4); i++, len--) *rx_buf++ = (x >> i*8) & 0xFF; } @@ -345,8 +348,10 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf( read_words += tspi->curr_dma_words; } else { u32 rx_mask = ((u32)1 << t->bits_per_word) - 1; + for (count = 0; count < rx_full_count; count++) { u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask; + for (i = 0; (i < tspi->bytes_per_word); i++) *rx_buf++ = (x >> (i*8)) & 0xFF; } @@ -365,6 +370,7 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf( if (tspi->is_packed) { unsigned len = tspi->curr_dma_words * tspi->bytes_per_word; + memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len); } else { unsigned int i; @@ -374,6 +380,7 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf( for (count = 0; count < tspi->curr_dma_words; count++) { u32 x = 0; + for (i = 0; consume && (i < tspi->bytes_per_word); i++, consume--) x |= (u32)(*tx_buf++) << (i * 8); @@ -396,6 +403,7 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf( if (tspi->is_packed) { unsigned len = tspi->curr_dma_words * tspi->bytes_per_word; + memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len); } else { unsigned int i; @@ -405,6 +413,7 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf( for (count = 0; count < tspi->curr_dma_words; count++) { u32 x = tspi->rx_dma_buf[count] & rx_mask; + for (i = 0; (i < tspi->bytes_per_word); i++) *rx_buf++ = (x >> (i*8)) & 0xFF; } diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index 3548ce25c08f..cd66fe7b78a9 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c @@ -99,7 +99,7 @@ #define SPI_TX_TRIG_MASK (0x3 << 16) #define SPI_TX_TRIG_1W (0x0 << 16) #define SPI_TX_TRIG_4W (0x1 << 16) -#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF); +#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF) #define SPI_TX_FIFO 0x10 #define SPI_RX_FIFO 0x20 @@ -221,6 +221,7 @@ static int tegra_sflash_read_rx_fifo_to_client_rxbuf( while (!(status & SPI_RXF_EMPTY)) { int i; u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO); + for (i = 0; (i < tsd->bytes_per_word); i++) *rx_buf++ = (x >> (i*8)) & 0xFF; read_words++; diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c index 5f183baa91a9..2501a8373e89 100644 --- a/drivers/spi/spi-txx9.c +++ 
b/drivers/spi/spi-txx9.c @@ -97,6 +97,7 @@ static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c, int on, unsigned int cs_delay) { int val = (spi->mode & SPI_CS_HIGH) ? on : !on; + if (on) { /* deselect the chip with cs_change hint in last transfer */ if (c->last_chipselect >= 0) @@ -188,6 +189,7 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m) if (prev_speed_hz != speed_hz || prev_bits_per_word != bits_per_word) { int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1; + n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER); /* enter config mode */ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index 4d8efb16573d..79bd84f43430 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -471,7 +471,6 @@ static struct platform_driver xilinx_spi_driver = { .remove = xilinx_spi_remove, .driver = { .name = XILINX_SPI_NAME, - .owner = THIS_MODULE, .of_match_table = xilinx_spi_of_match, }, }; diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c index 41e158187f9d..0dc5df5233a9 100644 --- a/drivers/spi/spi-xtensa-xtfpga.c +++ b/drivers/spi/spi-xtensa-xtfpga.c @@ -46,6 +46,7 @@ static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi, static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) { unsigned i; + for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) && i < BUSY_WAIT_US; ++i) udelay(1); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e0531baf2782..e19512ffc40e 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -552,6 +552,9 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n) struct boardinfo *bi; int i; + if (!n) + return -EINVAL; + bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); if (!bi) return -ENOMEM; @@ -789,27 +792,35 @@ static int spi_transfer_one_message(struct spi_master *master, list_for_each_entry(xfer, &msg->transfers, transfer_list) { trace_spi_transfer_start(msg, xfer); - reinit_completion(&master->xfer_completion); + if (xfer->tx_buf || xfer->rx_buf) { + reinit_completion(&master->xfer_completion); - ret = master->transfer_one(master, msg->spi, xfer); - if (ret < 0) { - dev_err(&msg->spi->dev, - "SPI transfer failed: %d\n", ret); - goto out; - } + ret = master->transfer_one(master, msg->spi, xfer); + if (ret < 0) { + dev_err(&msg->spi->dev, + "SPI transfer failed: %d\n", ret); + goto out; + } - if (ret > 0) { - ret = 0; - ms = xfer->len * 8 * 1000 / xfer->speed_hz; - ms += ms + 100; /* some tolerance */ + if (ret > 0) { + ret = 0; + ms = xfer->len * 8 * 1000 / xfer->speed_hz; + ms += ms + 100; /* some tolerance */ - ms = wait_for_completion_timeout(&master->xfer_completion, - msecs_to_jiffies(ms)); - } + ms = wait_for_completion_timeout(&master->xfer_completion, + msecs_to_jiffies(ms)); + } - if (ms == 0) { - dev_err(&msg->spi->dev, "SPI transfer timed out\n"); - msg->status = -ETIMEDOUT; + if (ms == 0) { + dev_err(&msg->spi->dev, + "SPI transfer timed out\n"); + msg->status = -ETIMEDOUT; + } + } else { + if (xfer->len) + dev_err(&msg->spi->dev, + "Bufferless transfer has length %u\n", + xfer->len); } trace_spi_transfer_stop(msg, xfer); @@ -848,6 +859,7 @@ out: /** * spi_finalize_current_transfer - report completion of a transfer + * @master: the master reporting completion * * Called by SPI drivers using the core transfer_one_message() * implementation to notify it that the current interrupt driven diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c index 
19396dc4ee47..bed2fedeb057 100644 --- a/drivers/ssb/b43_pci_bridge.c +++ b/drivers/ssb/b43_pci_bridge.c @@ -38,6 +38,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4351) }, { 0, }, }; MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 2c486ea6236b..35b494f5667f 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -28,8 +28,6 @@ source "drivers/staging/et131x/Kconfig" source "drivers/staging/slicoss/Kconfig" -source "drivers/staging/usbip/Kconfig" - source "drivers/staging/wlan-ng/Kconfig" source "drivers/staging/comedi/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 1e1a3a10faf7..e66a5dbd9b02 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_STAGING) += staging.o obj-y += media/ obj-$(CONFIG_ET131X) += et131x/ obj-$(CONFIG_SLICOSS) += slicoss/ -obj-$(CONFIG_USBIP_CORE) += usbip/ obj-$(CONFIG_PRISM2_USB) += wlan-ng/ obj-$(CONFIG_COMEDI) += comedi/ obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/ diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 02b0379ae550..4f34dc0095b5 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -585,7 +585,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { int ret; - struct page **page_array_ptr; page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; @@ -598,8 +597,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, } tmp_area.addr = page_addr; tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? 
*/; - page_array_ptr = page; - ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); + ret = map_vm_area(&tmp_area, PAGE_KERNEL, page); if (ret) { pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", proc->pid, page_addr); diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index 9b47e66599a3..0bf0d24d12d5 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -790,7 +790,7 @@ static int __init create_log(char *log_name, int size) if (unlikely(ret)) { pr_err("failed to register misc device for log '%s'!\n", log->misc.name); - goto out_free_log; + goto out_free_misc_name; } pr_info("created %luK log '%s'\n", @@ -798,6 +798,9 @@ static int __init create_log(char *log_name, int size) return 0; +out_free_misc_name: + kfree(log->misc.name); + out_free_log: kfree(log); diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index e7b2e0234196..69139ce7420d 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -199,7 +199,6 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) fence->num_fences = 1; atomic_set(&fence->status, 1); - fence_get(&pt->base); fence->cbs[0].sync_pt = &pt->base; fence->cbs[0].fence = fence; if (fence_add_callback(&pt->base, &fence->cbs[0].cb, diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c index 8bf1eb485163..831b7c6fe494 100644 --- a/drivers/staging/et131x/et131x.c +++ b/drivers/staging/et131x/et131x.c @@ -1421,22 +1421,16 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) * @reg: the register to read * @value: 16-bit value to write */ -static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) +static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, + u16 value) { struct mac_regs __iomem *mac = &adapter->regs->mac; - struct phy_device *phydev = adapter->phydev; int status = 0; - u8 addr; u32 delay = 0; u32 mii_addr; u32 mii_cmd; u32 mii_indicator; - if (!phydev) - return -EIO; - - addr = phydev->addr; - /* Save a local copy of the registers we are dealing with so we can * set them back */ @@ -1631,17 +1625,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, struct net_device *netdev = bus->priv; struct et131x_adapter *adapter = netdev_priv(netdev); - return et131x_mii_write(adapter, reg, value); -} - -static int et131x_mdio_reset(struct mii_bus *bus) -{ - struct net_device *netdev = bus->priv; - struct et131x_adapter *adapter = netdev_priv(netdev); - - et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); - - return 0; + return et131x_mii_write(adapter, phy_addr, reg, value); } /* et1310_phy_power_switch - PHY power control @@ -1656,18 +1640,20 @@ static int et131x_mdio_reset(struct mii_bus *bus) static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) { u16 data; + struct phy_device *phydev = adapter->phydev; et131x_mii_read(adapter, MII_BMCR, &data); data &= ~BMCR_PDOWN; if (down) data |= BMCR_PDOWN; - et131x_mii_write(adapter, MII_BMCR, data); + et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); } /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ static void et131x_xcvr_init(struct et131x_adapter *adapter) { u16 lcr2; + struct phy_device *phydev = adapter->phydev; /* Set the LED behavior such that LED 1 indicates speed (off = * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates @@ -1688,7 +1674,7 @@ static void et131x_xcvr_init(struct 
et131x_adapter *adapter) else lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); - et131x_mii_write(adapter, PHY_LED_2, lcr2); + et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); } } @@ -3643,14 +3629,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18 | 0x4); - et131x_mii_write(adapter, PHY_INDEX_REG, + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, register18 | 0x8402); - et131x_mii_write(adapter, PHY_DATA_REG, + et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, register18 | 511); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18); } et1310_config_flow_control(adapter); @@ -3662,7 +3648,8 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_CONFIG, ®); reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; - et131x_mii_write(adapter, PHY_CONFIG, reg); + et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, + reg); } et131x_set_rx_dma_timer(adapter); @@ -3675,14 +3662,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18 | 0x4); - et131x_mii_write(adapter, PHY_INDEX_REG, - register18 | 0x8402); - et131x_mii_write(adapter, PHY_DATA_REG, - register18 | 511); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, + PHY_INDEX_REG, register18 | 0x8402); + et131x_mii_write(adapter, phydev->addr, + PHY_DATA_REG, register18 | 511); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18); } /* Free the packets being actively sent & stopped */ @@ -4644,10 +4631,6 @@ static int et131x_pci_setup(struct pci_dev *pdev, /* Copy address into the net_device struct */ memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); - /* Init variable for counting how long we do not have link status */ - adapter->boot_coma = 0; - et1310_disable_phy_coma(adapter); - rc = -ENOMEM; /* Setup the mii_bus struct */ @@ -4663,7 +4646,6 @@ static int et131x_pci_setup(struct pci_dev *pdev, adapter->mii_bus->priv = netdev; adapter->mii_bus->read = et131x_mdio_read; adapter->mii_bus->write = et131x_mdio_write; - adapter->mii_bus->reset = et131x_mdio_reset; adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); if (!adapter->mii_bus->irq) @@ -4687,6 +4669,10 @@ static int et131x_pci_setup(struct pci_dev *pdev, /* Setup et1310 as per the documentation */ et131x_adapter_setup(adapter); + /* Init variable for counting how long we do not have link status */ + adapter->boot_coma = 0; + et1310_disable_phy_coma(adapter); + /* We can enable interrupts now * * NOTE - Because registration of interrupt handler is done in the diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c index ea01b8f7a2c3..6f45ce0478d7 100644 --- a/drivers/staging/iio/meter/ade7758_trigger.c +++ b/drivers/staging/iio/meter/ade7758_trigger.c @@ -85,7 +85,7 @@ int ade7758_probe_trigger(struct iio_dev *indio_dev) ret = iio_trigger_register(st->trig); /* select default trigger */ - indio_dev->trig = st->trig; + indio_dev->trig = 
iio_trigger_get(st->trig); if (ret) goto error_free_irq; diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 47ee6c79857a..6b22106534d8 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -202,7 +202,7 @@ static const struct file_operations imx_drm_driver_fops = { void imx_drm_connector_destroy(struct drm_connector *connector) { - drm_sysfs_connector_remove(connector); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); @@ -293,10 +293,10 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) * userspace will expect to be able to access DRM at this point. */ list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - ret = drm_sysfs_connector_add(connector); + ret = drm_connector_register(connector); if (ret) { dev_err(drm->dev, - "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n", + "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n", connector->base.id, connector->name, ret); goto err_unbind; diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c index 7e3f019d7e72..4662e00b456a 100644 --- a/drivers/staging/imx-drm/imx-ldb.c +++ b/drivers/staging/imx-drm/imx-ldb.c @@ -574,6 +574,9 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, for (i = 0; i < 2; i++) { struct imx_ldb_channel *channel = &imx_ldb->channel[i]; + if (!channel->connector.funcs) + continue; + channel->connector.funcs->destroy(&channel->connector); channel->encoder.funcs->destroy(&channel->encoder); } diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c index 6f393a11f44d..50de10a550e9 100644 --- a/drivers/staging/imx-drm/ipuv3-plane.c +++ b/drivers/staging/imx-drm/ipuv3-plane.c @@ -281,7 +281,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode) ipu_idmac_put(ipu_plane->ipu_ch); ipu_dmfc_put(ipu_plane->dmfc); - ipu_dp_put(ipu_plane->dp); + if (ipu_plane->dp) + ipu_dp_put(ipu_plane->dp); } } diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c index 5dde79418297..8ef1deb59d4a 100644 --- a/drivers/staging/lustre/lustre/libcfs/hash.c +++ b/drivers/staging/lustre/lustre/libcfs/hash.c @@ -351,7 +351,7 @@ cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, cfs_hash_dhead_t, dh_head); if (dh->dh_tail != NULL) /* not empty */ - hlist_add_after(dh->dh_tail, hnode); + hlist_add_behind(hnode, dh->dh_tail); else /* empty list */ hlist_add_head(hnode, &dh->dh_head); dh->dh_tail = hnode; @@ -406,7 +406,7 @@ cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, cfs_hash_dhead_dep_t, dd_head); if (dh->dd_tail != NULL) /* not empty */ - hlist_add_after(dh->dd_tail, hnode); + hlist_add_behind(hnode, dh->dd_tail); else /* empty list */ hlist_add_head(hnode, &dh->dd_head); dh->dd_tail = hnode; diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c index 65629579bd7d..03ab9e046784 100644 --- a/drivers/staging/lustre/lustre/libcfs/workitem.c +++ b/drivers/staging/lustre/lustre/libcfs/workitem.c @@ -365,6 +365,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, return -ENOMEM; strncpy(sched->ws_name, name, CFS_WS_NAME_LEN); + sched->ws_name[CFS_WS_NAME_LEN - 1] = '\0'; sched->ws_cptab = cptab; sched->ws_cpt = cpt; diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c 
b/drivers/staging/lustre/lustre/llite/llite_lib.c index 0367f5a2cfe4..0c59e26c0805 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -568,7 +568,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (sb->s_root == NULL) { CERROR("%s: can't make root dentry\n", ll_get_fsname(sb, NULL, 0)); - GOTO(out_root, err = -ENOMEM); + GOTO(out_lock_cn_cb, err = -ENOMEM); } sbi->ll_sdev_orig = sb->s_dev; diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c index 8b19f3caa68f..701c6a776524 100644 --- a/drivers/staging/lustre/lustre/obdclass/class_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c @@ -35,7 +35,7 @@ */ #define DEBUG_SUBSYSTEM S_CLASS -# include <asm/atomic.h> +# include <linux/atomic.h> #include "../include/obd_support.h" #include "../include/obd_class.h" diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index b8676ac77b0c..407a318b09db 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -43,9 +43,11 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ /*=== Customer ID ===*/ /****** 8188EUS ********/ + {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 2920e406030a..5729cf678765 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2065,20 +2065,16 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev) int i, rx_queue_idx; for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) { - priv->rx_ring[rx_queue_idx] = pci_alloc_consistent(priv->pdev, - sizeof(*priv->rx_ring[rx_queue_idx]) * - priv->rxringcount, - &priv->rx_ring_dma[rx_queue_idx]); - + priv->rx_ring[rx_queue_idx] = + pci_zalloc_consistent(priv->pdev, + sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount, + &priv->rx_ring_dma[rx_queue_idx]); if (!priv->rx_ring[rx_queue_idx] || (unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) { RT_TRACE(COMP_ERR, "Cannot allocate RX ring\n"); return -ENOMEM; } - memset(priv->rx_ring[rx_queue_idx], 0, - sizeof(*priv->rx_ring[rx_queue_idx]) * - priv->rxringcount); priv->rx_idx[rx_queue_idx] = 0; for (i = 0; i < priv->rxringcount; i++) { @@ -2118,14 +2114,13 @@ static int rtl8192_alloc_tx_desc_ring(struct net_device *dev, dma_addr_t dma; int i; - ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); + ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); if (!ring || (unsigned long)ring & 0xFF) { RT_TRACE(COMP_ERR, "Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } - memset(ring, 0, sizeof(*ring)*entries); priv->tx_ring[prio].desc = ring; priv->tx_ring[prio].dma = dma; priv->tx_ring[prio].idx = 0; diff --git a/drivers/staging/rtl8192ee/pci.c b/drivers/staging/rtl8192ee/pci.c index f3abbcc9f3ba..0215aef1eacc 100644 --- a/drivers/staging/rtl8192ee/pci.c +++ b/drivers/staging/rtl8192ee/pci.c @@ -1224,10 +1224,10 @@ static int _rtl_pci_init_tx_ring(struct 
ieee80211_hw *hw, /* alloc tx buffer desc for new trx flow*/ if (rtlpriv->use_new_trx_flow) { - buffer_desc = pci_alloc_consistent(rtlpci->pdev, - sizeof(*buffer_desc) * entries, - &buffer_desc_dma); - + buffer_desc = + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*buffer_desc) * entries, + &buffer_desc_dma); if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate TX ring (prio = %d)\n", @@ -1235,7 +1235,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, return -ENOMEM; } - memset(buffer_desc, 0, sizeof(*buffer_desc) * entries); rtlpci->tx_ring[prio].buffer_desc = buffer_desc; rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma; @@ -1245,16 +1244,14 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, } /* alloc dma for this ring */ - desc = pci_alloc_consistent(rtlpci->pdev, - sizeof(*desc) * entries, &desc_dma); - + desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*desc) * entries, + &desc_dma); if (!desc || (unsigned long)desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate TX ring (prio = %d)\n", prio)); return -ENOMEM; } - memset(desc, 0, sizeof(*desc) * entries); rtlpci->tx_ring[prio].desc = desc; rtlpci->tx_ring[prio].dma = desc_dma; @@ -1290,11 +1287,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) struct rtl_rx_buffer_desc *entry = NULL; /* alloc dma for this ring */ rtlpci->rx_ring[rxring_idx].buffer_desc = - pci_alloc_consistent(rtlpci->pdev, - sizeof(*rtlpci->rx_ring[rxring_idx]. - buffer_desc) * - rtlpci->rxringcount, - &rtlpci->rx_ring[rxring_idx].dma); + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].buffer_desc || (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, @@ -1302,10 +1297,6 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) return -ENOMEM; } - memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0, - sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * - rtlpci->rxringcount); - /* init every desc in this ring */ rtlpci->rx_ring[rxring_idx].idx = 0; @@ -1320,19 +1311,15 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) u8 tmp_one = 1; /* alloc dma for this ring */ rtlpci->rx_ring[rxring_idx].desc = - pci_alloc_consistent(rtlpci->pdev, - sizeof(*rtlpci->rx_ring[rxring_idx]. 
- desc) * rtlpci->rxringcount, - &rtlpci->rx_ring[rxring_idx].dma); + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].desc || (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n")); return -ENOMEM; } - memset(rtlpci->rx_ring[rxring_idx].desc, 0, - sizeof(*rtlpci->rx_ring[rxring_idx].desc) * - rtlpci->rxringcount); /* init every desc in this ring */ rtlpci->rx_ring[rxring_idx].idx = 0; diff --git a/drivers/staging/rtl8821ae/pci.c b/drivers/staging/rtl8821ae/pci.c index f9847d1fbdeb..26d7b2fc852a 100644 --- a/drivers/staging/rtl8821ae/pci.c +++ b/drivers/staging/rtl8821ae/pci.c @@ -1248,9 +1248,10 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, /* alloc tx buffer desc for new trx flow*/ if (rtlpriv->use_new_trx_flow) { - buffer_desc = pci_alloc_consistent(rtlpci->pdev, - sizeof(*buffer_desc) * entries, - &buffer_desc_dma); + buffer_desc = + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*buffer_desc) * entries, + &buffer_desc_dma); if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, @@ -1259,7 +1260,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, return -ENOMEM; } - memset(buffer_desc, 0, sizeof(*buffer_desc) * entries); rtlpci->tx_ring[prio].buffer_desc = buffer_desc; rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma; @@ -1270,8 +1270,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, } /* alloc dma for this ring */ - desc = pci_alloc_consistent(rtlpci->pdev, - sizeof(*desc) * entries, &desc_dma); + desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*desc) * entries, + &desc_dma); if (!desc || (unsigned long)desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, @@ -1279,7 +1279,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, return -ENOMEM; } - memset(desc, 0, sizeof(*desc) * entries); rtlpci->tx_ring[prio].desc = desc; rtlpci->tx_ring[prio].dma = desc_dma; @@ -1316,21 +1315,15 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) struct rtl_rx_buffer_desc *entry = NULL; /* alloc dma for this ring */ rtlpci->rx_ring[rxring_idx].buffer_desc = - pci_alloc_consistent(rtlpci->pdev, - sizeof(*rtlpci->rx_ring[rxring_idx]. - buffer_desc) * - rtlpci->rxringcount, - &rtlpci->rx_ring[rxring_idx].dma); + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].buffer_desc || (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n")); return -ENOMEM; } - memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0, - sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * - rtlpci->rxringcount); - /* init every desc in this ring */ rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { @@ -1344,10 +1337,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) u8 tmp_one = 1; /* alloc dma for this ring */ rtlpci->rx_ring[rxring_idx].desc = - pci_alloc_consistent(rtlpci->pdev, - sizeof(*rtlpci->rx_ring[rxring_idx]. 
- desc) * rtlpci->rxringcount, - &rtlpci->rx_ring[rxring_idx].dma); + pci_zalloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].desc || (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { RT_TRACE(COMP_ERR, DBG_EMERG, @@ -1355,10 +1347,6 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) return -ENOMEM; } - memset(rtlpci->rx_ring[rxring_idx].desc, 0, - sizeof(*rtlpci->rx_ring[rxring_idx].desc) * - rtlpci->rxringcount); - /* init every desc in this ring */ rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c index 50ece291fc6a..f35fa3dfe22c 100644 --- a/drivers/staging/slicoss/slicoss.c +++ b/drivers/staging/slicoss/slicoss.c @@ -1191,18 +1191,15 @@ static int slic_rspqueue_init(struct adapter *adapter) rspq->num_pages = SLIC_RSPQ_PAGES_GB; for (i = 0; i < rspq->num_pages; i++) { - rspq->vaddr[i] = pci_alloc_consistent(adapter->pcidev, - PAGE_SIZE, - &rspq->paddr[i]); + rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev, + PAGE_SIZE, + &rspq->paddr[i]); if (!rspq->vaddr[i]) { dev_err(&adapter->pcidev->dev, "pci_alloc_consistent failed\n"); slic_rspqueue_free(adapter); return -ENOMEM; } - /* FIXME: - * do we really need this assertions (4K PAGE_SIZE aligned addr)? */ - memset(rspq->vaddr[i], 0, PAGE_SIZE); if (paddrh == 0) { slic_reg32_write(&slic_regs->slic_rbar, diff --git a/drivers/staging/usbip/uapi/usbip.h b/drivers/staging/usbip/uapi/usbip.h deleted file mode 100644 index fa5db30ede36..000000000000 --- a/drivers/staging/usbip/uapi/usbip.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * usbip.h - * - * USBIP uapi defines and function prototypes etc. -*/ - -#ifndef _UAPI_LINUX_USBIP_H -#define _UAPI_LINUX_USBIP_H - -/* usbip device status - exported in usbip device sysfs status */ -enum usbip_device_status { - /* sdev is available. */ - SDEV_ST_AVAILABLE = 0x01, - /* sdev is now used. */ - SDEV_ST_USED, - /* sdev is unusable because of a fatal error. */ - SDEV_ST_ERROR, - - /* vdev does not connect a remote device. 
*/ - VDEV_ST_NULL, - /* vdev is used, but the USB address is not assigned yet */ - VDEV_ST_NOTASSIGNED, - VDEV_ST_USED, - VDEV_ST_ERROR -}; -#endif /* _UAPI_LINUX_USBIP_H */ diff --git a/drivers/staging/usbip/userspace/.gitignore b/drivers/staging/usbip/userspace/.gitignore deleted file mode 100644 index 9aad9e30a8ba..000000000000 --- a/drivers/staging/usbip/userspace/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -Makefile -Makefile.in -aclocal.m4 -autom4te.cache/ -config.guess -config.h -config.h.in -config.log -config.status -config.sub -configure -depcomp -install-sh -libsrc/Makefile -libsrc/Makefile.in -libtool -ltmain.sh -missing -src/Makefile -src/Makefile.in -stamp-h1 -libsrc/libusbip.la -libsrc/libusbip_la-names.lo -libsrc/libusbip_la-usbip_common.lo -libsrc/libusbip_la-usbip_host_driver.lo -libsrc/libusbip_la-vhci_driver.lo -src/usbip -src/usbipd diff --git a/drivers/staging/usbip/userspace/AUTHORS b/drivers/staging/usbip/userspace/AUTHORS deleted file mode 100644 index a27ea8d03aec..000000000000 --- a/drivers/staging/usbip/userspace/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -Takahiro Hirofuchi -Robert Leibl -matt mooney <mfm@muteddisk.com> diff --git a/drivers/staging/usbip/userspace/COPYING b/drivers/staging/usbip/userspace/COPYING deleted file mode 100644 index c5611e48a8e1..000000000000 --- a/drivers/staging/usbip/userspace/COPYING +++ /dev/null @@ -1,340 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Library General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. 
- - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. 
- -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - <signature of Ty Coon>, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General -Public License instead of this License. diff --git a/drivers/staging/usbip/userspace/INSTALL b/drivers/staging/usbip/userspace/INSTALL deleted file mode 100644 index d3c5b40a9409..000000000000 --- a/drivers/staging/usbip/userspace/INSTALL +++ /dev/null @@ -1,237 +0,0 @@ -Installation Instructions -************************* - -Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, -2006, 2007 Free Software Foundation, Inc. - -This file is free documentation; the Free Software Foundation gives -unlimited permission to copy, distribute and modify it. - -Basic Installation -================== - -Briefly, the shell commands `./configure; make; make install' should -configure, build, and install this package. The following -more-detailed instructions are generic; see the `README' file for -instructions specific to this package. - - The `configure' shell script attempts to guess correct values for -various system-dependent variables used during compilation. It uses -those values to create a `Makefile' in each directory of the package. -It may also create one or more `.h' files containing system-dependent -definitions. 
Finally, it creates a shell script `config.status' that -you can run in the future to recreate the current configuration, and a -file `config.log' containing compiler output (useful mainly for -debugging `configure'). - - It can also use an optional file (typically called `config.cache' -and enabled with `--cache-file=config.cache' or simply `-C') that saves -the results of its tests to speed up reconfiguring. Caching is -disabled by default to prevent problems with accidental use of stale -cache files. - - If you need to do unusual things to compile the package, please try -to figure out how `configure' could check whether to do them, and mail -diffs or instructions to the address given in the `README' so they can -be considered for the next release. If you are using the cache, and at -some point `config.cache' contains results you don't want to keep, you -may remove or edit it. - - The file `configure.ac' (or `configure.in') is used to create -`configure' by a program called `autoconf'. You need `configure.ac' if -you want to change it or regenerate `configure' using a newer version -of `autoconf'. - -The simplest way to compile this package is: - - 1. `cd' to the directory containing the package's source code and type - `./configure' to configure the package for your system. - - Running `configure' might take a while. While running, it prints - some messages telling which features it is checking for. - - 2. Type `make' to compile the package. - - 3. Optionally, type `make check' to run any self-tests that come with - the package. - - 4. Type `make install' to install the programs and any data files and - documentation. - - 5. You can remove the program binaries and object files from the - source code directory by typing `make clean'. To also remove the - files that `configure' created (so you can compile the package for - a different kind of computer), type `make distclean'. There is - also a `make maintainer-clean' target, but that is intended mainly - for the package's developers. If you use it, you may have to get - all sorts of other programs in order to regenerate files that came - with the distribution. - - 6. Often, you can also type `make uninstall' to remove the installed - files again. - -Compilers and Options -===================== - -Some systems require unusual options for compilation or linking that the -`configure' script does not know about. Run `./configure --help' for -details on some of the pertinent environment variables. - - You can give `configure' initial values for configuration parameters -by setting variables in the command line or in the environment. Here -is an example: - - ./configure CC=c99 CFLAGS=-g LIBS=-lposix - - *Note Defining Variables::, for more details. - -Compiling For Multiple Architectures -==================================== - -You can compile the package for more than one kind of computer at the -same time, by placing the object files for each architecture in their -own directory. To do this, you can use GNU `make'. `cd' to the -directory where you want the object files and executables to go and run -the `configure' script. `configure' automatically checks for the -source code in the directory that `configure' is in and in `..'. - - With a non-GNU `make', it is safer to compile the package for one -architecture at a time in the source code directory. After you have -installed the package for one architecture, use `make distclean' before -reconfiguring for another architecture. 
- -Installation Names -================== - -By default, `make install' installs the package's commands under -`/usr/local/bin', include files under `/usr/local/include', etc. You -can specify an installation prefix other than `/usr/local' by giving -`configure' the option `--prefix=PREFIX'. - - You can specify separate installation prefixes for -architecture-specific files and architecture-independent files. If you -pass the option `--exec-prefix=PREFIX' to `configure', the package uses -PREFIX as the prefix for installing programs and libraries. -Documentation and other data files still use the regular prefix. - - In addition, if you use an unusual directory layout you can give -options like `--bindir=DIR' to specify different values for particular -kinds of files. Run `configure --help' for a list of the directories -you can set and what kinds of files go in them. - - If the package supports it, you can cause programs to be installed -with an extra prefix or suffix on their names by giving `configure' the -option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. - -Optional Features -================= - -Some packages pay attention to `--enable-FEATURE' options to -`configure', where FEATURE indicates an optional part of the package. -They may also pay attention to `--with-PACKAGE' options, where PACKAGE -is something like `gnu-as' or `x' (for the X Window System). The -`README' should mention any `--enable-' and `--with-' options that the -package recognizes. - - For packages that use the X Window System, `configure' can usually -find the X include and library files automatically, but if it doesn't, -you can use the `configure' options `--x-includes=DIR' and -`--x-libraries=DIR' to specify their locations. - -Specifying the System Type -========================== - -There may be some features `configure' cannot figure out automatically, -but needs to determine by the type of machine the package will run on. -Usually, assuming the package is built to be run on the _same_ -architectures, `configure' can figure that out, but if it prints a -message saying it cannot guess the machine type, give it the -`--build=TYPE' option. TYPE can either be a short name for the system -type, such as `sun4', or a canonical name which has the form: - - CPU-COMPANY-SYSTEM - -where SYSTEM can have one of these forms: - - OS KERNEL-OS - - See the file `config.sub' for the possible values of each field. If -`config.sub' isn't included in this package, then this package doesn't -need to know the machine type. - - If you are _building_ compiler tools for cross-compiling, you should -use the option `--target=TYPE' to select the type of system they will -produce code for. - - If you want to _use_ a cross compiler, that generates code for a -platform different from the build platform, you should specify the -"host" platform (i.e., that on which the generated programs will -eventually be run) with `--host=TYPE'. - -Sharing Defaults -================ - -If you want to set default values for `configure' scripts to share, you -can create a site shell script called `config.site' that gives default -values for variables like `CC', `cache_file', and `prefix'. -`configure' looks for `PREFIX/share/config.site' if it exists, then -`PREFIX/etc/config.site' if it exists. Or, you can set the -`CONFIG_SITE' environment variable to the location of the site script. -A warning: not all `configure' scripts look for a site script. 
- -Defining Variables -================== - -Variables not defined in a site shell script can be set in the -environment passed to `configure'. However, some packages may run -configure again during the build, and the customized values of these -variables may be lost. In order to avoid this problem, you should set -them in the `configure' command line, using `VAR=value'. For example: - - ./configure CC=/usr/local2/bin/gcc - -causes the specified `gcc' to be used as the C compiler (unless it is -overridden in the site shell script). - -Unfortunately, this technique does not work for `CONFIG_SHELL' due to -an Autoconf bug. Until the bug is fixed you can use this workaround: - - CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash - -`configure' Invocation -====================== - -`configure' recognizes the following options to control how it operates. - -`--help' -`-h' - Print a summary of the options to `configure', and exit. - -`--version' -`-V' - Print the version of Autoconf used to generate the `configure' - script, and exit. - -`--cache-file=FILE' - Enable the cache: use and save the results of the tests in FILE, - traditionally `config.cache'. FILE defaults to `/dev/null' to - disable caching. - -`--config-cache' -`-C' - Alias for `--cache-file=config.cache'. - -`--quiet' -`--silent' -`-q' - Do not print messages saying which checks are being made. To - suppress all normal output, redirect it to `/dev/null' (any error - messages will still be shown). - -`--srcdir=DIR' - Look for the package's source code in directory DIR. Usually - `configure' can determine that directory automatically. - -`configure' also accepts some other, not widely useful, options. Run -`configure --help' for more details. - diff --git a/drivers/staging/usbip/userspace/Makefile.am b/drivers/staging/usbip/userspace/Makefile.am deleted file mode 100644 index 66f8bf038c9f..000000000000 --- a/drivers/staging/usbip/userspace/Makefile.am +++ /dev/null @@ -1,6 +0,0 @@ -SUBDIRS := libsrc src -includedir = @includedir@/usbip -include_HEADERS := $(addprefix libsrc/, \ - usbip_common.h vhci_driver.h usbip_host_driver.h) - -dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8) diff --git a/drivers/staging/usbip/userspace/README b/drivers/staging/usbip/userspace/README deleted file mode 100644 index 831f49fea3ce..000000000000 --- a/drivers/staging/usbip/userspace/README +++ /dev/null @@ -1,202 +0,0 @@ -# -# README for usbip-utils -# -# Copyright (C) 2011 matt mooney <mfm@muteddisk.com> -# 2005-2008 Takahiro Hirofuchi - - -[Requirements] - - USB/IP device drivers - Found in the staging directory of the Linux kernel. - - - libudev >= 2.0 - libudev library - - - libwrap0-dev - tcp wrapper library - - - gcc >= 4.0 - - - libtool, automake >= 1.9, autoconf >= 2.5.0, pkg-config - -[Optional] - - hwdata - Contains USB device identification data. - - -[Install] - 0. Generate configuration scripts. - $ ./autogen.sh - - 1. Compile & install the userspace utilities. - $ ./configure [--with-tcp-wrappers=no] [--with-usbids-dir=<dir>] - $ make install - - 2. Compile & install USB/IP drivers. - - -[Usage] - server:# (Physically attach your USB device.) - - server:# insmod usbip-core.ko - server:# insmod usbip-host.ko - - server:# usbipd -D - - Start usbip daemon. - - server:# usbip list -l - - List driver assignments for USB devices. - - server:# usbip bind --busid 1-2 - - Bind usbip-host.ko to the device with busid 1-2. - - The USB device 1-2 is now exportable to other hosts! 
- - Use `usbip unbind --busid 1-2' to stop exporting the device. - - client:# insmod usbip-core.ko - client:# insmod vhci-hcd.ko - - client:# usbip list --remote <host> - - List exported USB devices on the <host>. - - client:# usbip attach --remote <host> --busid 1-2 - - Connect the remote USB device. - - client:# usbip port - - Show virtual port status. - - client:# usbip detach --port <port> - - Detach the USB device. - - -[Example] ---------------------------- - SERVER SIDE ---------------------------- -Physically attach your USB devices to this host. - - trois:# insmod path/to/usbip-core.ko - trois:# insmod path/to/usbip-host.ko - trois:# usbipd -D - -In another terminal, let's look up what USB devices are physically -attached to this host. - - trois:# usbip list -l - Local USB devices - ================= - - busid 1-1 (05a9:a511) - 1-1:1.0 -> ov511 - - - busid 3-2 (0711:0902) - 3-2:1.0 -> none - - - busid 3-3.1 (08bb:2702) - 3-3.1:1.0 -> snd-usb-audio - 3-3.1:1.1 -> snd-usb-audio - - - busid 3-3.2 (04bb:0206) - 3-3.2:1.0 -> usb-storage - - - busid 3-3 (0409:0058) - 3-3:1.0 -> hub - - - busid 4-1 (046d:08b2) - 4-1:1.0 -> none - 4-1:1.1 -> none - 4-1:1.2 -> none - - - busid 5-2 (058f:9254) - 5-2:1.0 -> hub - -A USB storage device of busid 3-3.2 is now bound to the usb-storage -driver. To export this device, we first mark the device as -"exportable"; the device is bound to the usbip-host driver. Please -remember you can not export a USB hub. - -Mark the device of busid 3-3.2 as exportable: - - trois:# usbip --debug bind --busid 3-3.2 - ... - usbip debug: usbip_bind.c:162:[unbind_other] 3-3.2:1.0 -> usb-storage - ... - bind device on busid 3-3.2: complete - - trois:# usbip list -l - Local USB devices - ================= - ... - - - busid 3-3.2 (04bb:0206) - 3-3.2:1.0 -> usbip-host - ... - ---------------------------- - CLIENT SIDE ---------------------------- -First, let's list available remote devices that are marked as -exportable on the host. - - deux:# insmod path/to/usbip-core.ko - deux:# insmod path/to/vhci-hcd.ko - - deux:# usbip list --remote 10.0.0.3 - Exportable USB devices - ====================== - - 10.0.0.3 - 1-1: Prolific Technology, Inc. : unknown product (067b:3507) - : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-1 - : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00) - : 0 - Mass Storage / SCSI / Bulk (Zip) (08/06/50) - - 1-2.2.1: Apple Computer, Inc. : unknown product (05ac:0203) - : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-2/1-2.2/1-2.2.1 - : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00) - : 0 - Human Interface Devices / Boot Interface Subclass / Keyboard (03/01/01) - - 1-2.2.3: OmniVision Technologies, Inc. : OV511+ WebCam (05a9:a511) - : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-2/1-2.2/1-2.2.3 - : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00) - : 0 - Vendor Specific Class / unknown subclass / unknown protocol (ff/00/00) - - 3-1: Logitech, Inc. 
: QuickCam Pro 4000 (046d:08b2) - : /sys/devices/pci0000:00/0000:00:1e.0/0000:02:0a.0/usb3/3-1 - : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00) - : 0 - Data / unknown subclass / unknown protocol (0a/ff/00) - : 1 - Audio / Control Device / unknown protocol (01/01/00) - : 2 - Audio / Streaming / unknown protocol (01/02/00) - -Attach a remote USB device: - - deux:# usbip attach --remote 10.0.0.3 --busid 1-1 - port 0 attached - -Show the devices attached to this client: - - deux:# usbip port - Port 00: <Port in Use> at Full Speed(12Mbps) - Prolific Technology, Inc. : unknown product (067b:3507) - 6-1 -> usbip://10.0.0.3:3240/1-1 (remote bus/dev 001/004) - 6-1:1.0 used by usb-storage - /sys/class/scsi_device/0:0:0:0/device - /sys/class/scsi_host/host0/device - /sys/block/sda/device - -Detach the imported device: - - deux:# usbip detach --port 0 - port 0 detached - - -[Checklist] - - See 'Debug Tips' on the project wiki. - - http://usbip.wiki.sourceforge.net/how-to-debug-usbip - - usbip-host.ko must be bound to the target device. - - See /proc/bus/usb/devices and find "Driver=..." lines of the device. - - Shutdown firewall. - - usbip now uses TCP port 3240. - - Disable SELinux. - - Check the kernel and daemon messages. - - -[Contact] - Mailing List: linux-usb@vger.kernel.org diff --git a/drivers/staging/usbip/userspace/autogen.sh b/drivers/staging/usbip/userspace/autogen.sh deleted file mode 100755 index e1112d3fcbf6..000000000000 --- a/drivers/staging/usbip/userspace/autogen.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -x - -#aclocal -#autoheader -#libtoolize --copy --force -#automake-1.9 -acf -#autoconf - -autoreconf -i -f -v diff --git a/drivers/staging/usbip/userspace/cleanup.sh b/drivers/staging/usbip/userspace/cleanup.sh deleted file mode 100755 index 955c3ccb729a..000000000000 --- a/drivers/staging/usbip/userspace/cleanup.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -if [ -r Makefile ]; then - make distclean -fi - -FILES="aclocal.m4 autom4te.cache compile config.guess config.h.in config.log \ - config.status config.sub configure cscope.out depcomp install-sh \ - libsrc/Makefile libsrc/Makefile.in libtool ltmain.sh Makefile \ - Makefile.in missing src/Makefile src/Makefile.in" - -rm -vRf $FILES diff --git a/drivers/staging/usbip/userspace/configure.ac b/drivers/staging/usbip/userspace/configure.ac deleted file mode 100644 index 607d05c5ccfd..000000000000 --- a/drivers/staging/usbip/userspace/configure.ac +++ /dev/null @@ -1,111 +0,0 @@ -dnl Process this file with autoconf to produce a configure script. - -AC_PREREQ(2.59) -AC_INIT([usbip-utils], [2.0], [linux-usb@vger.kernel.org]) -AC_DEFINE([USBIP_VERSION], [0x00000111], [binary-coded decimal version number]) - -CURRENT=0 -REVISION=1 -AGE=0 -AC_SUBST([LIBUSBIP_VERSION], [$CURRENT:$REVISION:$AGE], [library version]) - -AC_CONFIG_SRCDIR([src/usbipd.c]) -AC_CONFIG_HEADERS([config.h]) - -AM_INIT_AUTOMAKE([foreign]) -LT_INIT - -# Silent build for automake >= 1.11 -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) - -AC_SUBST([EXTRA_CFLAGS], ["-Wall -Werror -Wextra -std=gnu99"]) - -# Checks for programs. -AC_PROG_CC -AC_PROG_INSTALL -AC_PROG_MAKE_SET - -# Checks for header files. -AC_HEADER_DIRENT -AC_HEADER_STDC -AC_CHECK_HEADERS([arpa/inet.h fcntl.h netdb.h netinet/in.h stdint.h stdlib.h dnl - string.h sys/socket.h syslog.h unistd.h]) - -# Checks for typedefs, structures, and compiler characteristics. 
-AC_TYPE_INT32_T -AC_TYPE_SIZE_T -AC_TYPE_SSIZE_T -AC_TYPE_UINT16_T -AC_TYPE_UINT32_T -AC_TYPE_UINT8_T - -# Checks for library functions. -AC_FUNC_REALLOC -AC_CHECK_FUNCS([memset mkdir regcomp socket strchr strerror strstr dnl - strtoul]) - -AC_CHECK_HEADER([libudev.h], - [AC_CHECK_LIB([udev], [udev_new], - [LIBS="$LIBS -ludev"], - [AC_MSG_ERROR([Missing udev library!])])], - [AC_MSG_ERROR([Missing /usr/include/libudev.h])]) - -# Checks for libwrap library. -AC_MSG_CHECKING([whether to use the libwrap (TCP wrappers) library]) -AC_ARG_WITH([tcp-wrappers], - [AS_HELP_STRING([--with-tcp-wrappers], - [use the libwrap (TCP wrappers) library])], - dnl [ACTION-IF-GIVEN] - [if test "$withval" = "yes"; then - AC_MSG_RESULT([yes]) - AC_MSG_CHECKING([for hosts_access in -lwrap]) - saved_LIBS="$LIBS" - LIBS="-lwrap $saved_LIBS" - AC_TRY_LINK( - [int hosts_access(); int allow_severity, deny_severity;], - [hosts_access()], - [AC_MSG_RESULT([yes]); - AC_DEFINE([HAVE_LIBWRAP], [1], - [use tcp wrapper]) wrap_LIB="-lwrap"], - [AC_MSG_RESULT([not found]); exit 1]) - else - AC_MSG_RESULT([no]); - fi], - dnl [ACTION-IF-NOT-GIVEN] - [AC_MSG_RESULT([(default)]) - AC_MSG_CHECKING([for hosts_access in -lwrap]) - saved_LIBS="$LIBS" - LIBS="-lwrap $saved_LIBS" - AC_TRY_LINK( - [int hosts_access(); int allow_severity, deny_severity;], - [hosts_access()], - [AC_MSG_RESULT([yes]); - AC_DEFINE([HAVE_LIBWRAP], [1], [use tcp wrapper])], - [AC_MSG_RESULT([no]); LIBS="$saved_LIBS"])]) - -# Sets directory containing usb.ids. -AC_ARG_WITH([usbids-dir], - [AS_HELP_STRING([--with-usbids-dir=DIR], - [where usb.ids is found (default /usr/share/hwdata/)])], - [USBIDS_DIR=$withval], [USBIDS_DIR="/usr/share/hwdata/"]) -AC_SUBST([USBIDS_DIR]) - -# use _FORTIFY_SOURCE -AC_MSG_CHECKING([whether to use fortify]) -AC_ARG_WITH([fortify], - [AS_HELP_STRING([--with-fortify], - [use _FORTIFY_SROUCE option when compiling)])], - dnl [ACTION-IF-GIVEN] - [if test "$withval" = "yes"; then - AC_MSG_RESULT([yes]) - CFLAGS="$CFLAGS -D_FORTIFY_SOURCE -O" - else - AC_MSG_RESULT([no]) - CFLAGS="$CFLAGS -U_FORTIFY_SOURCE" - fi - ], - dnl [ACTION-IF-NOT-GIVEN] - [AC_MSG_RESULT([default])]) - -AC_CONFIG_FILES([Makefile libsrc/Makefile src/Makefile]) -AC_OUTPUT diff --git a/drivers/staging/usbip/userspace/doc/usbip.8 b/drivers/staging/usbip/userspace/doc/usbip.8 deleted file mode 100644 index a6097be25d28..000000000000 --- a/drivers/staging/usbip/userspace/doc/usbip.8 +++ /dev/null @@ -1,95 +0,0 @@ -.TH USBIP "8" "February 2009" "usbip" "System Administration Utilities" -.SH NAME -usbip \- manage USB/IP devices -.SH SYNOPSIS -.B usbip -[\fIoptions\fR] <\fIcommand\fR> <\fIargs\fR> - -.SH DESCRIPTION -On a USB/IP server, devices can be listed, bound, and unbound using -this program. On a USB/IP client, devices exported by USB/IP servers -can be listed, attached and detached. - -.SH OPTIONS -.HP -\fB\-\-debug\fR -.IP -Print debugging information. -.PP - -.HP -\fB\-\-log\fR -.IP -Log to syslog. -.PP - -.HP -\fB\-\-tcp-port PORT\fR -.IP -Connect to PORT on remote host (used for attach and list --remote). -.PP - -.SH COMMANDS -.HP -\fBversion\fR -.IP -Show version and exit. -.PP - -.HP -\fBhelp\fR [\fIcommand\fR] -.IP -Print the program help message, or help on a specific command, and -then exit. -.PP - -.HP -\fBattach\fR \-\-remote=<\fIhost\fR> \-\-busid=<\fIbus_id\fR> -.IP -Attach a remote USB device. -.PP - -.HP -\fBdetach\fR \-\-port=<\fIport\fR> -.IP -Detach an imported USB device. 
-.PP - -.HP -\fBbind\fR \-\-busid=<\fIbusid\fR> -.IP -Make a device exportable. -.PP - -.HP -\fBunbind\fR \-\-busid=<\fIbusid\fR> -.IP -Stop exporting a device so it can be used by a local driver. -.PP - -.HP -\fBlist\fR \-\-remote=<\fIhost\fR> -.IP -List USB devices exported by a remote host. -.PP - -.HP -\fBlist\fR \-\-local -.IP -List local USB devices. -.PP - - -.SH EXAMPLES - - client:# usbip list --remote=server - - List exportable usb devices on the server. - - client:# usbip attach --remote=server --busid=1-2 - - Connect the remote USB device. - - client:# usbip detach --port=0 - - Detach the usb device. - -.SH "SEE ALSO" -\fBusbipd\fP\fB(8)\fB\fP diff --git a/drivers/staging/usbip/userspace/doc/usbipd.8 b/drivers/staging/usbip/userspace/doc/usbipd.8 deleted file mode 100644 index ac4635db3f03..000000000000 --- a/drivers/staging/usbip/userspace/doc/usbipd.8 +++ /dev/null @@ -1,91 +0,0 @@ -.TH USBIP "8" "February 2009" "usbip" "System Administration Utilities" -.SH NAME -usbipd \- USB/IP server daemon -.SH SYNOPSIS -.B usbipd -[\fIoptions\fR] - -.SH DESCRIPTION -.B usbipd -provides USB/IP clients access to exported USB devices. - -Devices have to explicitly be exported using -.B usbip bind -before usbipd makes them available to other hosts. - -The daemon accepts connections from USB/IP clients -on TCP port 3240 by default. - -.SH OPTIONS -.HP -\fB\-4\fR, \fB\-\-ipv4\fR -.IP -Bind to IPv4. Default is both. -.PP - -.HP -\fB\-6\fR, \fB\-\-ipv6\fR -.IP -Bind to IPv6. Default is both. -.PP - -.HP -\fB\-D\fR, \fB\-\-daemon\fR -.IP -Run as a daemon process. -.PP - -.HP -\fB\-d\fR, \fB\-\-debug\fR -.IP -Print debugging information. -.PP - -.HP -\fB\-PFILE\fR, \fB\-\-pid FILE\fR -.IP -Write process id to FILE. -.br -If no FILE specified, use /var/run/usbipd.pid -.PP - -\fB\-tPORT\fR, \fB\-\-tcp\-port PORT\fR -.IP -Listen on TCP/IP port PORT. -.PP - -\fB\-h\fR, \fB\-\-help\fR -.IP -Print the program help message and exit. -.PP - -.HP -\fB\-v\fR, \fB\-\-version\fR -.IP -Show version. -.PP - -.SH LIMITATIONS - -.B usbipd -offers no authentication or authorization for USB/IP. Any -USB/IP client can connect and use exported devices. - -.SH EXAMPLES - - server:# modprobe usbip - - server:# usbipd -D - - Start usbip daemon. - - server:# usbip list --local - - List driver assignments for usb devices. - - server:# usbip bind --busid=1-2 - - Bind usbip-host.ko to the device of busid 1-2. - - A usb device 1-2 is now exportable to other hosts! - - Use 'usbip unbind --busid=1-2' when you want to shutdown exporting and use the device locally. 
- -.SH "SEE ALSO" -\fBusbip\fP\fB(8)\fB\fP - diff --git a/drivers/staging/usbip/userspace/libsrc/Makefile.am b/drivers/staging/usbip/userspace/libsrc/Makefile.am deleted file mode 100644 index 7c8f8a4d54e4..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/Makefile.am +++ /dev/null @@ -1,8 +0,0 @@ -libusbip_la_CPPFLAGS = -DUSBIDS_FILE='"@USBIDS_DIR@/usb.ids"' -libusbip_la_CFLAGS = @EXTRA_CFLAGS@ -libusbip_la_LDFLAGS = -version-info @LIBUSBIP_VERSION@ - -lib_LTLIBRARIES := libusbip.la -libusbip_la_SOURCES := names.c names.h usbip_host_driver.c usbip_host_driver.h \ - usbip_common.c usbip_common.h vhci_driver.c vhci_driver.h \ - sysfs_utils.c sysfs_utils.h diff --git a/drivers/staging/usbip/userspace/libsrc/list.h b/drivers/staging/usbip/userspace/libsrc/list.h deleted file mode 100644 index 8d0c936e184f..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/list.h +++ /dev/null @@ -1,136 +0,0 @@ -#ifndef _LIST_H -#define _LIST_H - -/* Stripped down implementation of linked list taken - * from the Linux Kernel. - */ - -/* - * Simple doubly linked list implementation. - * - * Some of the internal functions ("__xxx") are useful when - * manipulating whole lists rather than single entries, as - * sometimes we already know the next/prev entries and we can - * generate better code by using them directly rather than - * using the generic single-entry routines. - */ - -struct list_head { - struct list_head *next, *prev; -}; - -#define LIST_HEAD_INIT(name) { &(name), &(name) } - -#define LIST_HEAD(name) \ - struct list_head name = LIST_HEAD_INIT(name) - -static inline void INIT_LIST_HEAD(struct list_head *list) -{ - list->next = list; - list->prev = list; -} - -/* - * Insert a new entry between two known consecutive entries. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static inline void __list_add(struct list_head *new, - struct list_head *prev, - struct list_head *next) -{ - next->prev = new; - new->next = next; - new->prev = prev; - prev->next = new; -} - -/** - * list_add - add a new entry - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - */ -static inline void list_add(struct list_head *new, struct list_head *head) -{ - __list_add(new, head, head->next); -} - -/* - * Delete a list entry by making the prev/next entries - * point to each other. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static inline void __list_del(struct list_head * prev, struct list_head * next) -{ - next->prev = prev; - prev->next = next; -} - -#define POISON_POINTER_DELTA 0 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) - -/** - * list_del - deletes entry from list. - * @entry: the element to delete from the list. - * Note: list_empty() on entry does not return true after this, the entry is - * in an undefined state. - */ -static inline void __list_del_entry(struct list_head *entry) -{ - __list_del(entry->prev, entry->next); -} - -static inline void list_del(struct list_head *entry) -{ - __list_del(entry->prev, entry->next); - entry->next = LIST_POISON1; - entry->prev = LIST_POISON2; -} - -/** - * list_entry - get the struct for this entry - * @ptr: the &struct list_head pointer. - * @type: the type of the struct this is embedded in. 
- * @member: the name of the list_struct within the struct. - */ -#define list_entry(ptr, type, member) \ - container_of(ptr, type, member) -/** - * list_for_each - iterate over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) - -/** - * list_for_each_safe - iterate over a list safe against removal of list entry - * @pos: the &struct list_head to use as a loop cursor. - * @n: another &struct list_head to use as temporary storage - * @head: the head for your list. - */ -#define list_for_each_safe(pos, n, head) \ - for (pos = (head)->next, n = pos->next; pos != (head); \ - pos = n, n = pos->next) - -#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) - -/** - * container_of - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - */ -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type,member) );}) - -#endif diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c deleted file mode 100644 index 81ff8522405c..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/names.c +++ /dev/null @@ -1,504 +0,0 @@ -/* - * names.c -- USB name database manipulation routines - * - * Copyright (C) 1999, 2000 Thomas Sailer (sailer@ife.ee.ethz.ch) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * - * - * - * - * Copyright (C) 2005 Takahiro Hirofuchi - * - names_deinit() is added. 
- * - */ - -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <dirent.h> -#include <string.h> -#include <errno.h> -#include <stdlib.h> -#include <unistd.h> -#include <stdio.h> -#include <ctype.h> - -#include "names.h" -#include "usbip_common.h" - -struct vendor { - struct vendor *next; - u_int16_t vendorid; - char name[1]; -}; - -struct product { - struct product *next; - u_int16_t vendorid, productid; - char name[1]; -}; - -struct class { - struct class *next; - u_int8_t classid; - char name[1]; -}; - -struct subclass { - struct subclass *next; - u_int8_t classid, subclassid; - char name[1]; -}; - -struct protocol { - struct protocol *next; - u_int8_t classid, subclassid, protocolid; - char name[1]; -}; - -struct genericstrtable { - struct genericstrtable *next; - unsigned int num; - char name[1]; -}; - - -#define HASH1 0x10 -#define HASH2 0x02 -#define HASHSZ 16 - -static unsigned int hashnum(unsigned int num) -{ - unsigned int mask1 = HASH1 << 27, mask2 = HASH2 << 27; - - for (; mask1 >= HASH1; mask1 >>= 1, mask2 >>= 1) - if (num & mask1) - num ^= mask2; - return num & (HASHSZ-1); -} - - -static struct vendor *vendors[HASHSZ] = { NULL, }; -static struct product *products[HASHSZ] = { NULL, }; -static struct class *classes[HASHSZ] = { NULL, }; -static struct subclass *subclasses[HASHSZ] = { NULL, }; -static struct protocol *protocols[HASHSZ] = { NULL, }; - -const char *names_vendor(u_int16_t vendorid) -{ - struct vendor *v; - - v = vendors[hashnum(vendorid)]; - for (; v; v = v->next) - if (v->vendorid == vendorid) - return v->name; - return NULL; -} - -const char *names_product(u_int16_t vendorid, u_int16_t productid) -{ - struct product *p; - - p = products[hashnum((vendorid << 16) | productid)]; - for (; p; p = p->next) - if (p->vendorid == vendorid && p->productid == productid) - return p->name; - return NULL; -} - -const char *names_class(u_int8_t classid) -{ - struct class *c; - - c = classes[hashnum(classid)]; - for (; c; c = c->next) - if (c->classid == classid) - return c->name; - return NULL; -} - -const char *names_subclass(u_int8_t classid, u_int8_t subclassid) -{ - struct subclass *s; - - s = subclasses[hashnum((classid << 8) | subclassid)]; - for (; s; s = s->next) - if (s->classid == classid && s->subclassid == subclassid) - return s->name; - return NULL; -} - -const char *names_protocol(u_int8_t classid, u_int8_t subclassid, - u_int8_t protocolid) -{ - struct protocol *p; - - p = protocols[hashnum((classid << 16) | (subclassid << 8) - | protocolid)]; - for (; p; p = p->next) - if (p->classid == classid && p->subclassid == subclassid && - p->protocolid == protocolid) - return p->name; - return NULL; -} - -/* add a cleanup function by takahiro */ -struct pool { - struct pool *next; - void *mem; -}; - -static struct pool *pool_head; - -static void *my_malloc(size_t size) -{ - struct pool *p; - - p = calloc(1, sizeof(struct pool)); - if (!p) - return NULL; - - p->mem = calloc(1, size); - if (!p->mem) { - free(p); - return NULL; - } - - p->next = pool_head; - pool_head = p; - - return p->mem; -} - -void names_free(void) -{ - struct pool *pool; - - if (!pool_head) - return; - - for (pool = pool_head; pool != NULL; ) { - struct pool *tmp; - - if (pool->mem) - free(pool->mem); - - tmp = pool; - pool = pool->next; - free(tmp); - } -} - -static int new_vendor(const char *name, u_int16_t vendorid) -{ - struct vendor *v; - unsigned int h = hashnum(vendorid); - - v = vendors[h]; - for (; v; v = v->next) - if (v->vendorid == vendorid) - return -1; - v = 
my_malloc(sizeof(struct vendor) + strlen(name)); - if (!v) - return -1; - strcpy(v->name, name); - v->vendorid = vendorid; - v->next = vendors[h]; - vendors[h] = v; - return 0; -} - -static int new_product(const char *name, u_int16_t vendorid, - u_int16_t productid) -{ - struct product *p; - unsigned int h = hashnum((vendorid << 16) | productid); - - p = products[h]; - for (; p; p = p->next) - if (p->vendorid == vendorid && p->productid == productid) - return -1; - p = my_malloc(sizeof(struct product) + strlen(name)); - if (!p) - return -1; - strcpy(p->name, name); - p->vendorid = vendorid; - p->productid = productid; - p->next = products[h]; - products[h] = p; - return 0; -} - -static int new_class(const char *name, u_int8_t classid) -{ - struct class *c; - unsigned int h = hashnum(classid); - - c = classes[h]; - for (; c; c = c->next) - if (c->classid == classid) - return -1; - c = my_malloc(sizeof(struct class) + strlen(name)); - if (!c) - return -1; - strcpy(c->name, name); - c->classid = classid; - c->next = classes[h]; - classes[h] = c; - return 0; -} - -static int new_subclass(const char *name, u_int8_t classid, u_int8_t subclassid) -{ - struct subclass *s; - unsigned int h = hashnum((classid << 8) | subclassid); - - s = subclasses[h]; - for (; s; s = s->next) - if (s->classid == classid && s->subclassid == subclassid) - return -1; - s = my_malloc(sizeof(struct subclass) + strlen(name)); - if (!s) - return -1; - strcpy(s->name, name); - s->classid = classid; - s->subclassid = subclassid; - s->next = subclasses[h]; - subclasses[h] = s; - return 0; -} - -static int new_protocol(const char *name, u_int8_t classid, u_int8_t subclassid, - u_int8_t protocolid) -{ - struct protocol *p; - unsigned int h = hashnum((classid << 16) | (subclassid << 8) - | protocolid); - - p = protocols[h]; - for (; p; p = p->next) - if (p->classid == classid && p->subclassid == subclassid - && p->protocolid == protocolid) - return -1; - p = my_malloc(sizeof(struct protocol) + strlen(name)); - if (!p) - return -1; - strcpy(p->name, name); - p->classid = classid; - p->subclassid = subclassid; - p->protocolid = protocolid; - p->next = protocols[h]; - protocols[h] = p; - return 0; -} - -static void parse(FILE *f) -{ - char buf[512], *cp; - unsigned int linectr = 0; - int lastvendor = -1; - int lastclass = -1; - int lastsubclass = -1; - int lasthut = -1; - int lastlang = -1; - unsigned int u; - - while (fgets(buf, sizeof(buf), f)) { - linectr++; - /* remove line ends */ - cp = strchr(buf, '\r'); - if (cp) - *cp = 0; - cp = strchr(buf, '\n'); - if (cp) - *cp = 0; - if (buf[0] == '#' || !buf[0]) - continue; - cp = buf; - if (buf[0] == 'P' && buf[1] == 'H' && buf[2] == 'Y' && - buf[3] == 'S' && buf[4] == 'D' && - buf[5] == 'E' && buf[6] == 'S' && /*isspace(buf[7])*/ - buf[7] == ' ') { - continue; - } - if (buf[0] == 'P' && buf[1] == 'H' && - buf[2] == 'Y' && /*isspace(buf[3])*/ buf[3] == ' ') { - continue; - } - if (buf[0] == 'B' && buf[1] == 'I' && buf[2] == 'A' && - buf[3] == 'S' && /*isspace(buf[4])*/ buf[4] == ' ') { - continue; - } - if (buf[0] == 'L' && /*isspace(buf[1])*/ buf[1] == ' ') { - lasthut = lastclass = lastvendor = lastsubclass = -1; - /* - * set 1 as pseudo-id to indicate that the parser is - * in a `L' section. 
- */ - lastlang = 1; - continue; - } - if (buf[0] == 'C' && /*isspace(buf[1])*/ buf[1] == ' ') { - /* class spec */ - cp = buf+2; - while (isspace(*cp)) - cp++; - if (!isxdigit(*cp)) { - err("Invalid class spec at line %u", linectr); - continue; - } - u = strtoul(cp, &cp, 16); - while (isspace(*cp)) - cp++; - if (!*cp) { - err("Invalid class spec at line %u", linectr); - continue; - } - if (new_class(cp, u)) - err("Duplicate class spec at line %u class %04x %s", - linectr, u, cp); - dbg("line %5u class %02x %s", linectr, u, cp); - lasthut = lastlang = lastvendor = lastsubclass = -1; - lastclass = u; - continue; - } - if (buf[0] == 'A' && buf[1] == 'T' && isspace(buf[2])) { - /* audio terminal type spec */ - continue; - } - if (buf[0] == 'H' && buf[1] == 'C' && buf[2] == 'C' - && isspace(buf[3])) { - /* HID Descriptor bCountryCode */ - continue; - } - if (isxdigit(*cp)) { - /* vendor */ - u = strtoul(cp, &cp, 16); - while (isspace(*cp)) - cp++; - if (!*cp) { - err("Invalid vendor spec at line %u", linectr); - continue; - } - if (new_vendor(cp, u)) - err("Duplicate vendor spec at line %u vendor %04x %s", - linectr, u, cp); - dbg("line %5u vendor %04x %s", linectr, u, cp); - lastvendor = u; - lasthut = lastlang = lastclass = lastsubclass = -1; - continue; - } - if (buf[0] == '\t' && isxdigit(buf[1])) { - /* product or subclass spec */ - u = strtoul(buf+1, &cp, 16); - while (isspace(*cp)) - cp++; - if (!*cp) { - err("Invalid product/subclass spec at line %u", - linectr); - continue; - } - if (lastvendor != -1) { - if (new_product(cp, lastvendor, u)) - err("Duplicate product spec at line %u product %04x:%04x %s", - linectr, lastvendor, u, cp); - dbg("line %5u product %04x:%04x %s", linectr, - lastvendor, u, cp); - continue; - } - if (lastclass != -1) { - if (new_subclass(cp, lastclass, u)) - err("Duplicate subclass spec at line %u class %02x:%02x %s", - linectr, lastclass, u, cp); - dbg("line %5u subclass %02x:%02x %s", linectr, - lastclass, u, cp); - lastsubclass = u; - continue; - } - if (lasthut != -1) { - /* do not store hut */ - continue; - } - if (lastlang != -1) { - /* do not store langid */ - continue; - } - err("Product/Subclass spec without prior Vendor/Class spec at line %u", - linectr); - continue; - } - if (buf[0] == '\t' && buf[1] == '\t' && isxdigit(buf[2])) { - /* protocol spec */ - u = strtoul(buf+2, &cp, 16); - while (isspace(*cp)) - cp++; - if (!*cp) { - err("Invalid protocol spec at line %u", - linectr); - continue; - } - if (lastclass != -1 && lastsubclass != -1) { - if (new_protocol(cp, lastclass, lastsubclass, - u)) - err("Duplicate protocol spec at line %u class %02x:%02x:%02x %s", - linectr, lastclass, lastsubclass, - u, cp); - dbg("line %5u protocol %02x:%02x:%02x %s", - linectr, lastclass, lastsubclass, u, cp); - continue; - } - err("Protocol spec without prior Class and Subclass spec at line %u", - linectr); - continue; - } - if (buf[0] == 'H' && buf[1] == 'I' && - buf[2] == 'D' && /*isspace(buf[3])*/ buf[3] == ' ') { - continue; - } - if (buf[0] == 'H' && buf[1] == 'U' && - buf[2] == 'T' && /*isspace(buf[3])*/ buf[3] == ' ') { - lastlang = lastclass = lastvendor = lastsubclass = -1; - /* - * set 1 as pseudo-id to indicate that the parser is - * in a `HUT' section. 
- */ - lasthut = 1; - continue; - } - if (buf[0] == 'R' && buf[1] == ' ') - continue; - - if (buf[0] == 'V' && buf[1] == 'T') - continue; - - err("Unknown line at line %u", linectr); - } -} - - -int names_init(char *n) -{ - FILE *f; - - f = fopen(n, "r"); - if (!f) - return errno; - - parse(f); - fclose(f); - return 0; -} diff --git a/drivers/staging/usbip/userspace/libsrc/names.h b/drivers/staging/usbip/userspace/libsrc/names.h deleted file mode 100644 index 680926512de2..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/names.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * names.h -- USB name database manipulation routines - * - * Copyright (C) 1999, 2000 Thomas Sailer (sailer@ife.ee.ethz.ch) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * - * - * Copyright (C) 2005 Takahiro Hirofuchi - * - names_free() is added. - */ - -#ifndef _NAMES_H -#define _NAMES_H - -#include <sys/types.h> - -/* used by usbip_common.c */ -extern const char *names_vendor(u_int16_t vendorid); -extern const char *names_product(u_int16_t vendorid, u_int16_t productid); -extern const char *names_class(u_int8_t classid); -extern const char *names_subclass(u_int8_t classid, u_int8_t subclassid); -extern const char *names_protocol(u_int8_t classid, u_int8_t subclassid, - u_int8_t protocolid); -extern int names_init(char *n); -extern void names_free(void); - -#endif /* _NAMES_H */ diff --git a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c deleted file mode 100644 index 36ac88ece0b8..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.c +++ /dev/null @@ -1,31 +0,0 @@ -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <errno.h> - -#include "sysfs_utils.h" -#include "usbip_common.h" - -int write_sysfs_attribute(const char *attr_path, const char *new_value, - size_t len) -{ - int fd; - int length; - - fd = open(attr_path, O_WRONLY); - if (fd < 0) { - dbg("error opening attribute %s", attr_path); - return -1; - } - - length = write(fd, new_value, len); - if (length < 0) { - dbg("error writing to attribute %s", attr_path); - close(fd); - return -1; - } - - close(fd); - - return 0; -} diff --git a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h b/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h deleted file mode 100644 index 32ac1d105d18..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/sysfs_utils.h +++ /dev/null @@ -1,8 +0,0 @@ - -#ifndef __SYSFS_UTILS_H -#define __SYSFS_UTILS_H - -int write_sysfs_attribute(const char *attr_path, const char *new_value, - size_t len); - -#endif diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.c b/drivers/staging/usbip/userspace/libsrc/usbip_common.c deleted file mode 100644 index ac73710473de..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/usbip_common.c +++ /dev/null @@ -1,285 +0,0 @@ 
-/* - * Copyright (C) 2005-2007 Takahiro Hirofuchi - */ - -#include <libudev.h> -#include "usbip_common.h" -#include "names.h" - -#undef PROGNAME -#define PROGNAME "libusbip" - -int usbip_use_syslog; -int usbip_use_stderr; -int usbip_use_debug; - -extern struct udev *udev_context; - -struct speed_string { - int num; - char *speed; - char *desc; -}; - -static const struct speed_string speed_strings[] = { - { USB_SPEED_UNKNOWN, "unknown", "Unknown Speed"}, - { USB_SPEED_LOW, "1.5", "Low Speed(1.5Mbps)" }, - { USB_SPEED_FULL, "12", "Full Speed(12Mbps)" }, - { USB_SPEED_HIGH, "480", "High Speed(480Mbps)" }, - { USB_SPEED_WIRELESS, "53.3-480", "Wireless"}, - { USB_SPEED_SUPER, "5000", "Super Speed(5000Mbps)" }, - { 0, NULL, NULL } -}; - -struct portst_string { - int num; - char *desc; -}; - -static struct portst_string portst_strings[] = { - { SDEV_ST_AVAILABLE, "Device Available" }, - { SDEV_ST_USED, "Device in Use" }, - { SDEV_ST_ERROR, "Device Error"}, - { VDEV_ST_NULL, "Port Available"}, - { VDEV_ST_NOTASSIGNED, "Port Initializing"}, - { VDEV_ST_USED, "Port in Use"}, - { VDEV_ST_ERROR, "Port Error"}, - { 0, NULL} -}; - -const char *usbip_status_string(int32_t status) -{ - for (int i = 0; portst_strings[i].desc != NULL; i++) - if (portst_strings[i].num == status) - return portst_strings[i].desc; - - return "Unknown Status"; -} - -const char *usbip_speed_string(int num) -{ - for (int i = 0; speed_strings[i].speed != NULL; i++) - if (speed_strings[i].num == num) - return speed_strings[i].desc; - - return "Unknown Speed"; -} - - -#define DBG_UDEV_INTEGER(name)\ - dbg("%-20s = %x", to_string(name), (int) udev->name) - -#define DBG_UINF_INTEGER(name)\ - dbg("%-20s = %x", to_string(name), (int) uinf->name) - -void dump_usb_interface(struct usbip_usb_interface *uinf) -{ - char buff[100]; - - usbip_names_get_class(buff, sizeof(buff), - uinf->bInterfaceClass, - uinf->bInterfaceSubClass, - uinf->bInterfaceProtocol); - dbg("%-20s = %s", "Interface(C/SC/P)", buff); -} - -void dump_usb_device(struct usbip_usb_device *udev) -{ - char buff[100]; - - dbg("%-20s = %s", "path", udev->path); - dbg("%-20s = %s", "busid", udev->busid); - - usbip_names_get_class(buff, sizeof(buff), - udev->bDeviceClass, - udev->bDeviceSubClass, - udev->bDeviceProtocol); - dbg("%-20s = %s", "Device(C/SC/P)", buff); - - DBG_UDEV_INTEGER(bcdDevice); - - usbip_names_get_product(buff, sizeof(buff), - udev->idVendor, - udev->idProduct); - dbg("%-20s = %s", "Vendor/Product", buff); - - DBG_UDEV_INTEGER(bNumConfigurations); - DBG_UDEV_INTEGER(bNumInterfaces); - - dbg("%-20s = %s", "speed", - usbip_speed_string(udev->speed)); - - DBG_UDEV_INTEGER(busnum); - DBG_UDEV_INTEGER(devnum); -} - - -int read_attr_value(struct udev_device *dev, const char *name, - const char *format) -{ - const char *attr; - int num = 0; - int ret; - - attr = udev_device_get_sysattr_value(dev, name); - if (!attr) { - err("udev_device_get_sysattr_value failed"); - goto err; - } - - /* The client chooses the device configuration - * when attaching it so right after being bound - * to usbip-host on the server the device will - * have no configuration. - * Therefore, attributes such as bConfigurationValue - * and bNumInterfaces will not exist and sscanf will - * fail. Check for these cases and don't treat them - * as errors. 
- */ - - ret = sscanf(attr, format, &num); - if (ret < 1) { - if (strcmp(name, "bConfigurationValue") && - strcmp(name, "bNumInterfaces")) { - err("sscanf failed for attribute %s", name); - goto err; - } - } - -err: - - return num; -} - - -int read_attr_speed(struct udev_device *dev) -{ - const char *speed; - - speed = udev_device_get_sysattr_value(dev, "speed"); - if (!speed) { - err("udev_device_get_sysattr_value failed"); - goto err; - } - - for (int i = 0; speed_strings[i].speed != NULL; i++) { - if (!strcmp(speed, speed_strings[i].speed)) - return speed_strings[i].num; - } - -err: - - return USB_SPEED_UNKNOWN; -} - -#define READ_ATTR(object, type, dev, name, format) \ - do { \ - (object)->name = (type) read_attr_value(dev, to_string(name), \ - format); \ - } while (0) - - -int read_usb_device(struct udev_device *sdev, struct usbip_usb_device *udev) -{ - uint32_t busnum, devnum; - const char *path, *name; - - READ_ATTR(udev, uint8_t, sdev, bDeviceClass, "%02x\n"); - READ_ATTR(udev, uint8_t, sdev, bDeviceSubClass, "%02x\n"); - READ_ATTR(udev, uint8_t, sdev, bDeviceProtocol, "%02x\n"); - - READ_ATTR(udev, uint16_t, sdev, idVendor, "%04x\n"); - READ_ATTR(udev, uint16_t, sdev, idProduct, "%04x\n"); - READ_ATTR(udev, uint16_t, sdev, bcdDevice, "%04x\n"); - - READ_ATTR(udev, uint8_t, sdev, bConfigurationValue, "%02x\n"); - READ_ATTR(udev, uint8_t, sdev, bNumConfigurations, "%02x\n"); - READ_ATTR(udev, uint8_t, sdev, bNumInterfaces, "%02x\n"); - - READ_ATTR(udev, uint8_t, sdev, devnum, "%d\n"); - udev->speed = read_attr_speed(sdev); - - path = udev_device_get_syspath(sdev); - name = udev_device_get_sysname(sdev); - - strncpy(udev->path, path, SYSFS_PATH_MAX); - strncpy(udev->busid, name, SYSFS_BUS_ID_SIZE); - - sscanf(name, "%u-%u", &busnum, &devnum); - udev->busnum = busnum; - - return 0; -} - -int read_usb_interface(struct usbip_usb_device *udev, int i, - struct usbip_usb_interface *uinf) -{ - char busid[SYSFS_BUS_ID_SIZE]; - struct udev_device *sif; - - sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i); - - sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid); - if (!sif) { - err("udev_device_new_from_subsystem_sysname %s failed", busid); - return -1; - } - - READ_ATTR(uinf, uint8_t, sif, bInterfaceClass, "%02x\n"); - READ_ATTR(uinf, uint8_t, sif, bInterfaceSubClass, "%02x\n"); - READ_ATTR(uinf, uint8_t, sif, bInterfaceProtocol, "%02x\n"); - - return 0; -} - -int usbip_names_init(char *f) -{ - return names_init(f); -} - -void usbip_names_free(void) -{ - names_free(); -} - -void usbip_names_get_product(char *buff, size_t size, uint16_t vendor, - uint16_t product) -{ - const char *prod, *vend; - - prod = names_product(vendor, product); - if (!prod) - prod = "unknown product"; - - - vend = names_vendor(vendor); - if (!vend) - vend = "unknown vendor"; - - snprintf(buff, size, "%s : %s (%04x:%04x)", vend, prod, vendor, product); -} - -void usbip_names_get_class(char *buff, size_t size, uint8_t class, - uint8_t subclass, uint8_t protocol) -{ - const char *c, *s, *p; - - if (class == 0 && subclass == 0 && protocol == 0) { - snprintf(buff, size, "(Defined at Interface level) (%02x/%02x/%02x)", class, subclass, protocol); - return; - } - - p = names_protocol(class, subclass, protocol); - if (!p) - p = "unknown protocol"; - - s = names_subclass(class, subclass); - if (!s) - s = "unknown subclass"; - - c = names_class(class); - if (!c) - c = "unknown class"; - - snprintf(buff, size, "%s / %s / %s (%02x/%02x/%02x)", c, s, p, class, subclass, protocol); -} diff 
--git a/drivers/staging/usbip/userspace/libsrc/usbip_common.h b/drivers/staging/usbip/userspace/libsrc/usbip_common.h deleted file mode 100644 index 5a0e95edf4df..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/usbip_common.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (C) 2005-2007 Takahiro Hirofuchi - */ - -#ifndef __USBIP_COMMON_H -#define __USBIP_COMMON_H - -#include <libudev.h> - -#include <stdint.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#include <syslog.h> -#include <unistd.h> -#include <linux/usb/ch9.h> -#include "../../uapi/usbip.h" - -#ifndef USBIDS_FILE -#define USBIDS_FILE "/usr/share/hwdata/usb.ids" -#endif - -#ifndef VHCI_STATE_PATH -#define VHCI_STATE_PATH "/var/run/vhci_hcd" -#endif - -/* kernel module names */ -#define USBIP_CORE_MOD_NAME "usbip-core" -#define USBIP_HOST_DRV_NAME "usbip-host" -#define USBIP_VHCI_DRV_NAME "vhci_hcd" - -/* sysfs constants */ -#define SYSFS_MNT_PATH "/sys" -#define SYSFS_BUS_NAME "bus" -#define SYSFS_BUS_TYPE "usb" -#define SYSFS_DRIVERS_NAME "drivers" - -#define SYSFS_PATH_MAX 256 -#define SYSFS_BUS_ID_SIZE 32 - -extern int usbip_use_syslog; -extern int usbip_use_stderr; -extern int usbip_use_debug ; - -#define PROGNAME "usbip" - -#define pr_fmt(fmt) "%s: %s: " fmt "\n", PROGNAME -#define dbg_fmt(fmt) pr_fmt("%s:%d:[%s] " fmt), "debug", \ - __FILE__, __LINE__, __func__ - -#define err(fmt, args...) \ - do { \ - if (usbip_use_syslog) { \ - syslog(LOG_ERR, pr_fmt(fmt), "error", ##args); \ - } \ - if (usbip_use_stderr) { \ - fprintf(stderr, pr_fmt(fmt), "error", ##args); \ - } \ - } while (0) - -#define info(fmt, args...) \ - do { \ - if (usbip_use_syslog) { \ - syslog(LOG_INFO, pr_fmt(fmt), "info", ##args); \ - } \ - if (usbip_use_stderr) { \ - fprintf(stderr, pr_fmt(fmt), "info", ##args); \ - } \ - } while (0) - -#define dbg(fmt, args...) 
\ - do { \ - if (usbip_use_debug) { \ - if (usbip_use_syslog) { \ - syslog(LOG_DEBUG, dbg_fmt(fmt), ##args); \ - } \ - if (usbip_use_stderr) { \ - fprintf(stderr, dbg_fmt(fmt), ##args); \ - } \ - } \ - } while (0) - -#define BUG() \ - do { \ - err("sorry, it's a bug!"); \ - abort(); \ - } while (0) - -struct usbip_usb_interface { - uint8_t bInterfaceClass; - uint8_t bInterfaceSubClass; - uint8_t bInterfaceProtocol; - uint8_t padding; /* alignment */ -} __attribute__((packed)); - -struct usbip_usb_device { - char path[SYSFS_PATH_MAX]; - char busid[SYSFS_BUS_ID_SIZE]; - - uint32_t busnum; - uint32_t devnum; - uint32_t speed; - - uint16_t idVendor; - uint16_t idProduct; - uint16_t bcdDevice; - - uint8_t bDeviceClass; - uint8_t bDeviceSubClass; - uint8_t bDeviceProtocol; - uint8_t bConfigurationValue; - uint8_t bNumConfigurations; - uint8_t bNumInterfaces; -} __attribute__((packed)); - -#define to_string(s) #s - -void dump_usb_interface(struct usbip_usb_interface *); -void dump_usb_device(struct usbip_usb_device *); -int read_usb_device(struct udev_device *sdev, struct usbip_usb_device *udev); -int read_attr_value(struct udev_device *dev, const char *name, - const char *format); -int read_usb_interface(struct usbip_usb_device *udev, int i, - struct usbip_usb_interface *uinf); - -const char *usbip_speed_string(int num); -const char *usbip_status_string(int32_t status); - -int usbip_names_init(char *); -void usbip_names_free(void); -void usbip_names_get_product(char *buff, size_t size, uint16_t vendor, - uint16_t product); -void usbip_names_get_class(char *buff, size_t size, uint8_t class, - uint8_t subclass, uint8_t protocol); - -#endif /* __USBIP_COMMON_H */ diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c deleted file mode 100644 index bef08d5c44e8..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> - -#include <errno.h> -#include <unistd.h> - -#include <libudev.h> - -#include "usbip_common.h" -#include "usbip_host_driver.h" -#include "list.h" -#include "sysfs_utils.h" - -#undef PROGNAME -#define PROGNAME "libusbip" - -struct usbip_host_driver *host_driver; -struct udev *udev_context; - -static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) -{ - char status_attr_path[SYSFS_PATH_MAX]; - int fd; - int length; - char status; - int value = 0; - - snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status", - udev->path); - - fd = open(status_attr_path, O_RDONLY); - if (fd < 0) { - err("error opening attribute %s", status_attr_path); - return -1; - } - - length = read(fd, &status, 1); - if (length < 0) { - err("error reading attribute %s", status_attr_path); - close(fd); - return -1; - } - - value = atoi(&status); - - return value; -} - -static -struct usbip_exported_device *usbip_exported_device_new(const char *sdevpath) -{ - struct usbip_exported_device *edev = NULL; - struct usbip_exported_device *edev_old; - size_t size; - int i; - - edev = calloc(1, sizeof(struct usbip_exported_device)); - - edev->sudev = udev_device_new_from_syspath(udev_context, sdevpath); - if (!edev->sudev) { - err("udev_device_new_from_syspath: %s", sdevpath); - goto err; - } - - read_usb_device(edev->sudev, &edev->udev); - - edev->status = read_attr_usbip_status(&edev->udev); - if (edev->status < 0) - goto err; - - /* reallocate buffer to include usb interface data */ - size = sizeof(struct usbip_exported_device) + - edev->udev.bNumInterfaces * sizeof(struct usbip_usb_interface); - - edev_old = edev; - edev = realloc(edev, size); - if (!edev) { - edev = edev_old; - dbg("realloc failed"); - goto err; - } - - for (i = 0; i < edev->udev.bNumInterfaces; i++) - read_usb_interface(&edev->udev, i, &edev->uinf[i]); - - return edev; -err: - if (edev->sudev) - udev_device_unref(edev->sudev); - if (edev) - free(edev); - - return NULL; -} - -static int refresh_exported_devices(void) -{ - struct usbip_exported_device *edev; - struct udev_enumerate *enumerate; - struct udev_list_entry *devices, *dev_list_entry; - struct udev_device *dev; - const char *path; - const char *driver; - - enumerate = udev_enumerate_new(udev_context); - udev_enumerate_add_match_subsystem(enumerate, "usb"); - udev_enumerate_scan_devices(enumerate); - - devices = udev_enumerate_get_list_entry(enumerate); - - udev_list_entry_foreach(dev_list_entry, devices) { - path = udev_list_entry_get_name(dev_list_entry); - dev = udev_device_new_from_syspath(udev_context, path); - if (dev == NULL) - continue; - - /* Check whether device uses usbip-host driver. 
*/ - driver = udev_device_get_driver(dev); - if (driver != NULL && !strcmp(driver, USBIP_HOST_DRV_NAME)) { - edev = usbip_exported_device_new(path); - if (!edev) { - dbg("usbip_exported_device_new failed"); - continue; - } - - list_add(&edev->node, &host_driver->edev_list); - host_driver->ndevs++; - } - } - - return 0; -} - -static void usbip_exported_device_destroy(void) -{ - struct list_head *i, *tmp; - struct usbip_exported_device *edev; - - list_for_each_safe(i, tmp, &host_driver->edev_list) { - edev = list_entry(i, struct usbip_exported_device, node); - list_del(i); - free(edev); - } -} - -int usbip_host_driver_open(void) -{ - int rc; - - udev_context = udev_new(); - if (!udev_context) { - err("udev_new failed"); - return -1; - } - - host_driver = calloc(1, sizeof(*host_driver)); - - host_driver->ndevs = 0; - INIT_LIST_HEAD(&host_driver->edev_list); - - rc = refresh_exported_devices(); - if (rc < 0) - goto err_free_host_driver; - - return 0; - -err_free_host_driver: - free(host_driver); - host_driver = NULL; - - udev_unref(udev_context); - - return -1; -} - -void usbip_host_driver_close(void) -{ - if (!host_driver) - return; - - usbip_exported_device_destroy(); - - free(host_driver); - host_driver = NULL; - - udev_unref(udev_context); -} - -int usbip_host_refresh_device_list(void) -{ - int rc; - - usbip_exported_device_destroy(); - - host_driver->ndevs = 0; - INIT_LIST_HEAD(&host_driver->edev_list); - - rc = refresh_exported_devices(); - if (rc < 0) - return -1; - - return 0; -} - -int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd) -{ - char attr_name[] = "usbip_sockfd"; - char sockfd_attr_path[SYSFS_PATH_MAX]; - char sockfd_buff[30]; - int ret; - - if (edev->status != SDEV_ST_AVAILABLE) { - dbg("device not available: %s", edev->udev.busid); - switch (edev->status) { - case SDEV_ST_ERROR: - dbg("status SDEV_ST_ERROR"); - break; - case SDEV_ST_USED: - dbg("status SDEV_ST_USED"); - break; - default: - dbg("status unknown: 0x%x", edev->status); - } - return -1; - } - - /* only the first interface is true */ - snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s", - edev->udev.path, attr_name); - - snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd); - - ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff, - strlen(sockfd_buff)); - if (ret < 0) { - err("write_sysfs_attribute failed: sockfd %s to %s", - sockfd_buff, sockfd_attr_path); - return ret; - } - - info("connect: %s", edev->udev.busid); - - return ret; -} - -struct usbip_exported_device *usbip_host_get_device(int num) -{ - struct list_head *i; - struct usbip_exported_device *edev; - int cnt = 0; - - list_for_each(i, &host_driver->edev_list) { - edev = list_entry(i, struct usbip_exported_device, node); - if (num == cnt) - return edev; - else - cnt++; - } - - return NULL; -} diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h deleted file mode 100644 index 2a31f855c616..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef __USBIP_HOST_DRIVER_H -#define __USBIP_HOST_DRIVER_H - -#include <stdint.h> -#include "usbip_common.h" -#include "list.h" - -struct usbip_host_driver { - int ndevs; - /* list of exported device */ - struct list_head edev_list; -}; - -struct usbip_exported_device { - struct udev_device *sudev; - int32_t status; - struct usbip_usb_device udev; - struct list_head node; - struct usbip_usb_interface uinf[]; -}; - -extern struct usbip_host_driver *host_driver; - -int usbip_host_driver_open(void); -void usbip_host_driver_close(void); - -int usbip_host_refresh_device_list(void); -int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd); -struct usbip_exported_device *usbip_host_get_device(int num); - -#endif /* __USBIP_HOST_DRIVER_H */ diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c deleted file mode 100644 index ad9204773533..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Copyright (C) 2005-2007 Takahiro Hirofuchi - */ - -#include "usbip_common.h" -#include "vhci_driver.h" -#include <limits.h> -#include <netdb.h> -#include <libudev.h> -#include "sysfs_utils.h" - -#undef PROGNAME -#define PROGNAME "libusbip" - -struct usbip_vhci_driver *vhci_driver; -struct udev *udev_context; - -static struct usbip_imported_device * -imported_device_init(struct usbip_imported_device *idev, char *busid) -{ - struct udev_device *sudev; - - sudev = udev_device_new_from_subsystem_sysname(udev_context, - "usb", busid); - if (!sudev) { - dbg("udev_device_new_from_subsystem_sysname failed: %s", busid); - goto err; - } - read_usb_device(sudev, &idev->udev); - udev_device_unref(sudev); - - return idev; - -err: - return NULL; -} - - - -static int parse_status(const char *value) -{ - int ret = 0; - char *c; - - - for (int i = 0; i < vhci_driver->nports; i++) - memset(&vhci_driver->idev[i], 0, sizeof(vhci_driver->idev[i])); - - - /* skip a header line */ - c = strchr(value, '\n'); - if (!c) - return -1; - c++; - - while (*c != '\0') { - int port, status, speed, devid; - unsigned long socket; - char lbusid[SYSFS_BUS_ID_SIZE]; - - ret = sscanf(c, "%d %d %d %x %lx %31s\n", - &port, &status, &speed, - &devid, &socket, lbusid); - - if (ret < 5) { - dbg("sscanf failed: %d", ret); - BUG(); - } - - dbg("port %d status %d speed %d devid %x", - port, status, speed, devid); - dbg("socket %lx lbusid %s", socket, lbusid); - - - /* if a device is connected, look at it */ - { - struct usbip_imported_device *idev = &vhci_driver->idev[port]; - - idev->port = port; - idev->status = status; - - idev->devid = devid; - - idev->busnum = (devid >> 16); - idev->devnum = (devid & 0x0000ffff); - - if (idev->status != VDEV_ST_NULL - && idev->status != VDEV_ST_NOTASSIGNED) { - idev = imported_device_init(idev, lbusid); - if (!idev) { - dbg("imported_device_init failed"); - return -1; - } - } - } - - - /* go to the next line */ - c = strchr(c, '\n'); - if (!c) - break; - c++; - } - - dbg("exit"); - - return 0; -} - -static int refresh_imported_device_list(void) -{ - const char *attr_status; 
- - attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device, - "status"); - if (!attr_status) { - err("udev_device_get_sysattr_value failed"); - return -1; - } - - return parse_status(attr_status); -} - -static int get_nports(void) -{ - char *c; - int nports = 0; - const char *attr_status; - - attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device, - "status"); - if (!attr_status) { - err("udev_device_get_sysattr_value failed"); - return -1; - } - - /* skip a header line */ - c = strchr(attr_status, '\n'); - if (!c) - return 0; - c++; - - while (*c != '\0') { - /* go to the next line */ - c = strchr(c, '\n'); - if (!c) - return nports; - c++; - nports += 1; - } - - return nports; -} - -/* - * Read the given port's record. - * - * To avoid buffer overflow we will read the entire line and - * validate each part's size. The initial buffer is padded by 4 to - * accommodate the 2 spaces, 1 newline and an additional character - * which is needed to properly validate the 3rd part without it being - * truncated to an acceptable length. - */ -static int read_record(int rhport, char *host, unsigned long host_len, - char *port, unsigned long port_len, char *busid) -{ - int part; - FILE *file; - char path[PATH_MAX+1]; - char *buffer, *start, *end; - char delim[] = {' ', ' ', '\n'}; - int max_len[] = {(int)host_len, (int)port_len, SYSFS_BUS_ID_SIZE}; - size_t buffer_len = host_len + port_len + SYSFS_BUS_ID_SIZE + 4; - - buffer = malloc(buffer_len); - if (!buffer) - return -1; - - snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport); - - file = fopen(path, "r"); - if (!file) { - err("fopen"); - free(buffer); - return -1; - } - - if (fgets(buffer, buffer_len, file) == NULL) { - err("fgets"); - free(buffer); - fclose(file); - return -1; - } - fclose(file); - - /* validate the length of each of the 3 parts */ - start = buffer; - for (part = 0; part < 3; part++) { - end = strchr(start, delim[part]); - if (end == NULL || (end - start) > max_len[part]) { - free(buffer); - return -1; - } - start = end + 1; - } - - if (sscanf(buffer, "%s %s %s\n", host, port, busid) != 3) { - err("sscanf"); - free(buffer); - return -1; - } - - free(buffer); - - return 0; -} - -/* ---------------------------------------------------------------------- */ - -int usbip_vhci_driver_open(void) -{ - udev_context = udev_new(); - if (!udev_context) { - err("udev_new failed"); - return -1; - } - - vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver)); - - /* will be freed in usbip_driver_close() */ - vhci_driver->hc_device = - udev_device_new_from_subsystem_sysname(udev_context, - USBIP_VHCI_BUS_TYPE, - USBIP_VHCI_DRV_NAME); - if (!vhci_driver->hc_device) { - err("udev_device_new_from_subsystem_sysname failed"); - goto err; - } - - vhci_driver->nports = get_nports(); - - dbg("available ports: %d", vhci_driver->nports); - - if (refresh_imported_device_list()) - goto err; - - return 0; - -err: - udev_device_unref(vhci_driver->hc_device); - - if (vhci_driver) - free(vhci_driver); - - vhci_driver = NULL; - - udev_unref(udev_context); - - return -1; -} - - -void usbip_vhci_driver_close(void) -{ - if (!vhci_driver) - return; - - udev_device_unref(vhci_driver->hc_device); - - free(vhci_driver); - - vhci_driver = NULL; - - udev_unref(udev_context); -} - - -int usbip_vhci_refresh_device_list(void) -{ - - if (refresh_imported_device_list()) - goto err; - - return 0; -err: - dbg("failed to refresh device list"); - return -1; -} - - -int usbip_vhci_get_free_port(void) -{ - for (int i = 0; i < vhci_driver->nports; 
i++) { - if (vhci_driver->idev[i].status == VDEV_ST_NULL) - return i; - } - - return -1; -} - -int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid, - uint32_t speed) { - char buff[200]; /* what size should be ? */ - char attach_attr_path[SYSFS_PATH_MAX]; - char attr_attach[] = "attach"; - const char *path; - int ret; - - snprintf(buff, sizeof(buff), "%u %d %u %u", - port, sockfd, devid, speed); - dbg("writing: %s", buff); - - path = udev_device_get_syspath(vhci_driver->hc_device); - snprintf(attach_attr_path, sizeof(attach_attr_path), "%s/%s", - path, attr_attach); - dbg("attach attribute path: %s", attach_attr_path); - - ret = write_sysfs_attribute(attach_attr_path, buff, strlen(buff)); - if (ret < 0) { - dbg("write_sysfs_attribute failed"); - return -1; - } - - dbg("attached port: %d", port); - - return 0; -} - -static unsigned long get_devid(uint8_t busnum, uint8_t devnum) -{ - return (busnum << 16) | devnum; -} - -/* will be removed */ -int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum, - uint8_t devnum, uint32_t speed) -{ - int devid = get_devid(busnum, devnum); - - return usbip_vhci_attach_device2(port, sockfd, devid, speed); -} - -int usbip_vhci_detach_device(uint8_t port) -{ - char detach_attr_path[SYSFS_PATH_MAX]; - char attr_detach[] = "detach"; - char buff[200]; /* what size should be ? */ - const char *path; - int ret; - - snprintf(buff, sizeof(buff), "%u", port); - dbg("writing: %s", buff); - - path = udev_device_get_syspath(vhci_driver->hc_device); - snprintf(detach_attr_path, sizeof(detach_attr_path), "%s/%s", - path, attr_detach); - dbg("detach attribute path: %s", detach_attr_path); - - ret = write_sysfs_attribute(detach_attr_path, buff, strlen(buff)); - if (ret < 0) { - dbg("write_sysfs_attribute failed"); - return -1; - } - - dbg("detached port: %d", port); - - return 0; -} - -int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev) -{ - char product_name[100]; - char host[NI_MAXHOST] = "unknown host"; - char serv[NI_MAXSERV] = "unknown port"; - char remote_busid[SYSFS_BUS_ID_SIZE]; - int ret; - int read_record_error = 0; - - if (idev->status == VDEV_ST_NULL || idev->status == VDEV_ST_NOTASSIGNED) - return 0; - - ret = read_record(idev->port, host, sizeof(host), serv, sizeof(serv), - remote_busid); - if (ret) { - err("read_record"); - read_record_error = 1; - } - - printf("Port %02d: <%s> at %s\n", idev->port, - usbip_status_string(idev->status), - usbip_speed_string(idev->udev.speed)); - - usbip_names_get_product(product_name, sizeof(product_name), - idev->udev.idVendor, idev->udev.idProduct); - - printf(" %s\n", product_name); - - if (!read_record_error) { - printf("%10s -> usbip://%s:%s/%s\n", idev->udev.busid, - host, serv, remote_busid); - printf("%10s -> remote bus/dev %03d/%03d\n", " ", - idev->busnum, idev->devnum); - } else { - printf("%10s -> unknown host, remote port and remote busid\n", - idev->udev.busid); - printf("%10s -> remote bus/dev %03d/%03d\n", " ", - idev->busnum, idev->devnum); - } - - return 0; -} diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h deleted file mode 100644 index fa2316cf2cac..000000000000 --- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (C) 2005-2007 Takahiro Hirofuchi - */ - -#ifndef __VHCI_DRIVER_H -#define __VHCI_DRIVER_H - -#include <libudev.h> -#include <stdint.h> - -#include "usbip_common.h" - -#define USBIP_VHCI_BUS_TYPE "platform" -#define 
MAXNPORT 128 - -struct usbip_imported_device { - uint8_t port; - uint32_t status; - - uint32_t devid; - - uint8_t busnum; - uint8_t devnum; - - /* usbip_class_device list */ - struct usbip_usb_device udev; -}; - -struct usbip_vhci_driver { - - /* /sys/devices/platform/vhci_hcd */ - struct udev_device *hc_device; - - int nports; - struct usbip_imported_device idev[MAXNPORT]; -}; - - -extern struct usbip_vhci_driver *vhci_driver; - -int usbip_vhci_driver_open(void); -void usbip_vhci_driver_close(void); - -int usbip_vhci_refresh_device_list(void); - - -int usbip_vhci_get_free_port(void); -int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid, - uint32_t speed); - -/* will be removed */ -int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum, - uint8_t devnum, uint32_t speed); - -int usbip_vhci_detach_device(uint8_t port); - -int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev); - -#endif /* __VHCI_DRIVER_H */ diff --git a/drivers/staging/usbip/userspace/src/Makefile.am b/drivers/staging/usbip/userspace/src/Makefile.am deleted file mode 100644 index e81a4ebadeff..000000000000 --- a/drivers/staging/usbip/userspace/src/Makefile.am +++ /dev/null @@ -1,11 +0,0 @@ -AM_CPPFLAGS = -I$(top_srcdir)/libsrc -DUSBIDS_FILE='"@USBIDS_DIR@/usb.ids"' -AM_CFLAGS = @EXTRA_CFLAGS@ -LDADD = $(top_builddir)/libsrc/libusbip.la - -sbin_PROGRAMS := usbip usbipd - -usbip_SOURCES := usbip.h utils.h usbip.c utils.c usbip_network.c \ - usbip_attach.c usbip_detach.c usbip_list.c \ - usbip_bind.c usbip_unbind.c usbip_port.c - -usbipd_SOURCES := usbip_network.h usbipd.c usbip_network.c diff --git a/drivers/staging/usbip/userspace/src/usbip.c b/drivers/staging/usbip/userspace/src/usbip.c deleted file mode 100644 index d7599d943529..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * command structure borrowed from udev - * (git://git.kernel.org/pub/scm/linux/hotplug/udev.git) - * - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <stdio.h> -#include <stdlib.h> - -#include <getopt.h> -#include <syslog.h> - -#include "usbip_common.h" -#include "usbip_network.h" -#include "usbip.h" - -static int usbip_help(int argc, char *argv[]); -static int usbip_version(int argc, char *argv[]); - -static const char usbip_version_string[] = PACKAGE_STRING; - -static const char usbip_usage_string[] = - "usbip [--debug] [--log] [--tcp-port PORT] [version]\n" - " [help] <command> <args>\n"; - -static void usbip_usage(void) -{ - printf("usage: %s", usbip_usage_string); -} - -struct command { - const char *name; - int (*fn)(int argc, char *argv[]); - const char *help; - void (*usage)(void); -}; - -static const struct command cmds[] = { - { - .name = "help", - .fn = usbip_help, - .help = NULL, - .usage = NULL - }, - { - .name = "version", - .fn = usbip_version, - .help = NULL, - .usage = NULL - }, - { - .name = "attach", - .fn = usbip_attach, - .help = "Attach a remote USB device", - .usage = usbip_attach_usage - }, - { - .name = "detach", - .fn = usbip_detach, - .help = "Detach a remote USB device", - .usage = usbip_detach_usage - }, - { - .name = "list", - .fn = usbip_list, - .help = "List exportable or local USB devices", - .usage = usbip_list_usage - }, - { - .name = "bind", - .fn = usbip_bind, - .help = "Bind device to " USBIP_HOST_DRV_NAME ".ko", - .usage = usbip_bind_usage - }, - { - .name = "unbind", - .fn = usbip_unbind, - .help = "Unbind device from " USBIP_HOST_DRV_NAME ".ko", - .usage = usbip_unbind_usage - }, - { - .name = "port", - .fn = usbip_port_show, - .help = "Show imported USB devices", - .usage = NULL - }, - { NULL, NULL, NULL, NULL } -}; - -static int usbip_help(int argc, char *argv[]) -{ - const struct command *cmd; - int i; - int ret = 0; - - if (argc > 1 && argv++) { - for (i = 0; cmds[i].name != NULL; i++) - if (!strcmp(cmds[i].name, argv[0]) && cmds[i].usage) { - cmds[i].usage(); - goto done; - } - ret = -1; - } - - usbip_usage(); - printf("\n"); - for (cmd = cmds; cmd->name != NULL; cmd++) - if (cmd->help != NULL) - printf(" %-10s %s\n", cmd->name, cmd->help); - printf("\n"); -done: - return ret; -} - -static int usbip_version(int argc, char *argv[]) -{ - (void) argc; - (void) argv; - - printf(PROGNAME " (%s)\n", usbip_version_string); - return 0; -} - -static int run_command(const struct command *cmd, int argc, char *argv[]) -{ - dbg("running command: `%s'", cmd->name); - return cmd->fn(argc, argv); -} - -int main(int argc, char *argv[]) -{ - static const struct option opts[] = { - { "debug", no_argument, NULL, 'd' }, - { "log", no_argument, NULL, 'l' }, - { "tcp-port", required_argument, NULL, 't' }, - { NULL, 0, NULL, 0 } - }; - - char *cmd; - int opt; - int i, rc = -1; - - usbip_use_stderr = 1; - opterr = 0; - for (;;) { - opt = getopt_long(argc, argv, "+dlt:", opts, NULL); - - if (opt == -1) - break; - - switch (opt) { - case 'd': - usbip_use_debug = 1; - break; - case 'l': - usbip_use_syslog = 1; - openlog("", LOG_PID, LOG_USER); - break; - case 't': - usbip_setup_port_number(optarg); - break; - case '?': - printf("usbip: invalid option\n"); - default: - usbip_usage(); - goto out; - } - } - - cmd = argv[optind]; - if (cmd) { - for (i = 0; cmds[i].name != NULL; i++) - if (!strcmp(cmds[i].name, cmd)) { - argc -= optind; - argv += optind; - optind = 0; - rc = run_command(&cmds[i], argc, argv); - goto out; - } - } - - /* invalid command */ - usbip_help(0, NULL); -out: - return (rc > -1 ? 
EXIT_SUCCESS : EXIT_FAILURE); -} diff --git a/drivers/staging/usbip/userspace/src/usbip.h b/drivers/staging/usbip/userspace/src/usbip.h deleted file mode 100644 index 84fe66a9d8ad..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef __USBIP_H -#define __USBIP_H - -#ifdef HAVE_CONFIG_H -#include "../config.h" -#endif - -/* usbip commands */ -int usbip_attach(int argc, char *argv[]); -int usbip_detach(int argc, char *argv[]); -int usbip_list(int argc, char *argv[]); -int usbip_bind(int argc, char *argv[]); -int usbip_unbind(int argc, char *argv[]); -int usbip_port_show(int argc, char *argv[]); - -void usbip_attach_usage(void); -void usbip_detach_usage(void); -void usbip_list_usage(void); -void usbip_bind_usage(void); -void usbip_unbind_usage(void); - -#endif /* __USBIP_H */ diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c deleted file mode 100644 index d58a14dfc094..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_attach.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <sys/stat.h> - -#include <limits.h> -#include <stdint.h> -#include <stdio.h> -#include <string.h> - -#include <fcntl.h> -#include <getopt.h> -#include <unistd.h> -#include <errno.h> - -#include "vhci_driver.h" -#include "usbip_common.h" -#include "usbip_network.h" -#include "usbip.h" - -static const char usbip_attach_usage_string[] = - "usbip attach <args>\n" - " -r, --remote=<host> The machine with exported USB devices\n" - " -b, --busid=<busid> Busid of the device on <host>\n"; - -void usbip_attach_usage(void) -{ - printf("usage: %s", usbip_attach_usage_string); -} - -#define MAX_BUFF 100 -static int record_connection(char *host, char *port, char *busid, int rhport) -{ - int fd; - char path[PATH_MAX+1]; - char buff[MAX_BUFF+1]; - int ret; - - ret = mkdir(VHCI_STATE_PATH, 0700); - if (ret < 0) { - /* if VHCI_STATE_PATH exists, then it better be a directory */ - if (errno == EEXIST) { - struct stat s; - - ret = stat(VHCI_STATE_PATH, &s); - if (ret < 0) - return -1; - if (!(s.st_mode & S_IFDIR)) - return -1; - } else - return -1; - } - - snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport); - - fd = open(path, O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU); - if (fd < 0) - return -1; - - snprintf(buff, MAX_BUFF, "%s %s %s\n", - host, port, busid); - - ret = write(fd, buff, strlen(buff)); - if (ret != (ssize_t) strlen(buff)) { - close(fd); - return -1; - } - - close(fd); - - return 0; -} - -static int import_device(int sockfd, struct usbip_usb_device *udev) -{ - int rc; - int port; - - rc = usbip_vhci_driver_open(); - if (rc < 0) { - err("open vhci_driver"); - return -1; - } - - port = usbip_vhci_get_free_port(); - if (port < 0) { - err("no free port"); - usbip_vhci_driver_close(); - return -1; - } - - rc = usbip_vhci_attach_device(port, sockfd, udev->busnum, - udev->devnum, udev->speed); - if (rc < 0) { - err("import device"); - usbip_vhci_driver_close(); - return -1; - } - - usbip_vhci_driver_close(); - - return port; -} - -static int query_import_device(int sockfd, char *busid) -{ - int rc; - struct op_import_request request; - struct op_import_reply reply; - uint16_t code = OP_REP_IMPORT; - - memset(&request, 0, sizeof(request)); - memset(&reply, 0, sizeof(reply)); - - /* send a request */ - rc = usbip_net_send_op_common(sockfd, OP_REQ_IMPORT, 0); - if (rc < 0) { - err("send op_common"); - return -1; - } - - strncpy(request.busid, busid, SYSFS_BUS_ID_SIZE-1); - - PACK_OP_IMPORT_REQUEST(0, &request); - - rc = usbip_net_send(sockfd, (void *) &request, sizeof(request)); - if (rc < 0) { - err("send op_import_request"); - return -1; - } - - /* receive a reply */ - rc = usbip_net_recv_op_common(sockfd, &code); - if (rc < 0) { - err("recv op_common"); - return -1; - } - - rc = usbip_net_recv(sockfd, (void *) &reply, sizeof(reply)); - if (rc < 0) { - err("recv op_import_reply"); - return -1; - } - - PACK_OP_IMPORT_REPLY(0, &reply); - - /* check the reply */ - if (strncmp(reply.udev.busid, busid, SYSFS_BUS_ID_SIZE)) { - err("recv different busid %s", reply.udev.busid); - return -1; - } - - /* import a device */ - return import_device(sockfd, &reply.udev); -} - -static int attach_device(char *host, char *busid) -{ - int sockfd; - int rc; - int rhport; - - sockfd = usbip_net_tcp_connect(host, usbip_port_string); - if (sockfd < 0) { - err("tcp connect"); - return -1; - } - - rhport = query_import_device(sockfd, busid); - if (rhport < 0) { - err("query"); - return -1; - } - - close(sockfd); - - rc = record_connection(host, usbip_port_string, busid, rhport); - if (rc < 0) { - err("record 
connection"); - return -1; - } - - return 0; -} - -int usbip_attach(int argc, char *argv[]) -{ - static const struct option opts[] = { - { "remote", required_argument, NULL, 'r' }, - { "busid", required_argument, NULL, 'b' }, - { NULL, 0, NULL, 0 } - }; - char *host = NULL; - char *busid = NULL; - int opt; - int ret = -1; - - for (;;) { - opt = getopt_long(argc, argv, "r:b:", opts, NULL); - - if (opt == -1) - break; - - switch (opt) { - case 'r': - host = optarg; - break; - case 'b': - busid = optarg; - break; - default: - goto err_out; - } - } - - if (!host || !busid) - goto err_out; - - ret = attach_device(host, busid); - goto out; - -err_out: - usbip_attach_usage(); -out: - return ret; -} diff --git a/drivers/staging/usbip/userspace/src/usbip_bind.c b/drivers/staging/usbip/userspace/src/usbip_bind.c deleted file mode 100644 index fa46141ae68b..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_bind.c +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <libudev.h> - -#include <errno.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#include <getopt.h> - -#include "usbip_common.h" -#include "utils.h" -#include "usbip.h" -#include "sysfs_utils.h" - -enum unbind_status { - UNBIND_ST_OK, - UNBIND_ST_USBIP_HOST, - UNBIND_ST_FAILED -}; - -static const char usbip_bind_usage_string[] = - "usbip bind <args>\n" - " -b, --busid=<busid> Bind " USBIP_HOST_DRV_NAME ".ko to device " - "on <busid>\n"; - -void usbip_bind_usage(void) -{ - printf("usage: %s", usbip_bind_usage_string); -} - -/* call at unbound state */ -static int bind_usbip(char *busid) -{ - char attr_name[] = "bind"; - char bind_attr_path[SYSFS_PATH_MAX]; - int rc = -1; - - snprintf(bind_attr_path, sizeof(bind_attr_path), "%s/%s/%s/%s/%s/%s", - SYSFS_MNT_PATH, SYSFS_BUS_NAME, SYSFS_BUS_TYPE, - SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name); - - rc = write_sysfs_attribute(bind_attr_path, busid, strlen(busid)); - if (rc < 0) { - err("error binding device %s to driver: %s", busid, - strerror(errno)); - return -1; - } - - return 0; -} - -/* buggy driver may cause dead lock */ -static int unbind_other(char *busid) -{ - enum unbind_status status = UNBIND_ST_OK; - - char attr_name[] = "unbind"; - char unbind_attr_path[SYSFS_PATH_MAX]; - int rc = -1; - - struct udev *udev; - struct udev_device *dev; - const char *driver; - const char *bDevClass; - - /* Create libudev context. */ - udev = udev_new(); - - /* Get the device. */ - dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid); - if (!dev) { - dbg("unable to find device with bus ID %s", busid); - goto err_close_busid_dev; - } - - /* Check what kind of device it is. 
*/ - bDevClass = udev_device_get_sysattr_value(dev, "bDeviceClass"); - if (!bDevClass) { - dbg("unable to get bDevClass device attribute"); - goto err_close_busid_dev; - } - - if (!strncmp(bDevClass, "09", strlen(bDevClass))) { - dbg("skip unbinding of hub"); - goto err_close_busid_dev; - } - - /* Get the device driver. */ - driver = udev_device_get_driver(dev); - if (!driver) { - /* No driver bound to this device. */ - goto out; - } - - if (!strncmp(USBIP_HOST_DRV_NAME, driver, - strlen(USBIP_HOST_DRV_NAME))) { - /* Already bound to usbip-host. */ - status = UNBIND_ST_USBIP_HOST; - goto out; - } - - /* Unbind device from driver. */ - snprintf(unbind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s", - SYSFS_MNT_PATH, SYSFS_BUS_NAME, SYSFS_BUS_TYPE, - SYSFS_DRIVERS_NAME, driver, attr_name); - - rc = write_sysfs_attribute(unbind_attr_path, busid, strlen(busid)); - if (rc < 0) { - err("error unbinding device %s from driver", busid); - goto err_close_busid_dev; - } - - goto out; - -err_close_busid_dev: - status = UNBIND_ST_FAILED; -out: - udev_device_unref(dev); - udev_unref(udev); - - return status; -} - -static int bind_device(char *busid) -{ - int rc; - struct udev *udev; - struct udev_device *dev; - - /* Check whether the device with this bus ID exists. */ - udev = udev_new(); - dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid); - if (!dev) { - err("device with the specified bus ID does not exist"); - return -1; - } - udev_unref(udev); - - rc = unbind_other(busid); - if (rc == UNBIND_ST_FAILED) { - err("could not unbind driver from device on busid %s", busid); - return -1; - } else if (rc == UNBIND_ST_USBIP_HOST) { - err("device on busid %s is already bound to %s", busid, - USBIP_HOST_DRV_NAME); - return -1; - } - - rc = modify_match_busid(busid, 1); - if (rc < 0) { - err("unable to bind device on %s", busid); - return -1; - } - - rc = bind_usbip(busid); - if (rc < 0) { - err("could not bind device to %s", USBIP_HOST_DRV_NAME); - modify_match_busid(busid, 0); - return -1; - } - - info("bind device on busid %s: complete", busid); - - return 0; -} - -int usbip_bind(int argc, char *argv[]) -{ - static const struct option opts[] = { - { "busid", required_argument, NULL, 'b' }, - { NULL, 0, NULL, 0 } - }; - - int opt; - int ret = -1; - - for (;;) { - opt = getopt_long(argc, argv, "b:", opts, NULL); - - if (opt == -1) - break; - - switch (opt) { - case 'b': - ret = bind_device(optarg); - goto out; - default: - goto err_out; - } - } - -err_out: - usbip_bind_usage(); -out: - return ret; -} diff --git a/drivers/staging/usbip/userspace/src/usbip_detach.c b/drivers/staging/usbip/userspace/src/usbip_detach.c deleted file mode 100644 index 05c6d15856eb..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_detach.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. 
If not, see <http://www.gnu.org/licenses/>. - */ - -#include <ctype.h> -#include <limits.h> -#include <stdint.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#include <getopt.h> -#include <unistd.h> - -#include "vhci_driver.h" -#include "usbip_common.h" -#include "usbip_network.h" -#include "usbip.h" - -static const char usbip_detach_usage_string[] = - "usbip detach <args>\n" - " -p, --port=<port> " USBIP_VHCI_DRV_NAME - " port the device is on\n"; - -void usbip_detach_usage(void) -{ - printf("usage: %s", usbip_detach_usage_string); -} - -static int detach_port(char *port) -{ - int ret; - uint8_t portnum; - char path[PATH_MAX+1]; - - for (unsigned int i = 0; i < strlen(port); i++) - if (!isdigit(port[i])) { - err("invalid port %s", port); - return -1; - } - - /* check max port */ - - portnum = atoi(port); - - /* remove the port state file */ - - snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", portnum); - - remove(path); - rmdir(VHCI_STATE_PATH); - - ret = usbip_vhci_driver_open(); - if (ret < 0) { - err("open vhci_driver"); - return -1; - } - - ret = usbip_vhci_detach_device(portnum); - if (ret < 0) - return -1; - - usbip_vhci_driver_close(); - - return ret; -} - -int usbip_detach(int argc, char *argv[]) -{ - static const struct option opts[] = { - { "port", required_argument, NULL, 'p' }, - { NULL, 0, NULL, 0 } - }; - int opt; - int ret = -1; - - for (;;) { - opt = getopt_long(argc, argv, "p:", opts, NULL); - - if (opt == -1) - break; - - switch (opt) { - case 'p': - ret = detach_port(optarg); - goto out; - default: - goto err_out; - } - } - -err_out: - usbip_detach_usage(); -out: - return ret; -} diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c deleted file mode 100644 index d5ce34a410e7..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_list.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <sys/types.h> -#include <libudev.h> - -#include <errno.h> -#include <stdbool.h> -#include <stdint.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> - -#include <getopt.h> -#include <netdb.h> -#include <unistd.h> - -#include "usbip_common.h" -#include "usbip_network.h" -#include "usbip.h" - -static const char usbip_list_usage_string[] = - "usbip list [-p|--parsable] <args>\n" - " -p, --parsable Parsable list format\n" - " -r, --remote=<host> List the exportable USB devices on <host>\n" - " -l, --local List the local USB devices\n"; - -void usbip_list_usage(void) -{ - printf("usage: %s", usbip_list_usage_string); -} - -static int get_exported_devices(char *host, int sockfd) -{ - char product_name[100]; - char class_name[100]; - struct op_devlist_reply reply; - uint16_t code = OP_REP_DEVLIST; - struct usbip_usb_device udev; - struct usbip_usb_interface uintf; - unsigned int i; - int rc, j; - - rc = usbip_net_send_op_common(sockfd, OP_REQ_DEVLIST, 0); - if (rc < 0) { - dbg("usbip_net_send_op_common failed"); - return -1; - } - - rc = usbip_net_recv_op_common(sockfd, &code); - if (rc < 0) { - dbg("usbip_net_recv_op_common failed"); - return -1; - } - - memset(&reply, 0, sizeof(reply)); - rc = usbip_net_recv(sockfd, &reply, sizeof(reply)); - if (rc < 0) { - dbg("usbip_net_recv_op_devlist failed"); - return -1; - } - PACK_OP_DEVLIST_REPLY(0, &reply); - dbg("exportable devices: %d\n", reply.ndev); - - if (reply.ndev == 0) { - info("no exportable devices found on %s", host); - return 0; - } - - printf("Exportable USB devices\n"); - printf("======================\n"); - printf(" - %s\n", host); - - for (i = 0; i < reply.ndev; i++) { - memset(&udev, 0, sizeof(udev)); - rc = usbip_net_recv(sockfd, &udev, sizeof(udev)); - if (rc < 0) { - dbg("usbip_net_recv failed: usbip_usb_device[%d]", i); - return -1; - } - usbip_net_pack_usb_device(0, &udev); - - usbip_names_get_product(product_name, sizeof(product_name), - udev.idVendor, udev.idProduct); - usbip_names_get_class(class_name, sizeof(class_name), - udev.bDeviceClass, udev.bDeviceSubClass, - udev.bDeviceProtocol); - printf("%11s: %s\n", udev.busid, product_name); - printf("%11s: %s\n", "", udev.path); - printf("%11s: %s\n", "", class_name); - - for (j = 0; j < udev.bNumInterfaces; j++) { - rc = usbip_net_recv(sockfd, &uintf, sizeof(uintf)); - if (rc < 0) { - err("usbip_net_recv failed: usbip_usb_intf[%d]", - j); - - return -1; - } - usbip_net_pack_usb_interface(0, &uintf); - - usbip_names_get_class(class_name, sizeof(class_name), - uintf.bInterfaceClass, - uintf.bInterfaceSubClass, - uintf.bInterfaceProtocol); - printf("%11s: %2d - %s\n", "", j, class_name); - } - - printf("\n"); - } - - return 0; -} - -static int list_exported_devices(char *host) -{ - int rc; - int sockfd; - - sockfd = usbip_net_tcp_connect(host, usbip_port_string); - if (sockfd < 0) { - err("could not connect to %s:%s: %s", host, - usbip_port_string, gai_strerror(sockfd)); - return -1; - } - dbg("connected to %s:%s", host, usbip_port_string); - - rc = get_exported_devices(host, sockfd); - if (rc < 0) { - err("failed to get device list from %s", host); - return -1; - } - - close(sockfd); - - return 0; -} - -static void print_device(const char *busid, const char *vendor, - const char *product, bool parsable) -{ - if (parsable) - printf("busid=%s#usbid=%.4s:%.4s#", busid, vendor, product); - else - printf(" - busid %s (%.4s:%.4s)\n", busid, vendor, product); -} - -static void print_product_name(char *product_name, bool parsable) -{ - if (!parsable) - 
printf(" %s\n", product_name); -} - -static int list_devices(bool parsable) -{ - struct udev *udev; - struct udev_enumerate *enumerate; - struct udev_list_entry *devices, *dev_list_entry; - struct udev_device *dev; - const char *path; - const char *idVendor; - const char *idProduct; - const char *bConfValue; - const char *bNumIntfs; - const char *busid; - char product_name[128]; - int ret = -1; - - /* Create libudev context. */ - udev = udev_new(); - - /* Create libudev device enumeration. */ - enumerate = udev_enumerate_new(udev); - - /* Take only USB devices that are not hubs and do not have - * the bInterfaceNumber attribute, i.e. are not interfaces. - */ - udev_enumerate_add_match_subsystem(enumerate, "usb"); - udev_enumerate_add_nomatch_sysattr(enumerate, "bDeviceClass", "09"); - udev_enumerate_add_nomatch_sysattr(enumerate, "bInterfaceNumber", NULL); - udev_enumerate_scan_devices(enumerate); - - devices = udev_enumerate_get_list_entry(enumerate); - - /* Show information about each device. */ - udev_list_entry_foreach(dev_list_entry, devices) { - path = udev_list_entry_get_name(dev_list_entry); - dev = udev_device_new_from_syspath(udev, path); - - /* Get device information. */ - idVendor = udev_device_get_sysattr_value(dev, "idVendor"); - idProduct = udev_device_get_sysattr_value(dev, "idProduct"); - bConfValue = udev_device_get_sysattr_value(dev, "bConfigurationValue"); - bNumIntfs = udev_device_get_sysattr_value(dev, "bNumInterfaces"); - busid = udev_device_get_sysname(dev); - if (!idVendor || !idProduct || !bConfValue || !bNumIntfs) { - err("problem getting device attributes: %s", - strerror(errno)); - goto err_out; - } - - /* Get product name. */ - usbip_names_get_product(product_name, sizeof(product_name), - strtol(idVendor, NULL, 16), - strtol(idProduct, NULL, 16)); - - /* Print information. */ - print_device(busid, idVendor, idProduct, parsable); - print_product_name(product_name, parsable); - - printf("\n"); - - udev_device_unref(dev); - } - - ret = 0; - -err_out: - udev_enumerate_unref(enumerate); - udev_unref(udev); - - return ret; -} - -int usbip_list(int argc, char *argv[]) -{ - static const struct option opts[] = { - { "parsable", no_argument, NULL, 'p' }, - { "remote", required_argument, NULL, 'r' }, - { "local", no_argument, NULL, 'l' }, - { NULL, 0, NULL, 0 } - }; - - bool parsable = false; - int opt; - int ret = -1; - - if (usbip_names_init(USBIDS_FILE)) - err("failed to open %s", USBIDS_FILE); - - for (;;) { - opt = getopt_long(argc, argv, "pr:l", opts, NULL); - - if (opt == -1) - break; - - switch (opt) { - case 'p': - parsable = true; - break; - case 'r': - ret = list_exported_devices(optarg); - goto out; - case 'l': - ret = list_devices(parsable); - goto out; - default: - goto err_out; - } - } - -err_out: - usbip_list_usage(); -out: - usbip_names_free(); - - return ret; -} diff --git a/drivers/staging/usbip/userspace/src/usbip_network.c b/drivers/staging/usbip/userspace/src/usbip_network.c deleted file mode 100644 index b4c37e76a6e0..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_network.c +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <sys/socket.h> - -#include <string.h> - -#include <arpa/inet.h> -#include <netdb.h> -#include <netinet/tcp.h> -#include <unistd.h> - -#ifdef HAVE_LIBWRAP -#include <tcpd.h> -#endif - -#include "usbip_common.h" -#include "usbip_network.h" - -int usbip_port = 3240; -char *usbip_port_string = "3240"; - -void usbip_setup_port_number(char *arg) -{ - dbg("parsing port arg '%s'", arg); - char *end; - unsigned long int port = strtoul(arg, &end, 10); - - if (end == arg) { - err("port: could not parse '%s' as a decimal integer", arg); - return; - } - - if (*end != '\0') { - err("port: garbage at end of '%s'", arg); - return; - } - - if (port > UINT16_MAX) { - err("port: %s too high (max=%d)", - arg, UINT16_MAX); - return; - } - - usbip_port = port; - usbip_port_string = arg; - info("using port %d (\"%s\")", usbip_port, usbip_port_string); -} - -void usbip_net_pack_uint32_t(int pack, uint32_t *num) -{ - uint32_t i; - - if (pack) - i = htonl(*num); - else - i = ntohl(*num); - - *num = i; -} - -void usbip_net_pack_uint16_t(int pack, uint16_t *num) -{ - uint16_t i; - - if (pack) - i = htons(*num); - else - i = ntohs(*num); - - *num = i; -} - -void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev) -{ - usbip_net_pack_uint32_t(pack, &udev->busnum); - usbip_net_pack_uint32_t(pack, &udev->devnum); - usbip_net_pack_uint32_t(pack, &udev->speed); - - usbip_net_pack_uint16_t(pack, &udev->idVendor); - usbip_net_pack_uint16_t(pack, &udev->idProduct); - usbip_net_pack_uint16_t(pack, &udev->bcdDevice); -} - -void usbip_net_pack_usb_interface(int pack __attribute__((unused)), - struct usbip_usb_interface *udev - __attribute__((unused))) -{ - /* uint8_t members need nothing */ -} - -static ssize_t usbip_net_xmit(int sockfd, void *buff, size_t bufflen, - int sending) -{ - ssize_t nbytes; - ssize_t total = 0; - - if (!bufflen) - return 0; - - do { - if (sending) - nbytes = send(sockfd, buff, bufflen, 0); - else - nbytes = recv(sockfd, buff, bufflen, MSG_WAITALL); - - if (nbytes <= 0) - return -1; - - buff = (void *)((intptr_t) buff + nbytes); - bufflen -= nbytes; - total += nbytes; - - } while (bufflen > 0); - - return total; -} - -ssize_t usbip_net_recv(int sockfd, void *buff, size_t bufflen) -{ - return usbip_net_xmit(sockfd, buff, bufflen, 0); -} - -ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen) -{ - return usbip_net_xmit(sockfd, buff, bufflen, 1); -} - -int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status) -{ - struct op_common op_common; - int rc; - - memset(&op_common, 0, sizeof(op_common)); - - op_common.version = USBIP_VERSION; - op_common.code = code; - op_common.status = status; - - PACK_OP_COMMON(1, &op_common); - - rc = usbip_net_send(sockfd, &op_common, sizeof(op_common)); - if (rc < 0) { - dbg("usbip_net_send failed: %d", rc); - return -1; - } - - return 0; -} - -int usbip_net_recv_op_common(int sockfd, uint16_t *code) -{ - struct op_common op_common; - int rc; - - memset(&op_common, 0, sizeof(op_common)); - - rc = usbip_net_recv(sockfd, &op_common, sizeof(op_common)); - if (rc < 0) { - dbg("usbip_net_recv failed: %d", rc); - goto err; - } - - 
PACK_OP_COMMON(0, &op_common); - - if (op_common.version != USBIP_VERSION) { - dbg("version mismatch: %d %d", op_common.version, - USBIP_VERSION); - goto err; - } - - switch (*code) { - case OP_UNSPEC: - break; - default: - if (op_common.code != *code) { - dbg("unexpected pdu %#0x for %#0x", op_common.code, - *code); - goto err; - } - } - - if (op_common.status != ST_OK) { - dbg("request failed at peer: %d", op_common.status); - goto err; - } - - *code = op_common.code; - - return 0; -err: - return -1; -} - -int usbip_net_set_reuseaddr(int sockfd) -{ - const int val = 1; - int ret; - - ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)); - if (ret < 0) - dbg("setsockopt: SO_REUSEADDR"); - - return ret; -} - -int usbip_net_set_nodelay(int sockfd) -{ - const int val = 1; - int ret; - - ret = setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)); - if (ret < 0) - dbg("setsockopt: TCP_NODELAY"); - - return ret; -} - -int usbip_net_set_keepalive(int sockfd) -{ - const int val = 1; - int ret; - - ret = setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)); - if (ret < 0) - dbg("setsockopt: SO_KEEPALIVE"); - - return ret; -} - -int usbip_net_set_v6only(int sockfd) -{ - const int val = 1; - int ret; - - ret = setsockopt(sockfd, IPPROTO_IPV6, IPV6_V6ONLY, &val, sizeof(val)); - if (ret < 0) - dbg("setsockopt: IPV6_V6ONLY"); - - return ret; -} - -/* - * IPv6 Ready - */ -int usbip_net_tcp_connect(char *hostname, char *service) -{ - struct addrinfo hints, *res, *rp; - int sockfd; - int ret; - - memset(&hints, 0, sizeof(hints)); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - - /* get all possible addresses */ - ret = getaddrinfo(hostname, service, &hints, &res); - if (ret < 0) { - dbg("getaddrinfo: %s service %s: %s", hostname, service, - gai_strerror(ret)); - return ret; - } - - /* try the addresses */ - for (rp = res; rp; rp = rp->ai_next) { - sockfd = socket(rp->ai_family, rp->ai_socktype, - rp->ai_protocol); - if (sockfd < 0) - continue; - - /* should set TCP_NODELAY for usbip */ - usbip_net_set_nodelay(sockfd); - /* TODO: write code for heartbeat */ - usbip_net_set_keepalive(sockfd); - - if (connect(sockfd, rp->ai_addr, rp->ai_addrlen) == 0) - break; - - close(sockfd); - } - - freeaddrinfo(res); - - if (!rp) - return EAI_SYSTEM; - - return sockfd; -} diff --git a/drivers/staging/usbip/userspace/src/usbip_network.h b/drivers/staging/usbip/userspace/src/usbip_network.h deleted file mode 100644 index c1e875cf1078..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_network.h +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright (C) 2005-2007 Takahiro Hirofuchi - */ - -#ifndef __USBIP_NETWORK_H -#define __USBIP_NETWORK_H - -#ifdef HAVE_CONFIG_H -#include "../config.h" -#endif - -#include <sys/types.h> - -#include <stdint.h> - -extern int usbip_port; -extern char *usbip_port_string; -void usbip_setup_port_number(char *arg); - -/* ---------------------------------------------------------------------- */ -/* Common header for all the kinds of PDUs. 
*/ -struct op_common { - uint16_t version; - -#define OP_REQUEST (0x80 << 8) -#define OP_REPLY (0x00 << 8) - uint16_t code; - - /* add more error code */ -#define ST_OK 0x00 -#define ST_NA 0x01 - uint32_t status; /* op_code status (for reply) */ - -} __attribute__((packed)); - -#define PACK_OP_COMMON(pack, op_common) do {\ - usbip_net_pack_uint16_t(pack, &(op_common)->version);\ - usbip_net_pack_uint16_t(pack, &(op_common)->code);\ - usbip_net_pack_uint32_t(pack, &(op_common)->status);\ -} while (0) - -/* ---------------------------------------------------------------------- */ -/* Dummy Code */ -#define OP_UNSPEC 0x00 -#define OP_REQ_UNSPEC OP_UNSPEC -#define OP_REP_UNSPEC OP_UNSPEC - -/* ---------------------------------------------------------------------- */ -/* Retrieve USB device information. (still not used) */ -#define OP_DEVINFO 0x02 -#define OP_REQ_DEVINFO (OP_REQUEST | OP_DEVINFO) -#define OP_REP_DEVINFO (OP_REPLY | OP_DEVINFO) - -struct op_devinfo_request { - char busid[SYSFS_BUS_ID_SIZE]; -} __attribute__((packed)); - -struct op_devinfo_reply { - struct usbip_usb_device udev; - struct usbip_usb_interface uinf[]; -} __attribute__((packed)); - -/* ---------------------------------------------------------------------- */ -/* Import a remote USB device. */ -#define OP_IMPORT 0x03 -#define OP_REQ_IMPORT (OP_REQUEST | OP_IMPORT) -#define OP_REP_IMPORT (OP_REPLY | OP_IMPORT) - -struct op_import_request { - char busid[SYSFS_BUS_ID_SIZE]; -} __attribute__((packed)); - -struct op_import_reply { - struct usbip_usb_device udev; -// struct usbip_usb_interface uinf[]; -} __attribute__((packed)); - -#define PACK_OP_IMPORT_REQUEST(pack, request) do {\ -} while (0) - -#define PACK_OP_IMPORT_REPLY(pack, reply) do {\ - usbip_net_pack_usb_device(pack, &(reply)->udev);\ -} while (0) - -/* ---------------------------------------------------------------------- */ -/* Export a USB device to a remote host. */ -#define OP_EXPORT 0x06 -#define OP_REQ_EXPORT (OP_REQUEST | OP_EXPORT) -#define OP_REP_EXPORT (OP_REPLY | OP_EXPORT) - -struct op_export_request { - struct usbip_usb_device udev; -} __attribute__((packed)); - -struct op_export_reply { - int returncode; -} __attribute__((packed)); - - -#define PACK_OP_EXPORT_REQUEST(pack, request) do {\ - usbip_net_pack_usb_device(pack, &(request)->udev);\ -} while (0) - -#define PACK_OP_EXPORT_REPLY(pack, reply) do {\ -} while (0) - -/* ---------------------------------------------------------------------- */ -/* un-Export a USB device from a remote host. */ -#define OP_UNEXPORT 0x07 -#define OP_REQ_UNEXPORT (OP_REQUEST | OP_UNEXPORT) -#define OP_REP_UNEXPORT (OP_REPLY | OP_UNEXPORT) - -struct op_unexport_request { - struct usbip_usb_device udev; -} __attribute__((packed)); - -struct op_unexport_reply { - int returncode; -} __attribute__((packed)); - -#define PACK_OP_UNEXPORT_REQUEST(pack, request) do {\ - usbip_net_pack_usb_device(pack, &(request)->udev);\ -} while (0) - -#define PACK_OP_UNEXPORT_REPLY(pack, reply) do {\ -} while (0) - -/* ---------------------------------------------------------------------- */ -/* Negotiate IPSec encryption key. 
(still not used) */ -#define OP_CRYPKEY 0x04 -#define OP_REQ_CRYPKEY (OP_REQUEST | OP_CRYPKEY) -#define OP_REP_CRYPKEY (OP_REPLY | OP_CRYPKEY) - -struct op_crypkey_request { - /* 128bit key */ - uint32_t key[4]; -} __attribute__((packed)); - -struct op_crypkey_reply { - uint32_t __reserved; -} __attribute__((packed)); - - -/* ---------------------------------------------------------------------- */ -/* Retrieve the list of exported USB devices. */ -#define OP_DEVLIST 0x05 -#define OP_REQ_DEVLIST (OP_REQUEST | OP_DEVLIST) -#define OP_REP_DEVLIST (OP_REPLY | OP_DEVLIST) - -struct op_devlist_request { -} __attribute__((packed)); - -struct op_devlist_reply { - uint32_t ndev; - /* followed by reply_extra[] */ -} __attribute__((packed)); - -struct op_devlist_reply_extra { - struct usbip_usb_device udev; - struct usbip_usb_interface uinf[]; -} __attribute__((packed)); - -#define PACK_OP_DEVLIST_REQUEST(pack, request) do {\ -} while (0) - -#define PACK_OP_DEVLIST_REPLY(pack, reply) do {\ - usbip_net_pack_uint32_t(pack, &(reply)->ndev);\ -} while (0) - -void usbip_net_pack_uint32_t(int pack, uint32_t *num); -void usbip_net_pack_uint16_t(int pack, uint16_t *num); -void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev); -void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf); - -ssize_t usbip_net_recv(int sockfd, void *buff, size_t bufflen); -ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen); -int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status); -int usbip_net_recv_op_common(int sockfd, uint16_t *code); -int usbip_net_set_reuseaddr(int sockfd); -int usbip_net_set_nodelay(int sockfd); -int usbip_net_set_keepalive(int sockfd); -int usbip_net_set_v6only(int sockfd); -int usbip_net_tcp_connect(char *hostname, char *port); - -#endif /* __USBIP_NETWORK_H */ diff --git a/drivers/staging/usbip/userspace/src/usbip_port.c b/drivers/staging/usbip/userspace/src/usbip_port.c deleted file mode 100644 index a2e884fd9226..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_port.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include "vhci_driver.h" -#include "usbip_common.h" - -static int list_imported_devices(void) -{ - int i; - struct usbip_imported_device *idev; - int ret; - - ret = usbip_vhci_driver_open(); - if (ret < 0) { - err("open vhci_driver"); - return -1; - } - - printf("Imported USB devices\n"); - printf("====================\n"); - - for (i = 0; i < vhci_driver->nports; i++) { - idev = &vhci_driver->idev[i]; - - if (usbip_vhci_imported_device_dump(idev) < 0) - ret = -1; - } - - usbip_vhci_driver_close(); - - return ret; - -} - -int usbip_port_show(__attribute__((unused)) int argc, - __attribute__((unused)) char *argv[]) -{ - int ret; - - ret = list_imported_devices(); - if (ret < 0) - err("list imported devices"); - - return ret; -} diff --git a/drivers/staging/usbip/userspace/src/usbip_unbind.c b/drivers/staging/usbip/userspace/src/usbip_unbind.c deleted file mode 100644 index a4a496c9cbaf..000000000000 --- a/drivers/staging/usbip/userspace/src/usbip_unbind.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <libudev.h> - -#include <errno.h> -#include <stdio.h> -#include <string.h> - -#include <getopt.h> - -#include "usbip_common.h" -#include "utils.h" -#include "usbip.h" -#include "sysfs_utils.h" - -static const char usbip_unbind_usage_string[] = - "usbip unbind <args>\n" - " -b, --busid=<busid> Unbind " USBIP_HOST_DRV_NAME ".ko from " - "device on <busid>\n"; - -void usbip_unbind_usage(void) -{ - printf("usage: %s", usbip_unbind_usage_string); -} - -static int unbind_device(char *busid) -{ - char bus_type[] = "usb"; - int rc, ret = -1; - - char unbind_attr_name[] = "unbind"; - char unbind_attr_path[SYSFS_PATH_MAX]; - char rebind_attr_name[] = "rebind"; - char rebind_attr_path[SYSFS_PATH_MAX]; - - struct udev *udev; - struct udev_device *dev; - const char *driver; - - /* Create libudev context. */ - udev = udev_new(); - - /* Check whether the device with this bus ID exists. */ - dev = udev_device_new_from_subsystem_sysname(udev, "usb", busid); - if (!dev) { - err("device with the specified bus ID does not exist"); - goto err_close_udev; - } - - /* Check whether the device is using usbip-host driver. */ - driver = udev_device_get_driver(dev); - if (!driver || strcmp(driver, "usbip-host")) { - err("device is not bound to usbip-host driver"); - goto err_close_udev; - } - - /* Unbind device from driver. */ - snprintf(unbind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s", - SYSFS_MNT_PATH, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME, - USBIP_HOST_DRV_NAME, unbind_attr_name); - - rc = write_sysfs_attribute(unbind_attr_path, busid, strlen(busid)); - if (rc < 0) { - err("error unbinding device %s from driver", busid); - goto err_close_udev; - } - - /* Notify driver of unbind. 
*/ - rc = modify_match_busid(busid, 0); - if (rc < 0) { - err("unable to unbind device on %s", busid); - goto err_close_udev; - } - - /* Trigger new probing. */ - snprintf(rebind_attr_path, sizeof(unbind_attr_path), "%s/%s/%s/%s/%s/%s", - SYSFS_MNT_PATH, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME, - USBIP_HOST_DRV_NAME, rebind_attr_name); - - rc = write_sysfs_attribute(rebind_attr_path, busid, strlen(busid)); - if (rc < 0) { - err("error rebinding"); - goto err_close_udev; - } - - ret = 0; - info("unbind device on busid %s: complete", busid); - -err_close_udev: - udev_device_unref(dev); - udev_unref(udev); - - return ret; -} - -int usbip_unbind(int argc, char *argv[]) -{ - static const struct option opts[] = { - { "busid", required_argument, NULL, 'b' }, - { NULL, 0, NULL, 0 } - }; - - int opt; - int ret = -1; - - for (;;) { - opt = getopt_long(argc, argv, "b:", opts, NULL); - - if (opt == -1) - break; - - switch (opt) { - case 'b': - ret = unbind_device(optarg); - goto out; - default: - goto err_out; - } - } - -err_out: - usbip_unbind_usage(); -out: - return ret; -} diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c deleted file mode 100644 index 2f87f2d348ba..000000000000 --- a/drivers/staging/usbip/userspace/src/usbipd.c +++ /dev/null @@ -1,679 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#ifdef HAVE_CONFIG_H -#include "../config.h" -#endif - -#define _GNU_SOURCE -#include <errno.h> -#include <unistd.h> -#include <netdb.h> -#include <string.h> -#include <stdlib.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <arpa/inet.h> -#include <sys/socket.h> -#include <netinet/in.h> - -#ifdef HAVE_LIBWRAP -#include <tcpd.h> -#endif - -#include <getopt.h> -#include <signal.h> -#include <poll.h> - -#include "usbip_host_driver.h" -#include "usbip_common.h" -#include "usbip_network.h" -#include "list.h" - -#undef PROGNAME -#define PROGNAME "usbipd" -#define MAXSOCKFD 20 - -#define MAIN_LOOP_TIMEOUT 10 - -#define DEFAULT_PID_FILE "/var/run/" PROGNAME ".pid" - -static const char usbip_version_string[] = PACKAGE_STRING; - -static const char usbipd_help_string[] = - "usage: usbipd [options]\n" - "\n" - " -4, --ipv4\n" - " Bind to IPv4. Default is both.\n" - "\n" - " -6, --ipv6\n" - " Bind to IPv6. 
Default is both.\n" - "\n" - " -D, --daemon\n" - " Run as a daemon process.\n" - "\n" - " -d, --debug\n" - " Print debugging information.\n" - "\n" - " -PFILE, --pid FILE\n" - " Write process id to FILE.\n" - " If no FILE specified, use " DEFAULT_PID_FILE "\n" - "\n" - " -tPORT, --tcp-port PORT\n" - " Listen on TCP/IP port PORT.\n" - "\n" - " -h, --help\n" - " Print this help.\n" - "\n" - " -v, --version\n" - " Show version.\n"; - -static void usbipd_help(void) -{ - printf("%s\n", usbipd_help_string); -} - -static int recv_request_import(int sockfd) -{ - struct op_import_request req; - struct op_common reply; - struct usbip_exported_device *edev; - struct usbip_usb_device pdu_udev; - struct list_head *i; - int found = 0; - int error = 0; - int rc; - - memset(&req, 0, sizeof(req)); - memset(&reply, 0, sizeof(reply)); - - rc = usbip_net_recv(sockfd, &req, sizeof(req)); - if (rc < 0) { - dbg("usbip_net_recv failed: import request"); - return -1; - } - PACK_OP_IMPORT_REQUEST(0, &req); - - list_for_each(i, &host_driver->edev_list) { - edev = list_entry(i, struct usbip_exported_device, node); - if (!strncmp(req.busid, edev->udev.busid, SYSFS_BUS_ID_SIZE)) { - info("found requested device: %s", req.busid); - found = 1; - break; - } - } - - if (found) { - /* should set TCP_NODELAY for usbip */ - usbip_net_set_nodelay(sockfd); - - /* export device needs a TCP/IP socket descriptor */ - rc = usbip_host_export_device(edev, sockfd); - if (rc < 0) - error = 1; - } else { - info("requested device not found: %s", req.busid); - error = 1; - } - - rc = usbip_net_send_op_common(sockfd, OP_REP_IMPORT, - (!error ? ST_OK : ST_NA)); - if (rc < 0) { - dbg("usbip_net_send_op_common failed: %#0x", OP_REP_IMPORT); - return -1; - } - - if (error) { - dbg("import request busid %s: failed", req.busid); - return -1; - } - - memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev)); - usbip_net_pack_usb_device(1, &pdu_udev); - - rc = usbip_net_send(sockfd, &pdu_udev, sizeof(pdu_udev)); - if (rc < 0) { - dbg("usbip_net_send failed: devinfo"); - return -1; - } - - dbg("import request busid %s: complete", req.busid); - - return 0; -} - -static int send_reply_devlist(int connfd) -{ - struct usbip_exported_device *edev; - struct usbip_usb_device pdu_udev; - struct usbip_usb_interface pdu_uinf; - struct op_devlist_reply reply; - struct list_head *j; - int rc, i; - - reply.ndev = 0; - /* number of exported devices */ - list_for_each(j, &host_driver->edev_list) { - reply.ndev += 1; - } - info("exportable devices: %d", reply.ndev); - - rc = usbip_net_send_op_common(connfd, OP_REP_DEVLIST, ST_OK); - if (rc < 0) { - dbg("usbip_net_send_op_common failed: %#0x", OP_REP_DEVLIST); - return -1; - } - PACK_OP_DEVLIST_REPLY(1, &reply); - - rc = usbip_net_send(connfd, &reply, sizeof(reply)); - if (rc < 0) { - dbg("usbip_net_send failed: %#0x", OP_REP_DEVLIST); - return -1; - } - - list_for_each(j, &host_driver->edev_list) { - edev = list_entry(j, struct usbip_exported_device, node); - dump_usb_device(&edev->udev); - memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev)); - usbip_net_pack_usb_device(1, &pdu_udev); - - rc = usbip_net_send(connfd, &pdu_udev, sizeof(pdu_udev)); - if (rc < 0) { - dbg("usbip_net_send failed: pdu_udev"); - return -1; - } - - for (i = 0; i < edev->udev.bNumInterfaces; i++) { - dump_usb_interface(&edev->uinf[i]); - memcpy(&pdu_uinf, &edev->uinf[i], sizeof(pdu_uinf)); - usbip_net_pack_usb_interface(1, &pdu_uinf); - - rc = usbip_net_send(connfd, &pdu_uinf, - sizeof(pdu_uinf)); - if (rc < 0) { - err("usbip_net_send failed: 
pdu_uinf"); - return -1; - } - } - } - - return 0; -} - -static int recv_request_devlist(int connfd) -{ - struct op_devlist_request req; - int rc; - - memset(&req, 0, sizeof(req)); - - rc = usbip_net_recv(connfd, &req, sizeof(req)); - if (rc < 0) { - dbg("usbip_net_recv failed: devlist request"); - return -1; - } - - rc = send_reply_devlist(connfd); - if (rc < 0) { - dbg("send_reply_devlist failed"); - return -1; - } - - return 0; -} - -static int recv_pdu(int connfd) -{ - uint16_t code = OP_UNSPEC; - int ret; - - ret = usbip_net_recv_op_common(connfd, &code); - if (ret < 0) { - dbg("could not receive opcode: %#0x", code); - return -1; - } - - ret = usbip_host_refresh_device_list(); - if (ret < 0) { - dbg("could not refresh device list: %d", ret); - return -1; - } - - info("received request: %#0x(%d)", code, connfd); - switch (code) { - case OP_REQ_DEVLIST: - ret = recv_request_devlist(connfd); - break; - case OP_REQ_IMPORT: - ret = recv_request_import(connfd); - break; - case OP_REQ_DEVINFO: - case OP_REQ_CRYPKEY: - default: - err("received an unknown opcode: %#0x", code); - ret = -1; - } - - if (ret == 0) - info("request %#0x(%d): complete", code, connfd); - else - info("request %#0x(%d): failed", code, connfd); - - return ret; -} - -#ifdef HAVE_LIBWRAP -static int tcpd_auth(int connfd) -{ - struct request_info request; - int rc; - - request_init(&request, RQ_DAEMON, PROGNAME, RQ_FILE, connfd, 0); - fromhost(&request); - rc = hosts_access(&request); - if (rc == 0) - return -1; - - return 0; -} -#endif - -static int do_accept(int listenfd) -{ - int connfd; - struct sockaddr_storage ss; - socklen_t len = sizeof(ss); - char host[NI_MAXHOST], port[NI_MAXSERV]; - int rc; - - memset(&ss, 0, sizeof(ss)); - - connfd = accept(listenfd, (struct sockaddr *)&ss, &len); - if (connfd < 0) { - err("failed to accept connection"); - return -1; - } - - rc = getnameinfo((struct sockaddr *)&ss, len, host, sizeof(host), - port, sizeof(port), NI_NUMERICHOST | NI_NUMERICSERV); - if (rc) - err("getnameinfo: %s", gai_strerror(rc)); - -#ifdef HAVE_LIBWRAP - rc = tcpd_auth(connfd); - if (rc < 0) { - info("denied access from %s", host); - close(connfd); - return -1; - } -#endif - info("connection from %s:%s", host, port); - - return connfd; -} - -int process_request(int listenfd) -{ - pid_t childpid; - int connfd; - - connfd = do_accept(listenfd); - if (connfd < 0) - return -1; - childpid = fork(); - if (childpid == 0) { - close(listenfd); - recv_pdu(connfd); - exit(0); - } - close(connfd); - return 0; -} - -static void addrinfo_to_text(struct addrinfo *ai, char buf[], - const size_t buf_size) -{ - char hbuf[NI_MAXHOST]; - char sbuf[NI_MAXSERV]; - int rc; - - buf[0] = '\0'; - - rc = getnameinfo(ai->ai_addr, ai->ai_addrlen, hbuf, sizeof(hbuf), - sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV); - if (rc) - err("getnameinfo: %s", gai_strerror(rc)); - - snprintf(buf, buf_size, "%s:%s", hbuf, sbuf); -} - -static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[], - int maxsockfd) -{ - struct addrinfo *ai; - int ret, nsockfd = 0; - const size_t ai_buf_size = NI_MAXHOST + NI_MAXSERV + 2; - char ai_buf[ai_buf_size]; - - for (ai = ai_head; ai && nsockfd < maxsockfd; ai = ai->ai_next) { - int sock; - - addrinfo_to_text(ai, ai_buf, ai_buf_size); - dbg("opening %s", ai_buf); - sock = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); - if (sock < 0) { - err("socket: %s: %d (%s)", - ai_buf, errno, strerror(errno)); - continue; - } - - usbip_net_set_reuseaddr(sock); - usbip_net_set_nodelay(sock); - /* 
We use seperate sockets for IPv4 and IPv6 - * (see do_standalone_mode()) */ - usbip_net_set_v6only(sock); - - if (sock >= FD_SETSIZE) { - err("FD_SETSIZE: %s: sock=%d, max=%d", - ai_buf, sock, FD_SETSIZE); - close(sock); - continue; - } - - ret = bind(sock, ai->ai_addr, ai->ai_addrlen); - if (ret < 0) { - err("bind: %s: %d (%s)", - ai_buf, errno, strerror(errno)); - close(sock); - continue; - } - - ret = listen(sock, SOMAXCONN); - if (ret < 0) { - err("listen: %s: %d (%s)", - ai_buf, errno, strerror(errno)); - close(sock); - continue; - } - - info("listening on %s", ai_buf); - sockfdlist[nsockfd++] = sock; - } - - return nsockfd; -} - -static struct addrinfo *do_getaddrinfo(char *host, int ai_family) -{ - struct addrinfo hints, *ai_head; - int rc; - - memset(&hints, 0, sizeof(hints)); - hints.ai_family = ai_family; - hints.ai_socktype = SOCK_STREAM; - hints.ai_flags = AI_PASSIVE; - - rc = getaddrinfo(host, usbip_port_string, &hints, &ai_head); - if (rc) { - err("failed to get a network address %s: %s", usbip_port_string, - gai_strerror(rc)); - return NULL; - } - - return ai_head; -} - -static void signal_handler(int i) -{ - dbg("received '%s' signal", strsignal(i)); -} - -static void set_signal(void) -{ - struct sigaction act; - - memset(&act, 0, sizeof(act)); - act.sa_handler = signal_handler; - sigemptyset(&act.sa_mask); - sigaction(SIGTERM, &act, NULL); - sigaction(SIGINT, &act, NULL); - act.sa_handler = SIG_IGN; - sigaction(SIGCLD, &act, NULL); -} - -static const char *pid_file; - -static void write_pid_file(void) -{ - if (pid_file) { - dbg("creating pid file %s", pid_file); - FILE *fp; - - fp = fopen(pid_file, "w"); - if (!fp) { - err("pid_file: %s: %d (%s)", - pid_file, errno, strerror(errno)); - return; - } - fprintf(fp, "%d\n", getpid()); - fclose(fp); - } -} - -static void remove_pid_file(void) -{ - if (pid_file) { - dbg("removing pid file %s", pid_file); - unlink(pid_file); - } -} - -static int do_standalone_mode(int daemonize, int ipv4, int ipv6) -{ - struct addrinfo *ai_head; - int sockfdlist[MAXSOCKFD]; - int nsockfd, family; - int i, terminate; - struct pollfd *fds; - struct timespec timeout; - sigset_t sigmask; - - if (usbip_host_driver_open()) { - err("please load " USBIP_CORE_MOD_NAME ".ko and " - USBIP_HOST_DRV_NAME ".ko!"); - return -1; - } - - if (daemonize) { - if (daemon(0, 0) < 0) { - err("daemonizing failed: %s", strerror(errno)); - usbip_host_driver_close(); - return -1; - } - umask(0); - usbip_use_syslog = 1; - } - set_signal(); - write_pid_file(); - - info("starting " PROGNAME " (%s)", usbip_version_string); - - /* - * To suppress warnings on systems with bindv6only disabled - * (default), we use seperate sockets for IPv6 and IPv4 and set - * IPV6_V6ONLY on the IPv6 sockets. - */ - if (ipv4 && ipv6) - family = AF_UNSPEC; - else if (ipv4) - family = AF_INET; - else - family = AF_INET6; - - ai_head = do_getaddrinfo(NULL, family); - if (!ai_head) { - usbip_host_driver_close(); - return -1; - } - nsockfd = listen_all_addrinfo(ai_head, sockfdlist, - sizeof(sockfdlist) / sizeof(*sockfdlist)); - freeaddrinfo(ai_head); - if (nsockfd <= 0) { - err("failed to open a listening socket"); - usbip_host_driver_close(); - return -1; - } - - dbg("listening on %d address%s", nsockfd, (nsockfd == 1) ? 
"" : "es"); - - fds = calloc(nsockfd, sizeof(struct pollfd)); - for (i = 0; i < nsockfd; i++) { - fds[i].fd = sockfdlist[i]; - fds[i].events = POLLIN; - } - timeout.tv_sec = MAIN_LOOP_TIMEOUT; - timeout.tv_nsec = 0; - - sigfillset(&sigmask); - sigdelset(&sigmask, SIGTERM); - sigdelset(&sigmask, SIGINT); - - terminate = 0; - while (!terminate) { - int r; - - r = ppoll(fds, nsockfd, &timeout, &sigmask); - if (r < 0) { - dbg("%s", strerror(errno)); - terminate = 1; - } else if (r) { - for (i = 0; i < nsockfd; i++) { - if (fds[i].revents & POLLIN) { - dbg("read event on fd[%d]=%d", - i, sockfdlist[i]); - process_request(sockfdlist[i]); - } - } - } else { - dbg("heartbeat timeout on ppoll()"); - } - } - - info("shutting down " PROGNAME); - free(fds); - usbip_host_driver_close(); - - return 0; -} - -int main(int argc, char *argv[]) -{ - static const struct option longopts[] = { - { "ipv4", no_argument, NULL, '4' }, - { "ipv6", no_argument, NULL, '6' }, - { "daemon", no_argument, NULL, 'D' }, - { "daemon", no_argument, NULL, 'D' }, - { "debug", no_argument, NULL, 'd' }, - { "pid", optional_argument, NULL, 'P' }, - { "tcp-port", required_argument, NULL, 't' }, - { "help", no_argument, NULL, 'h' }, - { "version", no_argument, NULL, 'v' }, - { NULL, 0, NULL, 0 } - }; - - enum { - cmd_standalone_mode = 1, - cmd_help, - cmd_version - } cmd; - - int daemonize = 0; - int ipv4 = 0, ipv6 = 0; - int opt, rc = -1; - - pid_file = NULL; - - usbip_use_stderr = 1; - usbip_use_syslog = 0; - - if (geteuid() != 0) - err("not running as root?"); - - cmd = cmd_standalone_mode; - for (;;) { - opt = getopt_long(argc, argv, "46DdP::t:hv", longopts, NULL); - - if (opt == -1) - break; - - switch (opt) { - case '4': - ipv4 = 1; - break; - case '6': - ipv6 = 1; - break; - case 'D': - daemonize = 1; - break; - case 'd': - usbip_use_debug = 1; - break; - case 'h': - cmd = cmd_help; - break; - case 'P': - pid_file = optarg ? optarg : DEFAULT_PID_FILE; - break; - case 't': - usbip_setup_port_number(optarg); - break; - case 'v': - cmd = cmd_version; - break; - case '?': - usbipd_help(); - default: - goto err_out; - } - } - - if (!ipv4 && !ipv6) - ipv4 = ipv6 = 1; - - switch (cmd) { - case cmd_standalone_mode: - rc = do_standalone_mode(daemonize, ipv4, ipv6); - remove_pid_file(); - break; - case cmd_version: - printf(PROGNAME " (%s)\n", usbip_version_string); - rc = 0; - break; - case cmd_help: - usbipd_help(); - rc = 0; - break; - default: - usbipd_help(); - goto err_out; - } - -err_out: - return (rc > -1 ? EXIT_SUCCESS : EXIT_FAILURE); -} diff --git a/drivers/staging/usbip/userspace/src/utils.c b/drivers/staging/usbip/userspace/src/utils.c deleted file mode 100644 index 2b3d6d235015..000000000000 --- a/drivers/staging/usbip/userspace/src/utils.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#include <errno.h> -#include <stdio.h> -#include <string.h> - -#include "usbip_common.h" -#include "utils.h" -#include "sysfs_utils.h" - -int modify_match_busid(char *busid, int add) -{ - char attr_name[] = "match_busid"; - char command[SYSFS_BUS_ID_SIZE + 4]; - char match_busid_attr_path[SYSFS_PATH_MAX]; - int rc; - - snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), - "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, - SYSFS_BUS_TYPE, SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, - attr_name); - - if (add) - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); - else - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); - - rc = write_sysfs_attribute(match_busid_attr_path, command, - sizeof(command)); - if (rc < 0) { - dbg("failed to write match_busid: %s", strerror(errno)); - return -1; - } - - return 0; -} diff --git a/drivers/staging/usbip/userspace/src/utils.h b/drivers/staging/usbip/userspace/src/utils.h deleted file mode 100644 index 5916fd3e02a6..000000000000 --- a/drivers/staging/usbip/userspace/src/utils.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> - * 2005-2007 Takahiro Hirofuchi - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#ifndef __UTILS_H -#define __UTILS_H - -int modify_match_busid(char *busid, int add); - -#endif /* __UTILS_H */ - diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index c78d06eff7ea..0b583a37f5b3 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1111,25 +1111,17 @@ static bool device_init_rings(PSDevice pDevice) void *vir_pool; /*allocate all RD/TD rings a single pool*/ - vir_pool = pci_alloc_consistent(pDevice->pcid, - pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) + - pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) + - pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) + - pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc), - &pDevice->pool_dma); - + vir_pool = pci_zalloc_consistent(pDevice->pcid, + pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) + + pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) + + pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) + + pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc), + &pDevice->pool_dma); if (vir_pool == NULL) { DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s : allocate desc dma memory failed\n", pDevice->dev->name); return false; } - memset(vir_pool, 0, - pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) + - pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) + - pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) + - pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc) - ); - pDevice->aRD0Ring = vir_pool; pDevice->aRD1Ring = vir_pool + pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc); @@ -1138,13 +1130,12 @@ static bool device_init_rings(PSDevice pDevice) pDevice->rd1_pool_dma = pDevice->rd0_pool_dma + pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc); - pDevice->tx0_bufs = pci_alloc_consistent(pDevice->pcid, - pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ + - pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ + - CB_BEACON_BUF_SIZE + - CB_MAX_BUF_SIZE, - &pDevice->tx_bufs_dma0); - + pDevice->tx0_bufs = pci_zalloc_consistent(pDevice->pcid, + pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ + + pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ + + CB_BEACON_BUF_SIZE + + CB_MAX_BUF_SIZE, + &pDevice->tx_bufs_dma0); if (pDevice->tx0_bufs == NULL) { DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: allocate buf dma memory failed\n", pDevice->dev->name); pci_free_consistent(pDevice->pcid, @@ -1157,13 +1148,6 @@ static bool device_init_rings(PSDevice pDevice) return false; } - memset(pDevice->tx0_bufs, 0, - pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ + - pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ + - CB_BEACON_BUF_SIZE + - CB_MAX_BUF_SIZE - ); - pDevice->td0_pool_dma = pDevice->rd1_pool_dma + pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc); diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c index f105c2ac091b..164136b07a68 100644 --- a/drivers/staging/vt6655/hostap.c +++ b/drivers/staging/vt6655/hostap.c @@ -350,6 +350,9 @@ static int hostap_set_generic_element(PSDevice pDevice, { PSMgmtObject pMgmt = pDevice->pMgmt; + if (param->u.generic_elem.len > sizeof(pMgmt->abyWPAIE)) + return -EINVAL; + memcpy(pMgmt->abyWPAIE, param->u.generic_elem.data, param->u.generic_elem.len diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1f4c794f5fcc..260c3e1e312c 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4540,6 +4540,7 @@ static void iscsit_logout_post_handler_diffcid( { struct iscsi_conn *l_conn; struct iscsi_session *sess = conn->sess; + bool conn_found = false; if (!sess) return; @@ -4548,12 +4549,13 @@ static void iscsit_logout_post_handler_diffcid( list_for_each_entry(l_conn, &sess->sess_conn_list, 
conn_list) { if (l_conn->cid == cid) { iscsit_inc_conn_usage_count(l_conn); + conn_found = true; break; } } spin_unlock_bh(&sess->conn_lock); - if (!l_conn) + if (!conn_found) return; if (l_conn->sock) diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 02f9de26f38a..18c29260b4a2 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -601,7 +601,7 @@ int iscsi_copy_param_list( param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); if (!param_list) { pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); - goto err_out; + return -1; } INIT_LIST_HEAD(¶m_list->param_list); INIT_LIST_HEAD(¶m_list->extra_response_list); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index fd90b28f1d94..73355f4fca74 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -400,6 +400,8 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( spin_lock_bh(&conn->cmd_lock); list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { + if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) + continue; if (cmd->init_task_tag == init_task_tag) { spin_unlock_bh(&conn->cmd_lock); return cmd; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index bf55c5a04cfa..756def38c77a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2363,7 +2363,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\ pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ return -EINVAL; \ } \ - if (!tmp) \ + if (tmp) \ t->_var |= _bit; \ else \ t->_var &= ~_bit; \ diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 6cd7222738fc..bc286a67af7c 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -664,7 +664,7 @@ spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) buf[0] = dev->transport->get_device_type(dev); buf[3] = 0x0c; put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); - put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]); + put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]); return 0; } diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index f9a13867cb70..693208eb9047 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -151,7 +151,7 @@ config KIRKWOOD_THERMAL config DOVE_THERMAL tristate "Temperature sensor on Marvell Dove SoCs" - depends on ARCH_DOVE + depends on ARCH_DOVE || MACH_DOVE depends on OF help Support for the Dove thermal sensor driver in the Linux thermal @@ -243,4 +243,9 @@ depends on ARCH_EXYNOS source "drivers/thermal/samsung/Kconfig" endmenu +menu "STMicroelectronics thermal drivers" +depends on ARCH_STI && OF +source "drivers/thermal/st/Kconfig" +endmenu + endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index de0636a57a64..31e232f84b6b 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -32,3 +32,4 @@ obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o +obj-$(CONFIG_ST_THERMAL) += st/ diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 84a75f89bf74..1ab0018271c5 100644 --- 
a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -305,7 +305,7 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, * @event: value showing cpufreq event for which this function invoked. * @data: callback-specific data * - * Callback to highjack the notification on cpufreq policy transition. + * Callback to hijack the notification on cpufreq policy transition. * Every time there is a change in policy, we will intercept and * update the cpufreq policy with thermal constraints. * diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c index e93f0253f6ed..17554eeb3953 100644 --- a/drivers/thermal/int3403_thermal.c +++ b/drivers/thermal/int3403_thermal.c @@ -33,6 +33,10 @@ struct int3403_sensor { struct thermal_zone_device *tzone; unsigned long *thresholds; + unsigned long crit_temp; + int crit_trip_id; + unsigned long psv_temp; + int psv_trip_id; }; static int sys_get_curr_temp(struct thermal_zone_device *tzone, @@ -79,12 +83,18 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzone, struct acpi_device *device = tzone->devdata; struct int3403_sensor *obj = acpi_driver_data(device); - /* - * get_trip_temp is a mandatory callback but - * PATx method doesn't return any value, so return - * cached value, which was last set from user space. - */ - *temp = obj->thresholds[trip]; + if (trip == obj->crit_trip_id) + *temp = obj->crit_temp; + else if (trip == obj->psv_trip_id) + *temp = obj->psv_temp; + else { + /* + * get_trip_temp is a mandatory callback but + * PATx method doesn't return any value, so return + * cached value, which was last set from user space. + */ + *temp = obj->thresholds[trip]; + } return 0; } @@ -92,8 +102,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzone, static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip, enum thermal_trip_type *type) { + struct acpi_device *device = thermal->devdata; + struct int3403_sensor *obj = acpi_driver_data(device); + /* Mandatory callback, may not mean much here */ - *type = THERMAL_TRIP_PASSIVE; + if (trip == obj->crit_trip_id) + *type = THERMAL_TRIP_CRITICAL; + else + *type = THERMAL_TRIP_PASSIVE; return 0; } @@ -155,6 +171,34 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) } } +static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp) +{ + unsigned long long crt; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET); + + return 0; +} + +static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp) +{ + unsigned long long psv; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET); + + return 0; +} + static int acpi_int3403_add(struct acpi_device *device) { int result = 0; @@ -194,6 +238,15 @@ static int acpi_int3403_add(struct acpi_device *device) return -ENOMEM; trip_mask = BIT(trip_cnt) - 1; } + + obj->psv_trip_id = -1; + if (!sys_get_trip_psv(device, &obj->psv_temp)) + obj->psv_trip_id = trip_cnt++; + + obj->crit_trip_id = -1; + if (!sys_get_trip_crt(device, &obj->crit_temp)) + obj->crit_trip_id = trip_cnt++; + obj->tzone = thermal_zone_device_register(acpi_device_bid(device), trip_cnt, trip_mask, device, &tzone_ops, NULL, 0, 0); diff --git 
a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index d7ca9f49c9cb..acbff14da3a4 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -505,6 +505,10 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id) static const struct of_device_id exynos_tmu_match[] = { { + .compatible = "samsung,exynos3250-tmu", + .data = (void *)EXYNOS3250_TMU_DRV_DATA, + }, + { .compatible = "samsung,exynos4210-tmu", .data = (void *)EXYNOS4210_TMU_DRV_DATA, }, @@ -677,7 +681,8 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk_sec; } - if (pdata->type == SOC_ARCH_EXYNOS4210 || + if (pdata->type == SOC_ARCH_EXYNOS3250 || + pdata->type == SOC_ARCH_EXYNOS4210 || pdata->type == SOC_ARCH_EXYNOS4412 || pdata->type == SOC_ARCH_EXYNOS5250 || pdata->type == SOC_ARCH_EXYNOS5260 || @@ -759,10 +764,10 @@ static int exynos_tmu_remove(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - exynos_tmu_control(pdev, false); - exynos_unregister_thermal(data->reg_conf); + exynos_tmu_control(pdev, false); + clk_unprepare(data->clk); if (!IS_ERR(data->clk_sec)) clk_unprepare(data->clk_sec); diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index edd08cf76729..1b4a6444ea61 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -40,7 +40,8 @@ enum calibration_mode { }; enum soc_type { - SOC_ARCH_EXYNOS4210 = 1, + SOC_ARCH_EXYNOS3250 = 1, + SOC_ARCH_EXYNOS4210, SOC_ARCH_EXYNOS4412, SOC_ARCH_EXYNOS5250, SOC_ARCH_EXYNOS5260, diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c index c1d81dcd7819..aa8e0dee2055 100644 --- a/drivers/thermal/samsung/exynos_tmu_data.c +++ b/drivers/thermal/samsung/exynos_tmu_data.c @@ -90,6 +90,95 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = { }; #endif +#if defined(CONFIG_SOC_EXYNOS3250) +static const struct exynos_tmu_registers exynos3250_tmu_registers = { + .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, + .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, + .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, + .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, + .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT, + .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT, + .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK, + .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, + .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, + .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, + .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT, + .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK, + .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT, + .tmu_status = EXYNOS_TMU_REG_STATUS, + .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, + .threshold_th0 = EXYNOS_THD_TEMP_RISE, + .threshold_th1 = EXYNOS_THD_TEMP_FALL, + .tmu_inten = EXYNOS_TMU_REG_INTEN, + .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT, + .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT, + .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT, + .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, + .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, + .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, + .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT, + .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT, + .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK, + .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK, + .emul_con = EXYNOS_EMUL_CON, + .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, + .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, + .emul_time_mask = 
EXYNOS_EMUL_TIME_MASK, +}; + +#define EXYNOS3250_TMU_DATA \ + .threshold_falling = 10, \ + .trigger_levels[0] = 70, \ + .trigger_levels[1] = 95, \ + .trigger_levels[2] = 110, \ + .trigger_levels[3] = 120, \ + .trigger_enable[0] = true, \ + .trigger_enable[1] = true, \ + .trigger_enable[2] = true, \ + .trigger_enable[3] = false, \ + .trigger_type[0] = THROTTLE_ACTIVE, \ + .trigger_type[1] = THROTTLE_ACTIVE, \ + .trigger_type[2] = SW_TRIP, \ + .trigger_type[3] = HW_TRIP, \ + .max_trigger_level = 4, \ + .gain = 8, \ + .reference_voltage = 16, \ + .noise_cancel_mode = 4, \ + .cal_type = TYPE_TWO_POINT_TRIMMING, \ + .efuse_value = 55, \ + .min_efuse_value = 40, \ + .max_efuse_value = 100, \ + .first_point_trim = 25, \ + .second_point_trim = 85, \ + .default_temp_offset = 50, \ + .freq_tab[0] = { \ + .freq_clip_max = 800 * 1000, \ + .temp_level = 70, \ + }, \ + .freq_tab[1] = { \ + .freq_clip_max = 400 * 1000, \ + .temp_level = 95, \ + }, \ + .freq_tab_count = 2, \ + .registers = &exynos3250_tmu_registers, \ + .features = (TMU_SUPPORT_EMULATION | \ + TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ + TMU_SUPPORT_EMUL_TIME) +#endif + +#if defined(CONFIG_SOC_EXYNOS3250) +struct exynos_tmu_init_data const exynos3250_default_tmu_data = { + .tmu_data = { + { + EXYNOS3250_TMU_DATA, + .type = SOC_ARCH_EXYNOS3250, + .test_mux = EXYNOS4412_MUX_ADDR_VALUE, + }, + }, + .tmu_count = 1, +}; +#endif + #if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250) static const struct exynos_tmu_registers exynos4412_tmu_registers = { .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h index d268981b65e5..f0979e598491 100644 --- a/drivers/thermal/samsung/exynos_tmu_data.h +++ b/drivers/thermal/samsung/exynos_tmu_data.h @@ -148,6 +148,13 @@ #define EXYNOS5440_TMU_TH_RISE4_SHIFT 24 #define EXYNOS5440_EFUSE_SWAP_OFFSET 8 +#if defined(CONFIG_SOC_EXYNOS3250) +extern struct exynos_tmu_init_data const exynos3250_default_tmu_data; +#define EXYNOS3250_TMU_DRV_DATA (&exynos3250_default_tmu_data) +#else +#define EXYNOS3250_TMU_DRV_DATA (NULL) +#endif + #if defined(CONFIG_CPU_EXYNOS4210) extern struct exynos_tmu_init_data const exynos4210_default_tmu_data; #define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data) diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig new file mode 100644 index 000000000000..490fdbe22eea --- /dev/null +++ b/drivers/thermal/st/Kconfig @@ -0,0 +1,12 @@ +config ST_THERMAL + tristate "Thermal sensors on STMicroelectronics STi series of SoCs" + help + Support for thermal sensors on STMicroelectronics STi series of SoCs. 
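The exynos3250 hunks above follow the usual devicetree pattern: an of_device_id entry binds the "samsung,exynos3250-tmu" compatible string to per-SoC init data, and EXYNOS3250_TMU_DRV_DATA degrades to NULL when CONFIG_SOC_EXYNOS3250 is not set, so the probe path has to tolerate a missing .data pointer (the ST driver added below guards the same way after of_match_device()). A minimal sketch of that lookup follows; the my_* names are hypothetical and not part of this patch:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_tmu_init_data {
	int trigger_level0;	/* first trip point, degrees C */
};

static const struct my_tmu_init_data my_exynos3250_data = {
	.trigger_level0 = 70,
};

static const struct of_device_id my_tmu_match[] = {
	{ .compatible = "samsung,exynos3250-tmu", .data = &my_exynos3250_data },
	{ /* sentinel */ },
};

static int my_tmu_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct my_tmu_init_data *data;

	/* a NULL .data means support for this SoC was not built in */
	match = of_match_device(my_tmu_match, &pdev->dev);
	if (!match || !match->data)
		return -EINVAL;

	data = match->data;
	dev_info(&pdev->dev, "trip 0 at %d C\n", data->trigger_level0);
	return 0;
}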
+ +config ST_THERMAL_SYSCFG + select ST_THERMAL + tristate "STi series syscfg register access based thermal sensors" + +config ST_THERMAL_MEMMAP + select ST_THERMAL + tristate "STi series memory mapped access based thermal sensors" diff --git a/drivers/thermal/st/Makefile b/drivers/thermal/st/Makefile new file mode 100644 index 000000000000..b38878977bd8 --- /dev/null +++ b/drivers/thermal/st/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ST_THERMAL) := st_thermal.o +obj-$(CONFIG_ST_THERMAL_SYSCFG) += st_thermal_syscfg.o +obj-$(CONFIG_ST_THERMAL_MEMMAP) += st_thermal_memmap.o diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c new file mode 100644 index 000000000000..90163b384660 --- /dev/null +++ b/drivers/thermal/st/st_thermal.c @@ -0,0 +1,313 @@ +/* + * ST Thermal Sensor Driver core routines + * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> + +#include "st_thermal.h" + +/* The Thermal Framework expects millidegrees */ +#define mcelsius(temp) ((temp) * 1000) + +/* + * Function to allocate regfields which are common + * between syscfg and memory mapped based sensors + */ +int st_thermal_alloc_regfields(struct st_thermal_sensor *sensor) +{ + struct device *dev = sensor->dev; + struct regmap *regmap = sensor->regmap; + const struct reg_field *reg_fields = sensor->cdata->reg_fields; + + sensor->dcorrect = devm_regmap_field_alloc(dev, regmap, + reg_fields[DCORRECT]); + + sensor->overflow = devm_regmap_field_alloc(dev, regmap, + reg_fields[OVERFLOW]); + + sensor->temp_data = devm_regmap_field_alloc(dev, regmap, + reg_fields[DATA]); + + if (IS_ERR(sensor->dcorrect) || + IS_ERR(sensor->overflow) || + IS_ERR(sensor->temp_data)) { + dev_err(dev, "failed to allocate common regfields\n"); + return -EINVAL; + } + + return sensor->ops->alloc_regfields(sensor); +} + +static int st_thermal_sensor_on(struct st_thermal_sensor *sensor) +{ + int ret; + struct device *dev = sensor->dev; + + ret = clk_prepare_enable(sensor->clk); + if (ret) { + dev_err(dev, "failed to enable clk\n"); + return ret; + } + + ret = sensor->ops->power_ctrl(sensor, POWER_ON); + if (ret) { + dev_err(dev, "failed to power on sensor\n"); + clk_disable_unprepare(sensor->clk); + } + + return ret; +} + +static int st_thermal_sensor_off(struct st_thermal_sensor *sensor) +{ + int ret; + + ret = sensor->ops->power_ctrl(sensor, POWER_OFF); + if (ret) + return ret; + + clk_disable_unprepare(sensor->clk); + + return 0; +} + +static int st_thermal_calibration(struct st_thermal_sensor *sensor) +{ + int ret; + unsigned int val; + struct device *dev = sensor->dev; + + /* Check if sensor calibration data is already written */ + ret = regmap_field_read(sensor->dcorrect, &val); + if (ret) { + dev_err(dev, "failed to read calibration data\n"); + return ret; + } + + if (!val) { + /* + * Sensor calibration value not set by bootloader, + * default calibration data to be used + */ + ret = regmap_field_write(sensor->dcorrect, + sensor->cdata->calibration_val); + if (ret) + dev_err(dev, "failed to set calibration data\n"); + } + + return ret; +} + +/* Callback to get temperature from HW*/ +static int 
st_thermal_get_temp(struct thermal_zone_device *th, + unsigned long *temperature) +{ + struct st_thermal_sensor *sensor = th->devdata; + struct device *dev = sensor->dev; + unsigned int temp; + unsigned int overflow; + int ret; + + ret = regmap_field_read(sensor->overflow, &overflow); + if (ret) + return ret; + if (overflow) + return -EIO; + + ret = regmap_field_read(sensor->temp_data, &temp); + if (ret) + return ret; + + temp += sensor->cdata->temp_adjust_val; + temp = mcelsius(temp); + + dev_dbg(dev, "temperature: %d\n", temp); + + *temperature = temp; + + return 0; +} + +static int st_thermal_get_trip_type(struct thermal_zone_device *th, + int trip, enum thermal_trip_type *type) +{ + struct st_thermal_sensor *sensor = th->devdata; + struct device *dev = sensor->dev; + + switch (trip) { + case 0: + *type = THERMAL_TRIP_CRITICAL; + break; + default: + dev_err(dev, "invalid trip point\n"); + return -EINVAL; + } + + return 0; +} + +static int st_thermal_get_trip_temp(struct thermal_zone_device *th, + int trip, unsigned long *temp) +{ + struct st_thermal_sensor *sensor = th->devdata; + struct device *dev = sensor->dev; + + switch (trip) { + case 0: + *temp = mcelsius(sensor->cdata->crit_temp); + break; + default: + dev_err(dev, "Invalid trip point\n"); + return -EINVAL; + } + + return 0; +} + +static struct thermal_zone_device_ops st_tz_ops = { + .get_temp = st_thermal_get_temp, + .get_trip_type = st_thermal_get_trip_type, + .get_trip_temp = st_thermal_get_trip_temp, +}; + +int st_thermal_register(struct platform_device *pdev, + const struct of_device_id *st_thermal_of_match) +{ + struct st_thermal_sensor *sensor; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct of_device_id *match; + + int polling_delay; + int ret; + + if (!np) { + dev_err(dev, "device tree node not found\n"); + return -EINVAL; + } + + sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL); + if (!sensor) + return -ENOMEM; + + sensor->dev = dev; + + match = of_match_device(st_thermal_of_match, dev); + if (!(match && match->data)) + return -EINVAL; + + sensor->cdata = match->data; + if (!sensor->cdata->ops) + return -EINVAL; + + sensor->ops = sensor->cdata->ops; + + ret = sensor->ops->regmap_init(sensor); + if (ret) + return ret; + + ret = st_thermal_alloc_regfields(sensor); + if (ret) + return ret; + + sensor->clk = devm_clk_get(dev, "thermal"); + if (IS_ERR(sensor->clk)) { + dev_err(dev, "failed to fetch clock\n"); + return PTR_ERR(sensor->clk); + } + + if (sensor->ops->register_enable_irq) { + ret = sensor->ops->register_enable_irq(sensor); + if (ret) + return ret; + } + + ret = st_thermal_sensor_on(sensor); + if (ret) + return ret; + + ret = st_thermal_calibration(sensor); + if (ret) + goto sensor_off; + + polling_delay = sensor->ops->register_enable_irq ? 
0 : 1000; + + sensor->thermal_dev = + thermal_zone_device_register(dev_name(dev), 1, 0, sensor, + &st_tz_ops, NULL, 0, polling_delay); + if (IS_ERR(sensor->thermal_dev)) { + dev_err(dev, "failed to register thermal zone device\n"); + ret = PTR_ERR(sensor->thermal_dev); + goto sensor_off; + } + + platform_set_drvdata(pdev, sensor); + + return 0; + +sensor_off: + st_thermal_sensor_off(sensor); + + return ret; +} +EXPORT_SYMBOL_GPL(st_thermal_register); + +int st_thermal_unregister(struct platform_device *pdev) +{ + struct st_thermal_sensor *sensor = platform_get_drvdata(pdev); + + st_thermal_sensor_off(sensor); + thermal_zone_device_unregister(sensor->thermal_dev); + + return 0; +} +EXPORT_SYMBOL_GPL(st_thermal_unregister); + +static int st_thermal_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct st_thermal_sensor *sensor = platform_get_drvdata(pdev); + + return st_thermal_sensor_off(sensor); +} + +static int st_thermal_resume(struct device *dev) +{ + int ret; + struct platform_device *pdev = to_platform_device(dev); + struct st_thermal_sensor *sensor = platform_get_drvdata(pdev); + + ret = st_thermal_sensor_on(sensor); + if (ret) + return ret; + + ret = st_thermal_calibration(sensor); + if (ret) + return ret; + + if (sensor->ops->enable_irq) { + ret = sensor->ops->enable_irq(sensor); + if (ret) + return ret; + } + + return 0; +} +SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); +EXPORT_SYMBOL_GPL(st_thermal_pm_ops); + +MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/st/st_thermal.h b/drivers/thermal/st/st_thermal.h new file mode 100644 index 000000000000..fecafbe10fa7 --- /dev/null +++ b/drivers/thermal/st/st_thermal.h @@ -0,0 +1,104 @@ +/* + * ST Thermal Sensor Driver for STi series of SoCs + * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __STI_THERMAL_SYSCFG_H +#define __STI_THERMAL_SYSCFG_H + +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/thermal.h> + +enum st_thermal_regfield_ids { + INT_THRESH_HI = 0, /* Top two regfield IDs are mutually exclusive */ + TEMP_PWR = 0, + DCORRECT, + OVERFLOW, + DATA, + INT_ENABLE, + + MAX_REGFIELDS +}; + +/* Thermal sensor power states */ +enum st_thermal_power_state { + POWER_OFF = 0, + POWER_ON +}; + +struct st_thermal_sensor; + +/** + * Description of private thermal sensor ops. + * + * @power_ctrl: Function for powering on/off a sensor. Clock to the + * sensor is also controlled from this function. + * @alloc_regfields: Allocate regmap register fields, specific to a sensor. + * @do_memmap_regmap: Memory map the thermal register space and init regmap + * instance or find regmap instance. + * @register_irq: Register an interrupt handler for a sensor. 
+ */ +struct st_thermal_sensor_ops { + int (*power_ctrl)(struct st_thermal_sensor *, enum st_thermal_power_state); + int (*alloc_regfields)(struct st_thermal_sensor *); + int (*regmap_init)(struct st_thermal_sensor *); + int (*register_enable_irq)(struct st_thermal_sensor *); + int (*enable_irq)(struct st_thermal_sensor *); +}; + +/** + * Description of thermal driver compatible data. + * + * @reg_fields: Pointer to the regfields array for a sensor. + * @sys_compat: Pointer to the syscon node compatible string. + * @ops: Pointer to private thermal ops for a sensor. + * @calibration_val: Default calibration value to be written to the DCORRECT + * register field for a sensor. + * @temp_adjust_val: Value to be added/subtracted from the data read from + * the sensor. If value needs to be added please provide a + * positive value and if it is to be subtracted please + * provide a negative value. + * @crit_temp: The temperature beyond which the SoC should be shutdown + * to prevent damage. + */ +struct st_thermal_compat_data { + char *sys_compat; + const struct reg_field *reg_fields; + const struct st_thermal_sensor_ops *ops; + unsigned int calibration_val; + int temp_adjust_val; + int crit_temp; +}; + +struct st_thermal_sensor { + struct device *dev; + struct thermal_zone_device *thermal_dev; + const struct st_thermal_sensor_ops *ops; + const struct st_thermal_compat_data *cdata; + struct clk *clk; + struct regmap *regmap; + struct regmap_field *pwr; + struct regmap_field *dcorrect; + struct regmap_field *overflow; + struct regmap_field *temp_data; + struct regmap_field *int_thresh_hi; + struct regmap_field *int_enable; + int irq; + void __iomem *mmio_base; +}; + +extern int st_thermal_register(struct platform_device *pdev, + const struct of_device_id *st_thermal_of_match); +extern int st_thermal_unregister(struct platform_device *pdev); +extern const struct dev_pm_ops st_thermal_pm_ops; + +#endif /* __STI_RESET_SYSCFG_H */ diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c new file mode 100644 index 000000000000..39896ce2ee00 --- /dev/null +++ b/drivers/thermal/st/st_thermal_memmap.c @@ -0,0 +1,209 @@ +/* + * ST Thermal Sensor Driver for memory mapped sensors. + * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/of.h> +#include <linux/module.h> + +#include "st_thermal.h" + +#define STIH416_MPE_CONF 0x0 +#define STIH416_MPE_STATUS 0x4 +#define STIH416_MPE_INT_THRESH 0x8 +#define STIH416_MPE_INT_EN 0xC + +/* Power control bits for the memory mapped thermal sensor */ +#define THERMAL_PDN BIT(4) +#define THERMAL_SRSTN BIT(10) + +static const struct reg_field st_mmap_thermal_regfields[MAX_REGFIELDS] = { + /* + * According to the STIH416 MPE temp sensor data sheet - + * the PDN (Power Down Bit) and SRSTN (Soft Reset Bit) need to be + * written simultaneously for powering on and off the temperature + * sensor. regmap_update_bits() will be used to update the register. 
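The note above explains why PDN and SRSTN are driven through regmap_update_bits(), and the REG_FIELD() entries that follow describe each field as a (register, lsb, msb) triple. Both boil down to mask-and-shift arithmetic. A minimal standalone sketch of that arithmetic, with made-up register values; the helper names (field_mask, field_read, update_bits) are illustrative, not kernel symbols:

        #include <stdio.h>

        /* Mask covering bits lsb..msb (inclusive); the sensor fields are all narrow. */
        static unsigned int field_mask(unsigned int lsb, unsigned int msb)
        {
                return ((1u << (msb - lsb + 1)) - 1u) << lsb;
        }

        /* REG_FIELD-style read: isolate the field, then shift it down to bit 0. */
        static unsigned int field_read(unsigned int reg, unsigned int lsb, unsigned int msb)
        {
                return (reg & field_mask(lsb, msb)) >> lsb;
        }

        /* update_bits-style write: change only the masked bits, keep the rest. */
        static unsigned int update_bits(unsigned int reg, unsigned int mask, unsigned int val)
        {
                return (reg & ~mask) | (val & mask);
        }

        int main(void)
        {
                unsigned int status = 0x0005a800;          /* made-up MPE_STATUS value */
                unsigned int conf = 0;                     /* made-up MPE_CONF value   */
                unsigned int pwr = (1u << 4) | (1u << 10); /* PDN | SRSTN together     */

                printf("DATA (bits 11..18): %u\n", field_read(status, 11, 18));
                printf("OVERFLOW (bit 9):   %u\n", field_read(status, 9, 9));
                printf("power on:  0x%08x\n", update_bits(conf, pwr, pwr));
                printf("power off: 0x%08x\n", update_bits(conf, pwr, 0));
                return 0;
        }

This is roughly what regmap_field_read() and regmap_update_bits() do on the driver's behalf once the fields have been allocated.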
+ */ + [INT_THRESH_HI] = REG_FIELD(STIH416_MPE_INT_THRESH, 0, 7), + [DCORRECT] = REG_FIELD(STIH416_MPE_CONF, 5, 9), + [OVERFLOW] = REG_FIELD(STIH416_MPE_STATUS, 9, 9), + [DATA] = REG_FIELD(STIH416_MPE_STATUS, 11, 18), + [INT_ENABLE] = REG_FIELD(STIH416_MPE_INT_EN, 0, 0), +}; + +static irqreturn_t st_mmap_thermal_trip_handler(int irq, void *sdata) +{ + struct st_thermal_sensor *sensor = sdata; + + thermal_zone_device_update(sensor->thermal_dev); + + return IRQ_HANDLED; +} + +/* Private ops for the Memory Mapped based thermal sensors */ +static int st_mmap_power_ctrl(struct st_thermal_sensor *sensor, + enum st_thermal_power_state power_state) +{ + const unsigned int mask = (THERMAL_PDN | THERMAL_SRSTN); + const unsigned int val = power_state ? mask : 0; + + return regmap_update_bits(sensor->regmap, STIH416_MPE_CONF, mask, val); +} + +static int st_mmap_alloc_regfields(struct st_thermal_sensor *sensor) +{ + struct device *dev = sensor->dev; + struct regmap *regmap = sensor->regmap; + const struct reg_field *reg_fields = sensor->cdata->reg_fields; + + sensor->int_thresh_hi = devm_regmap_field_alloc(dev, regmap, + reg_fields[INT_THRESH_HI]); + sensor->int_enable = devm_regmap_field_alloc(dev, regmap, + reg_fields[INT_ENABLE]); + + if (IS_ERR(sensor->int_thresh_hi) || IS_ERR(sensor->int_enable)) { + dev_err(dev, "failed to alloc mmap regfields\n"); + return -EINVAL; + } + + return 0; +} + +static int st_mmap_enable_irq(struct st_thermal_sensor *sensor) +{ + int ret; + + /* Set upper critical threshold */ + ret = regmap_field_write(sensor->int_thresh_hi, + sensor->cdata->crit_temp - + sensor->cdata->temp_adjust_val); + if (ret) + return ret; + + return regmap_field_write(sensor->int_enable, 1); +} + +static int st_mmap_register_enable_irq(struct st_thermal_sensor *sensor) +{ + struct device *dev = sensor->dev; + struct platform_device *pdev = to_platform_device(dev); + int ret; + + sensor->irq = platform_get_irq(pdev, 0); + if (sensor->irq < 0) { + dev_err(dev, "failed to register IRQ\n"); + return sensor->irq; + } + + ret = devm_request_threaded_irq(dev, sensor->irq, + NULL, st_mmap_thermal_trip_handler, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + dev->driver->name, sensor); + if (ret) { + dev_err(dev, "failed to register IRQ %d\n", sensor->irq); + return ret; + } + + return st_mmap_enable_irq(sensor); +} + +static const struct regmap_config st_416mpe_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static int st_mmap_regmap_init(struct st_thermal_sensor *sensor) +{ + struct device *dev = sensor->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "no memory resources defined\n"); + return -ENODEV; + } + + sensor->mmio_base = devm_ioremap_resource(dev, res); + if (IS_ERR(sensor->mmio_base)) { + dev_err(dev, "failed to remap IO\n"); + return PTR_ERR(sensor->mmio_base); + } + + sensor->regmap = devm_regmap_init_mmio(dev, sensor->mmio_base, + &st_416mpe_regmap_config); + if (IS_ERR(sensor->regmap)) { + dev_err(dev, "failed to initialise regmap\n"); + return PTR_ERR(sensor->regmap); + } + + return 0; +} + +static const struct st_thermal_sensor_ops st_mmap_sensor_ops = { + .power_ctrl = st_mmap_power_ctrl, + .alloc_regfields = st_mmap_alloc_regfields, + .regmap_init = st_mmap_regmap_init, + .register_enable_irq = st_mmap_register_enable_irq, + .enable_irq = st_mmap_enable_irq, +}; + +/* Compatible device data stih416 mpe thermal sensor */ +const struct 
st_thermal_compat_data st_416mpe_cdata = { + .reg_fields = st_mmap_thermal_regfields, + .ops = &st_mmap_sensor_ops, + .calibration_val = 14, + .temp_adjust_val = -95, + .crit_temp = 120, +}; + +/* Compatible device data stih407 thermal sensor */ +const struct st_thermal_compat_data st_407_cdata = { + .reg_fields = st_mmap_thermal_regfields, + .ops = &st_mmap_sensor_ops, + .calibration_val = 16, + .temp_adjust_val = -95, + .crit_temp = 120, +}; + +static struct of_device_id st_mmap_thermal_of_match[] = { + { .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata }, + { .compatible = "st,stih407-thermal", .data = &st_407_cdata }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, st_mmap_thermal_of_match); + +int st_mmap_probe(struct platform_device *pdev) +{ + return st_thermal_register(pdev, st_mmap_thermal_of_match); +} + +int st_mmap_remove(struct platform_device *pdev) +{ + return st_thermal_unregister(pdev); +} + +static struct platform_driver st_mmap_thermal_driver = { + .driver = { + .name = "st_thermal_mmap", + .owner = THIS_MODULE, + .pm = &st_thermal_pm_ops, + .of_match_table = st_mmap_thermal_of_match, + }, + .probe = st_mmap_probe, + .remove = st_mmap_remove, +}; + +module_platform_driver(st_mmap_thermal_driver); + +MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/st/st_thermal_syscfg.c b/drivers/thermal/st/st_thermal_syscfg.c new file mode 100644 index 000000000000..888b58e64090 --- /dev/null +++ b/drivers/thermal/st/st_thermal_syscfg.c @@ -0,0 +1,179 @@ +/* + * ST Thermal Sensor Driver for syscfg based sensors. + * Author: Ajit Pal Singh <ajitpal.singh@st.com> + * + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/of.h> +#include <linux/module.h> +#include <linux/mfd/syscon.h> + +#include "st_thermal.h" + +/* STiH415 */ +#define STIH415_SYSCFG_FRONT(num) ((num - 100) * 4) +#define STIH415_SAS_THSENS_CONF STIH415_SYSCFG_FRONT(178) +#define STIH415_SAS_THSENS_STATUS STIH415_SYSCFG_FRONT(198) +#define STIH415_SYSCFG_MPE(num) ((num - 600) * 4) +#define STIH415_MPE_THSENS_CONF STIH415_SYSCFG_MPE(607) +#define STIH415_MPE_THSENS_STATUS STIH415_SYSCFG_MPE(667) + +/* STiH416 */ +#define STIH416_SYSCFG_FRONT(num) ((num - 1000) * 4) +#define STIH416_SAS_THSENS_CONF STIH416_SYSCFG_FRONT(1552) +#define STIH416_SAS_THSENS_STATUS1 STIH416_SYSCFG_FRONT(1554) +#define STIH416_SAS_THSENS_STATUS2 STIH416_SYSCFG_FRONT(1594) + +/* STiD127 */ +#define STID127_SYSCFG_CPU(num) ((num - 700) * 4) +#define STID127_THSENS_CONF STID127_SYSCFG_CPU(743) +#define STID127_THSENS_STATUS STID127_SYSCFG_CPU(767) + +static const struct reg_field st_415sas_regfields[MAX_REGFIELDS] = { + [TEMP_PWR] = REG_FIELD(STIH415_SAS_THSENS_CONF, 9, 9), + [DCORRECT] = REG_FIELD(STIH415_SAS_THSENS_CONF, 4, 8), + [OVERFLOW] = REG_FIELD(STIH415_SAS_THSENS_STATUS, 8, 8), + [DATA] = REG_FIELD(STIH415_SAS_THSENS_STATUS, 10, 16), +}; + +static const struct reg_field st_415mpe_regfields[MAX_REGFIELDS] = { + [TEMP_PWR] = REG_FIELD(STIH415_MPE_THSENS_CONF, 8, 8), + [DCORRECT] = REG_FIELD(STIH415_MPE_THSENS_CONF, 3, 7), + [OVERFLOW] = REG_FIELD(STIH415_MPE_THSENS_STATUS, 9, 9), + [DATA] = REG_FIELD(STIH415_MPE_THSENS_STATUS, 11, 18), +}; + +static const struct reg_field st_416sas_regfields[MAX_REGFIELDS] = { + [TEMP_PWR] = REG_FIELD(STIH416_SAS_THSENS_CONF, 9, 9), + [DCORRECT] = REG_FIELD(STIH416_SAS_THSENS_CONF, 4, 8), + [OVERFLOW] = REG_FIELD(STIH416_SAS_THSENS_STATUS1, 8, 8), + [DATA] = REG_FIELD(STIH416_SAS_THSENS_STATUS2, 10, 16), +}; + +static const struct reg_field st_127_regfields[MAX_REGFIELDS] = { + [TEMP_PWR] = REG_FIELD(STID127_THSENS_CONF, 7, 7), + [DCORRECT] = REG_FIELD(STID127_THSENS_CONF, 2, 6), + [OVERFLOW] = REG_FIELD(STID127_THSENS_STATUS, 9, 9), + [DATA] = REG_FIELD(STID127_THSENS_STATUS, 11, 18), +}; + +/* Private OPs for System Configuration Register based thermal sensors */ +static int st_syscfg_power_ctrl(struct st_thermal_sensor *sensor, + enum st_thermal_power_state power_state) +{ + return regmap_field_write(sensor->pwr, power_state); +} + +static int st_syscfg_alloc_regfields(struct st_thermal_sensor *sensor) +{ + struct device *dev = sensor->dev; + + sensor->pwr = devm_regmap_field_alloc(dev, sensor->regmap, + sensor->cdata->reg_fields[TEMP_PWR]); + + if (IS_ERR(sensor->pwr)) { + dev_err(dev, "failed to alloc syscfg regfields\n"); + return PTR_ERR(sensor->pwr); + } + + return 0; +} + +static int st_syscfg_regmap_init(struct st_thermal_sensor *sensor) +{ + sensor->regmap = + syscon_regmap_lookup_by_compatible(sensor->cdata->sys_compat); + if (IS_ERR(sensor->regmap)) { + dev_err(sensor->dev, "failed to find syscfg regmap\n"); + return PTR_ERR(sensor->regmap); + } + + return 0; +} + +static const struct st_thermal_sensor_ops st_syscfg_sensor_ops = { + .power_ctrl = st_syscfg_power_ctrl, + .alloc_regfields = st_syscfg_alloc_regfields, + .regmap_init = st_syscfg_regmap_init, +}; + +/* Compatible device data for stih415 sas thermal sensor */ +const struct st_thermal_compat_data st_415sas_cdata = { + .sys_compat = "st,stih415-front-syscfg", + .reg_fields = st_415sas_regfields, + .ops = &st_syscfg_sensor_ops, + .calibration_val = 16, + .temp_adjust_val = 20, + .crit_temp = 120, +}; + +/* Compatible device data for 
stih415 mpe thermal sensor */ +const struct st_thermal_compat_data st_415mpe_cdata = { + .sys_compat = "st,stih415-system-syscfg", + .reg_fields = st_415mpe_regfields, + .ops = &st_syscfg_sensor_ops, + .calibration_val = 16, + .temp_adjust_val = -103, + .crit_temp = 120, +}; + +/* Compatible device data for stih416 sas thermal sensor */ +const struct st_thermal_compat_data st_416sas_cdata = { + .sys_compat = "st,stih416-front-syscfg", + .reg_fields = st_416sas_regfields, + .ops = &st_syscfg_sensor_ops, + .calibration_val = 16, + .temp_adjust_val = 20, + .crit_temp = 120, +}; + +/* Compatible device data for stid127 thermal sensor */ +const struct st_thermal_compat_data st_127_cdata = { + .sys_compat = "st,stid127-cpu-syscfg", + .reg_fields = st_127_regfields, + .ops = &st_syscfg_sensor_ops, + .calibration_val = 8, + .temp_adjust_val = -103, + .crit_temp = 120, +}; + +static struct of_device_id st_syscfg_thermal_of_match[] = { + { .compatible = "st,stih415-sas-thermal", .data = &st_415sas_cdata }, + { .compatible = "st,stih415-mpe-thermal", .data = &st_415mpe_cdata }, + { .compatible = "st,stih416-sas-thermal", .data = &st_416sas_cdata }, + { .compatible = "st,stid127-thermal", .data = &st_127_cdata }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, st_syscfg_thermal_of_match); + +int st_syscfg_probe(struct platform_device *pdev) +{ + return st_thermal_register(pdev, st_syscfg_thermal_of_match); +} + +int st_syscfg_remove(struct platform_device *pdev) +{ + return st_thermal_unregister(pdev); +} + +static struct platform_driver st_syscfg_thermal_driver = { + .driver = { + .name = "st_syscfg_thermal", + .owner = THIS_MODULE, + .pm = &st_thermal_pm_ops, + .of_match_table = st_syscfg_thermal_of_match, + }, + .probe = st_syscfg_probe, + .remove = st_syscfg_remove, +}; +module_platform_driver(st_syscfg_thermal_driver); + +MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index 8fcf8a7b6c22..9562cd026dc0 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -150,7 +150,26 @@ int tb_path_activate(struct tb_path *path) /* Activate hops. */ for (i = path->path_length - 1; i >= 0; i--) { - struct tb_regs_hop hop; + struct tb_regs_hop hop = { 0 }; + + /* + * We do (currently) not tear down paths setup by the firmeware. + * If a firmware device is unplugged and plugged in again then + * it can happen that we reuse some of the hops from the (now + * defunct) firmeware path. This causes the hotplug operation to + * fail (the pci device does not show up). Clearing the hop + * before overwriting it fixes the problem. + * + * Should be removed once we discover and tear down firmeware + * paths. + */ + res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, + 2 * path->hops[i].in_hop_index, 2); + if (res) { + __tb_path_deactivate_hops(path, i); + __tb_path_deallocate_nfc(path, 0); + goto err; + } /* dword 0 */ hop.next_hop = path->hops[i].next_hop_index; diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index 0419b69e270f..4f485e88f60c 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c @@ -108,55 +108,23 @@ static void disable_tx_interrupt(struct ehv_bc_data *bc) * * The byte channel to be used for the console is specified via a "stdout" * property in the /chosen node. 
- * - * For compatible with legacy device trees, we also look for a "stdout" alias. */ static int find_console_handle(void) { - struct device_node *np, *np2; + struct device_node *np = of_stdout; const char *sprop = NULL; const uint32_t *iprop; - np = of_find_node_by_path("/chosen"); - if (np) - sprop = of_get_property(np, "stdout-path", NULL); - - if (!np || !sprop) { - of_node_put(np); - np = of_find_node_by_name(NULL, "aliases"); - if (np) - sprop = of_get_property(np, "stdout", NULL); - } - - if (!sprop) { - of_node_put(np); - return 0; - } - /* We don't care what the aliased node is actually called. We only * care if it's compatible with "epapr,hv-byte-channel", because that - * indicates that it's a byte channel node. We use a temporary - * variable, 'np2', because we can't release 'np' until we're done with - * 'sprop'. + * indicates that it's a byte channel node. */ - np2 = of_find_node_by_path(sprop); - of_node_put(np); - np = np2; - if (!np) { - pr_warning("ehv-bc: stdout node '%s' does not exist\n", sprop); - return 0; - } - - /* Is it a byte channel? */ - if (!of_device_is_compatible(np, "epapr,hv-byte-channel")) { - of_node_put(np); + if (!np || !of_device_is_compatible(np, "epapr,hv-byte-channel")) return 0; - } stdout_irq = irq_of_parse_and_map(np, 0); if (stdout_irq == NO_IRQ) { - pr_err("ehv-bc: no 'interrupts' property in %s node\n", sprop); - of_node_put(np); + pr_err("ehv-bc: no 'interrupts' property in %s node\n", np->full_name); return 0; } @@ -167,12 +135,9 @@ static int find_console_handle(void) if (!iprop) { pr_err("ehv-bc: no 'hv-handle' property in %s node\n", np->name); - of_node_put(np); return 0; } stdout_bc = be32_to_cpu(*iprop); - - of_node_put(np); return 1; } diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c index a585079b4b38..a2cc5f834c63 100644 --- a/drivers/tty/hvc/hvc_opal.c +++ b/drivers/tty/hvc/hvc_opal.c @@ -342,22 +342,13 @@ static void udbg_init_opal_common(void) void __init hvc_opal_init_early(void) { - struct device_node *stdout_node = NULL; + struct device_node *stdout_node = of_node_get(of_stdout); const __be32 *termno; - const char *name = NULL; const struct hv_ops *ops; u32 index; - /* find the boot console from /chosen/stdout */ - if (of_chosen) - name = of_get_property(of_chosen, "linux,stdout-path", NULL); - if (name) { - stdout_node = of_find_node_by_path(name); - if (!stdout_node) { - pr_err("hvc_opal: Failed to locate default console!\n"); - return; - } - } else { + /* If the console wasn't in /chosen, try /ibm,opal */ + if (!stdout_node) { struct device_node *opal, *np; /* Current OPAL takeover doesn't provide the stdout diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c index b594abfbf21e..5618b5fc7500 100644 --- a/drivers/tty/hvc/hvc_vio.c +++ b/drivers/tty/hvc/hvc_vio.c @@ -404,42 +404,35 @@ module_exit(hvc_vio_exit); void __init hvc_vio_init_early(void) { - struct device_node *stdout_node; const __be32 *termno; const char *name; const struct hv_ops *ops; /* find the boot console from /chosen/stdout */ - if (!of_chosen) + if (!of_stdout) return; - name = of_get_property(of_chosen, "linux,stdout-path", NULL); - if (name == NULL) - return; - stdout_node = of_find_node_by_path(name); - if (!stdout_node) - return; - name = of_get_property(stdout_node, "name", NULL); + name = of_get_property(of_stdout, "name", NULL); if (!name) { printk(KERN_WARNING "stdout node missing 'name' property!\n"); - goto out; + return; } /* Check if it's a virtual terminal */ if (strncmp(name, "vty", 3) != 0) - goto out; - 
termno = of_get_property(stdout_node, "reg", NULL); + return; + termno = of_get_property(of_stdout, "reg", NULL); if (termno == NULL) - goto out; + return; hvterm_priv0.termno = of_read_number(termno, 1); spin_lock_init(&hvterm_priv0.buf_lock); hvterm_privs[0] = &hvterm_priv0; /* Check the protocol */ - if (of_device_is_compatible(stdout_node, "hvterm1")) { + if (of_device_is_compatible(of_stdout, "hvterm1")) { hvterm_priv0.proto = HV_PROTOCOL_RAW; ops = &hvterm_raw_ops; } - else if (of_device_is_compatible(stdout_node, "hvterm-protocol")) { + else if (of_device_is_compatible(of_stdout, "hvterm-protocol")) { hvterm_priv0.proto = HV_PROTOCOL_HVSI; ops = &hvterm_hvsi_ops; hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars, @@ -447,7 +440,7 @@ void __init hvc_vio_init_early(void) /* HVSI, perform the handshake now */ hvsilib_establish(&hvterm_priv0.hvsi); } else - goto out; + return; udbg_putc = udbg_hvc_putc; udbg_getc = udbg_hvc_getc; udbg_getc_poll = udbg_hvc_getc_poll; @@ -456,14 +449,12 @@ void __init hvc_vio_init_early(void) * backend for HVSI, only do udbg */ if (hvterm_priv0.proto == HV_PROTOCOL_HVSI) - goto out; + return; #endif /* Check whether the user has requested a different console. */ if (!strstr(cmd_line, "console=")) add_preferred_console("hvc", 0, NULL); hvc_instantiate(0, 0, ops); -out: - of_node_put(stdout_node); } /* call this from early_init() for a working debug console on diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 1d42dba6121d..bd672948f2f1 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -3587,7 +3587,7 @@ static void __used s8250_options(void) #ifdef CONFIG_SERIAL_8250_RSA __module_param_call(MODULE_PARAM_PREFIX, probe_rsa, ¶m_array_ops, .arr = &__param_arr_probe_rsa, - 0444, -1); + 0444, -1, 0); #endif } #else diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 4db7987ec225..57d9df84ce5d 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -540,6 +540,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { { "INT3434", 0 }, { "INT3435", 0 }, { "80860F0A", 0 }, + { "8086228A", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 7b63677475c1..d7d4584549a5 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -527,6 +527,45 @@ static void atmel_enable_ms(struct uart_port *port) } /* + * Disable modem status interrupts + */ +static void atmel_disable_ms(struct uart_port *port) +{ + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + uint32_t idr = 0; + + /* + * Interrupt should not be disabled twice + */ + if (!atmel_port->ms_irq_enabled) + return; + + atmel_port->ms_irq_enabled = false; + + if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0) + disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]); + else + idr |= ATMEL_US_CTSIC; + + if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0) + disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]); + else + idr |= ATMEL_US_DSRIC; + + if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0) + disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]); + else + idr |= ATMEL_US_RIIC; + + if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0) + disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]); + else + idr |= ATMEL_US_DCDIC; + + UART_PUT_IDR(port, idr); +} + +/* * Control the transmission of a break signal */ static void atmel_break_ctl(struct uart_port 
*port, int break_state) @@ -1993,7 +2032,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, /* CTS flow-control and modem-status interrupts */ if (UART_ENABLE_MS(port, termios->c_cflag)) - port->ops->enable_ms(port); + atmel_enable_ms(port); + else + atmel_disable_ms(port); spin_unlock_irqrestore(&port->lock, flags); } diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index f7ad5b903055..abbfedb84901 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -1653,8 +1653,7 @@ static int __init pmz_probe(void) /* * Find all escc chips in the system */ - node_p = of_find_node_by_name(NULL, "escc"); - while (node_p) { + for_each_node_by_name(node_p, "escc") { /* * First get channel A/B node pointers * @@ -1672,7 +1671,7 @@ static int __init pmz_probe(void) of_node_put(node_b); printk(KERN_ERR "pmac_zilog: missing node %c for escc %s\n", (!node_a) ? 'a' : 'b', node_p->full_name); - goto next; + continue; } /* @@ -1699,11 +1698,9 @@ static int __init pmz_probe(void) of_node_put(node_b); memset(&pmz_ports[count], 0, sizeof(struct uart_pmac_port)); memset(&pmz_ports[count+1], 0, sizeof(struct uart_pmac_port)); - goto next; + continue; } count += 2; -next: - node_p = of_find_node_by_name(node_p, "escc"); } pmz_ports_count = count; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 4aff02d6712e..c78f43a481ce 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -47,10 +47,6 @@ #include <asm/irq.h> -#ifdef CONFIG_SAMSUNG_CLOCK -#include <plat/clock.h> -#endif - #include "samsung.h" #if defined(CONFIG_SERIAL_SAMSUNG_DEBUG) && \ diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 8bb19da01639..29a7be47389a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/console.h> +#include <linux/of.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/device.h> @@ -2611,6 +2612,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) spin_lock_init(&uport->lock); lockdep_set_class(&uport->lock, &port_lock_key); } + if (uport->cons && uport->dev) + of_console_check(uport->dev->of_node, uport->cons->name, uport->line); uart_configure_port(drv, state, uport); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 01951d27cc03..806e4bcadbd7 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -581,7 +581,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port) { unsigned int status; - status = cdns_uart_readl(CDNS_UART_ISR_OFFSET) & CDNS_UART_IXR_TXEMPTY; + status = cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY; return status ? 
TIOCSER_TEMT : 0; } diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c index ba1dbcdf4609..0e8c39b6ccd4 100644 --- a/drivers/tty/synclink_gt.c +++ b/drivers/tty/synclink_gt.c @@ -3383,12 +3383,11 @@ static int alloc_desc(struct slgt_info *info) unsigned int pbufs; /* allocate memory to hold descriptor lists */ - info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr); + info->bufs = pci_zalloc_consistent(info->pdev, DESC_LIST_SIZE, + &info->bufs_dma_addr); if (info->bufs == NULL) return -ENOMEM; - memset(info->bufs, 0, DESC_LIST_SIZE); - info->rbufs = (struct slgt_desc*)info->bufs; info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count; diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 454b65898e2c..42bad18c66c9 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -355,7 +355,7 @@ static struct sysrq_key_op sysrq_term_op = { static void moom_callback(struct work_struct *ignored) { - out_of_memory(node_zonelist(first_online_node, GFP_KERNEL), GFP_KERNEL, + out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), GFP_KERNEL, 0, NULL, true); } diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index e0cad4418085..cf1b19bca306 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -92,6 +92,8 @@ source "drivers/usb/storage/Kconfig" source "drivers/usb/image/Kconfig" +source "drivers/usb/usbip/Kconfig" + endif source "drivers/usb/musb/Kconfig" diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 3cba892b83a2..d7be71778059 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -60,3 +60,5 @@ obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs/ obj-$(CONFIG_USB_GADGET) += gadget/ obj-$(CONFIG_USB_COMMON) += common/ + +obj-$(CONFIG_USBIP_CORE) += usbip/ diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index d72b9d2de2c5..4935ac38fd00 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -20,13 +20,13 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) { struct device *dev = ci->gadget.dev.parent; - int val; switch (event) { case CI_HDRC_CONTROLLER_RESET_EVENT: dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n"); writel(0, USB_AHBBURST); writel(0, USB_AHBMODE); + usb_phy_init(ci->transceiver); break; case CI_HDRC_CONTROLLER_STOPPED_EVENT: dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n"); @@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) * Put the transceiver in non-driving mode. Otherwise host * may not detect soft-disconnection. */ - val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL); - val &= ~ULPI_FUNC_CTRL_OPMODE_MASK; - val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; - usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL); + usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN); break; default: dev_dbg(dev, "unknown ci_hdrc event\n"); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 8a4dcbc7a75f..d481c99a20d7 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1728,8 +1728,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) * - Change autosuspend delay of hub can avoid unnecessary auto * suspend timer for hub, also may decrease power consumption * of USB bus. + * + * - If user has indicated to prevent autosuspend by passing + * usbcore.autosuspend = -1 then keep autosuspend disabled. 
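The hub hunk below implements this by only zeroing the delay when dev.power.autosuspend_delay is non-negative: a negative delay is how "usbcore.autosuspend=-1" (autosuspend disabled) is represented, and it has to be preserved. A small standalone sketch of that decision; effective_delay() is a hypothetical name, not a kernel helper:

        #include <stdio.h>

        static int effective_delay(int autosuspend_delay_ms)
        {
                /* A negative delay means the user disabled autosuspend entirely. */
                if (autosuspend_delay_ms >= 0)
                        return 0;                /* hubs may autosuspend immediately */
                return autosuspend_delay_ms;     /* keep the user's -1 untouched     */
        }

        int main(void)
        {
                printf("default delay 2000 ms  -> %d\n", effective_delay(2000));
                printf("usbcore.autosuspend=-1 -> %d\n", effective_delay(-1));
                return 0;
        }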
*/ - pm_runtime_set_autosuspend_delay(&hdev->dev, 0); +#ifdef CONFIG_PM_RUNTIME + if (hdev->dev.power.autosuspend_delay >= 0) + pm_runtime_set_autosuspend_delay(&hdev->dev, 0); +#endif /* * Hubs have proper suspend/resume support, except for root hubs @@ -2107,8 +2113,8 @@ void usb_disconnect(struct usb_device **pdev) { struct usb_port *port_dev = NULL; struct usb_device *udev = *pdev; - struct usb_hub *hub; - int port1; + struct usb_hub *hub = NULL; + int port1 = 1; /* mark the device as inactive, so any further urb submissions for * this device (and any of its children) will fail immediately. @@ -4631,9 +4637,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, if (status != -ENODEV && port1 != unreliable_port && printk_ratelimit()) - dev_err(&udev->dev, "connect-debounce failed, port %d disabled\n", - port1); - + dev_err(&port_dev->dev, "connect-debounce failed\n"); portstatus &= ~USB_PORT_STAT_CONNECTION; unreliable_port = port1; } else { @@ -5020,9 +5024,10 @@ static void hub_events(void) hub = list_entry(tmp, struct usb_hub, event_list); kref_get(&hub->kref); + hdev = hub->hdev; + usb_get_dev(hdev); spin_unlock_irq(&hub_event_lock); - hdev = hub->hdev; hub_dev = hub->intfdev; intf = to_usb_interface(hub_dev); dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", @@ -5135,6 +5140,7 @@ static void hub_events(void) usb_autopm_put_interface(intf); loop_disconnected: usb_unlock_device(hdev); + usb_put_dev(hdev); kref_put(&hub->kref, hub_release); } /* end while (1) */ diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0ba9c335b584..ce6071d65d51 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1649,6 +1649,7 @@ static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx) dev_err(hsotg->dev, "%s: timeout flushing fifo (GRSTCTL=%08x)\n", __func__, val); + break; } udelay(1); @@ -1901,7 +1902,7 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx, static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg) { u32 dsts = readl(hsotg->regs + DSTS); - int ep0_mps = 0, ep_mps; + int ep0_mps = 0, ep_mps = 8; /* * This should signal the finish of the enumeration phase @@ -2747,13 +2748,14 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg) dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev); - if (hsotg->phy) { - phy_init(hsotg->phy); - phy_power_on(hsotg->phy); - } else if (hsotg->uphy) + if (hsotg->uphy) usb_phy_init(hsotg->uphy); - else if (hsotg->plat->phy_init) + else if (hsotg->plat && hsotg->plat->phy_init) hsotg->plat->phy_init(pdev, hsotg->plat->phy_type); + else { + phy_init(hsotg->phy); + phy_power_on(hsotg->phy); + } } /** @@ -2767,13 +2769,14 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg) { struct platform_device *pdev = to_platform_device(hsotg->dev); - if (hsotg->phy) { - phy_power_off(hsotg->phy); - phy_exit(hsotg->phy); - } else if (hsotg->uphy) + if (hsotg->uphy) usb_phy_shutdown(hsotg->uphy); - else if (hsotg->plat->phy_exit) + else if (hsotg->plat && hsotg->plat->phy_exit) hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type); + else { + phy_power_off(hsotg->phy); + phy_exit(hsotg->phy); + } } /** @@ -2892,13 +2895,11 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget, return -ENODEV; /* all endpoints should be shutdown */ - for (ep = 0; ep < hsotg->num_of_eps; ep++) + for (ep = 1; ep < hsotg->num_of_eps; ep++) s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); spin_lock_irqsave(&hsotg->lock, flags); - s3c_hsotg_phy_disable(hsotg); - if (!driver) 
hsotg->driver = NULL; @@ -2941,7 +2942,6 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on) s3c_hsotg_phy_enable(hsotg); s3c_hsotg_core_init(hsotg); } else { - s3c_hsotg_disconnect(hsotg); s3c_hsotg_phy_disable(hsotg); } @@ -3441,13 +3441,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev) hsotg->irq = ret; - ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0, - dev_name(dev), hsotg); - if (ret < 0) { - dev_err(dev, "cannot claim IRQ\n"); - goto err_clk; - } - dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq); hsotg->gadget.max_speed = USB_SPEED_HIGH; @@ -3488,9 +3481,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev) if (hsotg->phy && (phy_get_bus_width(phy) == 8)) hsotg->phyif = GUSBCFG_PHYIF8; - if (hsotg->phy) - phy_init(hsotg->phy); - /* usb phy enable */ s3c_hsotg_phy_enable(hsotg); @@ -3498,6 +3488,17 @@ static int s3c_hsotg_probe(struct platform_device *pdev) s3c_hsotg_init(hsotg); s3c_hsotg_hw_cfg(hsotg); + ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0, + dev_name(dev), hsotg); + if (ret < 0) { + s3c_hsotg_phy_disable(hsotg); + clk_disable_unprepare(hsotg->clk); + regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), + hsotg->supplies); + dev_err(dev, "cannot claim IRQ\n"); + goto err_clk; + } + /* hsotg->num_of_eps holds number of EPs other than ep0 */ if (hsotg->num_of_eps == 0) { @@ -3582,9 +3583,6 @@ static int s3c_hsotg_remove(struct platform_device *pdev) usb_gadget_unregister_driver(hsotg->driver); } - s3c_hsotg_phy_disable(hsotg); - if (hsotg->phy) - phy_exit(hsotg->phy); clk_disable_unprepare(hsotg->clk); return 0; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index b769c1faaf03..9069984fe5cf 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -799,20 +799,21 @@ static int dwc3_remove(struct platform_device *pdev) { struct dwc3 *dwc = platform_get_drvdata(pdev); + dwc3_debugfs_exit(dwc); + dwc3_core_exit_mode(dwc); + dwc3_event_buffers_cleanup(dwc); + dwc3_free_event_buffers(dwc); + usb_phy_set_suspend(dwc->usb2_phy, 1); usb_phy_set_suspend(dwc->usb3_phy, 1); phy_power_off(dwc->usb2_generic_phy); phy_power_off(dwc->usb3_generic_phy); + dwc3_core_exit(dwc); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - dwc3_debugfs_exit(dwc); - dwc3_core_exit_mode(dwc); - dwc3_event_buffers_cleanup(dwc); - dwc3_free_event_buffers(dwc); - dwc3_core_exit(dwc); - return 0; } diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index ef4936ff626c..fc0de3753648 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -425,7 +425,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap) static int dwc3_omap_extcon_register(struct dwc3_omap *omap) { - u32 ret; + int ret; struct device_node *node = omap->dev->of_node; struct extcon_dev *edev; @@ -576,9 +576,9 @@ static int dwc3_omap_remove(struct platform_device *pdev) if (omap->extcon_id_dev.edev) extcon_unregister_interest(&omap->extcon_id_dev); dwc3_omap_disable_irqs(omap); + device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); return 0; } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 349cacc577d8..490a6ca00733 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -527,7 +527,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, dep->stream_capable = 
true; } - if (usb_endpoint_xfer_isoc(desc)) + if (!usb_endpoint_xfer_control(desc)) params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; /* @@ -1225,16 +1225,17 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, int ret; + spin_lock_irqsave(&dwc->lock, flags); if (!dep->endpoint.desc) { dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", request, ep->name); + spin_unlock_irqrestore(&dwc->lock, flags); return -ESHUTDOWN; } dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", request, ep->name, request->length); - spin_lock_irqsave(&dwc->lock, flags); ret = __dwc3_gadget_ep_queue(dep, req); spin_unlock_irqrestore(&dwc->lock, flags); @@ -2041,12 +2042,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, dwc3_endpoint_transfer_complete(dwc, dep, event); break; case DWC3_DEPEVT_XFERINPROGRESS: - if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n", - dep->name); - return; - } - dwc3_endpoint_transfer_complete(dwc, dep, event); break; case DWC3_DEPEVT_XFERNOTREADY: diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile index a186afeaa700..9add915d41f7 100644 --- a/drivers/usb/gadget/Makefile +++ b/drivers/usb/gadget/Makefile @@ -3,7 +3,7 @@ # subdir-ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG subdir-ccflags-$(CONFIG_USB_GADGET_VERBOSE) += -DVERBOSE_DEBUG -ccflags-y += -I$(PWD)/drivers/usb/gadget/udc +ccflags-y += -Idrivers/usb/gadget/udc obj-$(CONFIG_USB_LIBCOMPOSITE) += libcomposite.o libcomposite-y := usbstring.o config.o epautoconf.o diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index 6d91f21b52a6..83ae1065149d 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -2,8 +2,8 @@ # USB peripheral controller drivers # -ccflags-y := -I$(PWD)/drivers/usb/gadget/ -ccflags-y += -I$(PWD)/drivers/usb/gadget/udc/ +ccflags-y := -Idrivers/usb/gadget/ +ccflags-y += -Idrivers/usb/gadget/udc/ # USB Functions usb_f_acm-y := f_acm.o diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index dc30adf15a01..0dc3552d1360 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -155,6 +155,12 @@ struct ffs_io_data { struct usb_request *req; }; +struct ffs_desc_helper { + struct ffs_data *ffs; + unsigned interfaces_count; + unsigned eps_count; +}; + static int __must_check ffs_epfiles_create(struct ffs_data *ffs); static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count); @@ -1830,7 +1836,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) { - struct ffs_data *ffs = priv; + struct ffs_desc_helper *helper = priv; + struct usb_endpoint_descriptor *d; ENTER(); @@ -1844,8 +1851,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type, * encountered interface "n" then there are at least * "n+1" interfaces. 
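Two invariants drive this descriptor parsing: seeing interface number n implies at least n+1 interfaces, and once one speed's descriptors have defined the endpoint address map, every other speed must describe the same endpoints at the same positions (the eps_addrmap check added further down in this file's changes). A standalone sketch of the address-map consistency check, with made-up endpoint addresses; note_endpoint() is illustrative only:

        #include <stdio.h>

        #define MAX_EPS 15

        /* First descriptor set builds the map, later sets must agree entry by entry. */
        static int note_endpoint(unsigned char addrmap[MAX_EPS], unsigned int idx,
                                 unsigned char bEndpointAddress, int first_set)
        {
                if (idx >= MAX_EPS)
                        return -1;                        /* too many endpoints */
                if (first_set) {
                        addrmap[idx] = bEndpointAddress;  /* record the address */
                        return 0;
                }
                return addrmap[idx] == bEndpointAddress ? 0 : -1;
        }

        int main(void)
        {
                unsigned char map[MAX_EPS] = { 0 };
                unsigned char fs[] = { 0x81, 0x02 };      /* made-up full-speed endpoints */
                unsigned char hs[] = { 0x81, 0x02 };      /* made-up high-speed endpoints */
                unsigned int i;
                int bad = 0;

                for (i = 0; i < sizeof(fs); i++)
                        bad |= note_endpoint(map, i + 1, fs[i], 1);
                for (i = 0; i < sizeof(hs); i++)
                        bad |= note_endpoint(map, i + 1, hs[i], 0);

                printf("descriptor sets %s\n", bad ? "disagree" : "agree");
                return 0;
        }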
*/ - if (*valuep >= ffs->interfaces_count) - ffs->interfaces_count = *valuep + 1; + if (*valuep >= helper->interfaces_count) + helper->interfaces_count = *valuep + 1; break; case FFS_STRING: @@ -1853,14 +1860,22 @@ static int __ffs_data_do_entity(enum ffs_entity_type type, * Strings are indexed from 1 (0 is magic ;) reserved * for languages list or some such) */ - if (*valuep > ffs->strings_count) - ffs->strings_count = *valuep; + if (*valuep > helper->ffs->strings_count) + helper->ffs->strings_count = *valuep; break; case FFS_ENDPOINT: - /* Endpoints are indexed from 1 as well. */ - if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count) - ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK); + d = (void *)desc; + helper->eps_count++; + if (helper->eps_count >= 15) + return -EINVAL; + /* Check if descriptors for any speed were already parsed */ + if (!helper->ffs->eps_count && !helper->ffs->interfaces_count) + helper->ffs->eps_addrmap[helper->eps_count] = + d->bEndpointAddress; + else if (helper->ffs->eps_addrmap[helper->eps_count] != + d->bEndpointAddress) + return -EINVAL; break; } @@ -2053,6 +2068,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs, char *data = _data, *raw_descs; unsigned os_descs_count = 0, counts[3], flags; int ret = -EINVAL, i; + struct ffs_desc_helper helper; ENTER(); @@ -2101,13 +2117,29 @@ static int __ffs_data_got_descs(struct ffs_data *ffs, /* Read descriptors */ raw_descs = data; + helper.ffs = ffs; for (i = 0; i < 3; ++i) { if (!counts[i]) continue; + helper.interfaces_count = 0; + helper.eps_count = 0; ret = ffs_do_descs(counts[i], data, len, - __ffs_data_do_entity, ffs); + __ffs_data_do_entity, &helper); if (ret < 0) goto error; + if (!ffs->eps_count && !ffs->interfaces_count) { + ffs->eps_count = helper.eps_count; + ffs->interfaces_count = helper.interfaces_count; + } else { + if (ffs->eps_count != helper.eps_count) { + ret = -EINVAL; + goto error; + } + if (ffs->interfaces_count != helper.interfaces_count) { + ret = -EINVAL; + goto error; + } + } data += ret; len -= ret; } @@ -2342,9 +2374,18 @@ static void ffs_event_add(struct ffs_data *ffs, spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); } - /* Bind/unbind USB function hooks *******************************************/ +static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i) + if (ffs->eps_addrmap[i] == endpoint_address) + return i; + return -ENOENT; +} + static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) @@ -2378,7 +2419,10 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) return 0; - idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1; + idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1; + if (idx < 0) + return idx; + ffs_ep = func->eps + idx; if (unlikely(ffs_ep->descs[ep_desc_id])) { diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index d50adda913cf..6e6f87656e7b 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -1127,10 +1127,7 @@ void gether_disconnect(struct gether *link) DBG(dev, "%s\n", __func__); - netif_tx_lock(dev->net); netif_stop_queue(dev->net); - netif_tx_unlock(dev->net); - netif_carrier_off(dev->net); /* disable endpoints, forcing (synchronous) completion diff --git a/drivers/usb/gadget/function/u_fs.h 
b/drivers/usb/gadget/function/u_fs.h index 63d6e71569c1..d48897e8ffeb 100644 --- a/drivers/usb/gadget/function/u_fs.h +++ b/drivers/usb/gadget/function/u_fs.h @@ -224,6 +224,8 @@ struct ffs_data { void *ms_os_descs_ext_prop_name_avail; void *ms_os_descs_ext_prop_data_avail; + u8 eps_addrmap[15]; + unsigned short strings_count; unsigned short interfaces_count; unsigned short eps_count; diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index 71e896d4c5ae..a5eb9a3fbb7a 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -195,6 +195,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req) printk(KERN_INFO "Failed to queue request (%d).\n", ret); usb_ep_set_halt(ep); spin_unlock_irqrestore(&video->queue.irqlock, flags); + uvc_queue_cancel(queue, 0); goto requeue; } spin_unlock_irqrestore(&video->queue.irqlock, flags); @@ -281,6 +282,7 @@ error: static int uvc_video_pump(struct uvc_video *video) { + struct uvc_video_queue *queue = &video->queue; struct usb_request *req; struct uvc_buffer *buf; unsigned long flags; @@ -322,6 +324,7 @@ uvc_video_pump(struct uvc_video *video) printk(KERN_INFO "Failed to queue request (%d)\n", ret); usb_ep_set_halt(video->ep); spin_unlock_irqrestore(&video->queue.irqlock, flags); + uvc_queue_cancel(queue, 0); break; } spin_unlock_irqrestore(&video->queue.irqlock, flags); diff --git a/drivers/usb/gadget/legacy/Makefile b/drivers/usb/gadget/legacy/Makefile index a11aad5635df..edba2d1ee0f3 100644 --- a/drivers/usb/gadget/legacy/Makefile +++ b/drivers/usb/gadget/legacy/Makefile @@ -2,9 +2,9 @@ # USB gadget drivers # -ccflags-y := -I$(PWD)/drivers/usb/gadget/ -ccflags-y += -I$(PWD)/drivers/usb/gadget/udc/ -ccflags-y += -I$(PWD)/drivers/usb/gadget/function/ +ccflags-y := -Idrivers/usb/gadget/ +ccflags-y += -Idrivers/usb/gadget/udc/ +ccflags-y += -Idrivers/usb/gadget/function/ g_zero-y := zero.o g_audio-y := audio.o diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c index 986fc511a2ed..225e385a6160 100644 --- a/drivers/usb/gadget/legacy/dbgp.c +++ b/drivers/usb/gadget/legacy/dbgp.c @@ -222,10 +222,12 @@ static void dbgp_unbind(struct usb_gadget *gadget) { #ifdef CONFIG_USB_G_DBGP_SERIAL kfree(dbgp.serial); + dbgp.serial = NULL; #endif if (dbgp.req) { kfree(dbgp.req->buf); usb_ep_free_request(gadget->ep0, dbgp.req); + dbgp.req = NULL; } gadget->ep0->driver_data = NULL; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 2e4ce7704908..e96077b8bf79 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -440,7 +440,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) value = -ENOMEM; kbuf = memdup_user(buf, len); - if (!kbuf) { + if (IS_ERR(kbuf)) { value = PTR_ERR(kbuf); goto free1; } diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 5151f947a4f5..34ebaa68504c 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -332,7 +332,7 @@ config USB_GOKU gadget drivers to also be dynamically linked. config USB_EG20T - tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC" + tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC" depends on PCI help This is a USB device driver for EG20T PCH. @@ -353,6 +353,7 @@ config USB_EG20T ML7213/ML7831 is companion chip for Intel Atom E6xx series. ML7213/ML7831 is completely compatible for Intel EG20T PCH. 
+ This driver can be used with Intel's Quark X1000 SOC platform # # LAST -- dummy/emulated controller # diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 906e65f0e4fa..c9fe67e29d35 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1661,7 +1661,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) if (dma_status) { int i; - for (i = 1; i < USBA_NR_DMAS; i++) + for (i = 1; i <= USBA_NR_DMAS; i++) if (dma_status & (1 << i)) usba_dma_irq(udc, &udc->usba_ep[i]); } diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c index d40255f784df..5c5d1adda7eb 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.c +++ b/drivers/usb/gadget/udc/fusb300_udc.c @@ -1398,13 +1398,17 @@ static int fusb300_probe(struct platform_device *pdev) /* initialize udc */ fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL); - if (fusb300 == NULL) + if (fusb300 == NULL) { + ret = -ENOMEM; goto clean_up; + } for (i = 0; i < FUSB300_MAX_NUM_EP; i++) { _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL); - if (_ep[i] == NULL) + if (_ep[i] == NULL) { + ret = -ENOMEM; goto clean_up; + } fusb300->ep[i] = _ep[i]; } diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h index ae811d8d38b4..ad39f892d200 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.h +++ b/drivers/usb/gadget/udc/fusb300_udc.h @@ -12,7 +12,7 @@ #ifndef __FUSB300_UDC_H__ -#define __FUSB300_UDC_H_ +#define __FUSB300_UDC_H__ #include <linux/kernel.h> diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index f4eac113690e..2e95715b50c0 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -3320,7 +3320,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) if (stat & tmp) { writel(tmp, &dev->regs->irqstat1); if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) && - (readl(&dev->usb->usbstat) & mask)) || + ((readl(&dev->usb->usbstat) & mask) == 0)) || ((readl(&dev->usb->usbctl) & BIT(VBUS_PIN)) == 0)) && (dev->gadget.speed != USB_SPEED_UNKNOWN)) { diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index eb8c3bedb57a..460d953c91b6 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -343,6 +343,7 @@ struct pch_vbus_gpio_data { * @setup_data: Received setup data * @phys_addr: of device memory * @base_addr: for mapped device memory + * @bar: Indicates which PCI BAR for USB regs * @irq: IRQ line for the device * @cfg_data: current cfg, intf, and alt in use * @vbus_gpio: GPIO informaton for detecting VBUS @@ -370,14 +371,17 @@ struct pch_udc_dev { struct usb_ctrlrequest setup_data; unsigned long phys_addr; void __iomem *base_addr; + unsigned bar; unsigned irq; struct pch_udc_cfg_data cfg_data; struct pch_vbus_gpio_data vbus_gpio; }; #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget)) +#define PCH_UDC_PCI_BAR_QUARK_X1000 0 #define PCH_UDC_PCI_BAR 1 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 +#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939 #define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808 @@ -3076,7 +3080,7 @@ static void pch_udc_remove(struct pci_dev *pdev) iounmap(dev->base_addr); if (dev->mem_region) release_mem_region(dev->phys_addr, - pci_resource_len(pdev, PCH_UDC_PCI_BAR)); + pci_resource_len(pdev, dev->bar)); if (dev->active) pci_disable_device(pdev); kfree(dev); @@ 
-3144,9 +3148,15 @@ static int pch_udc_probe(struct pci_dev *pdev, dev->active = 1; pci_set_drvdata(pdev, dev); + /* Determine BAR based on PCI ID */ + if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC) + dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000; + else + dev->bar = PCH_UDC_PCI_BAR; + /* PCI resource allocation */ - resource = pci_resource_start(pdev, 1); - len = pci_resource_len(pdev, 1); + resource = pci_resource_start(pdev, dev->bar); + len = pci_resource_len(pdev, dev->bar); if (!request_mem_region(resource, len, KBUILD_MODNAME)) { dev_err(&pdev->dev, "%s: pci device used already\n", __func__); @@ -3212,6 +3222,12 @@ finished: static const struct pci_device_id pch_udc_pcidev_id[] = { { + PCI_DEVICE(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC), + .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, + .class_mask = 0xffffffff, + }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC), .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, .class_mask = 0xffffffff, diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c index 46008421c1ec..de2a8713b428 100644 --- a/drivers/usb/gadget/udc/r8a66597-udc.c +++ b/drivers/usb/gadget/udc/r8a66597-udc.c @@ -1868,8 +1868,8 @@ static int r8a66597_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg = devm_ioremap_resource(&pdev->dev, res); - if (!reg) - return -ENODEV; + if (IS_ERR(reg)) + return PTR_ERR(reg); ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); irq = ires->start; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 81cda09b47e3..488a30836c36 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -965,8 +965,6 @@ rescan: } qh->exception = 1; - if (ehci->rh_state < EHCI_RH_RUNNING) - qh->qh_state = QH_STATE_IDLE; switch (qh->qh_state) { case QH_STATE_LINKED: WARN_ON(!list_empty(&qh->qtd_list)); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index cc305c71ac3d..6130b7574908 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -1230,7 +1230,7 @@ int ehci_hub_control( if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) { spin_unlock_irqrestore(&ehci->lock, flags); retval = ehset_single_step_set_feature(hcd, - wIndex); + wIndex + 1); spin_lock_irqsave(&ehci->lock, flags); break; } diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index aa79e8749040..69aece31143a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -468,7 +468,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg) } /* Updates Link Status for super Speed port */ -static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) +static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, + u32 *status, u32 status_reg) { u32 pls = status_reg & PORT_PLS_MASK; @@ -507,7 +508,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) * in which sometimes the port enters compliance mode * caused by a delay on the host-device negotiation. 
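The xhci-hub hunk below makes the connected-bit fixup for ports parked in compliance mode conditional on the XHCI_COMP_MODE_QUIRK flag instead of applying it on every host. A tiny standalone sketch of that gating; the constants and the fixup_pls() name are made up for illustration, not the real xHCI encodings:

        #include <stdio.h>

        #define PLS_COMP_MOD    0x0a0   /* made-up encoding of the compliance link state */
        #define STAT_CONNECTION 0x001   /* made-up encoding of the connect status bit    */

        /* Only hosts carrying the compliance-mode quirk get the connect-bit fixup. */
        static unsigned int fixup_pls(unsigned int pls, int has_comp_mode_quirk)
        {
                if (has_comp_mode_quirk && pls == PLS_COMP_MOD)
                        pls |= STAT_CONNECTION;
                return pls;
        }

        int main(void)
        {
                printf("quirky host: 0x%03x\n", fixup_pls(PLS_COMP_MOD, 1));
                printf("other host:  0x%03x\n", fixup_pls(PLS_COMP_MOD, 0));
                return 0;
        }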
*/ - if (pls == USB_SS_PORT_LS_COMP_MOD) + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (pls == USB_SS_PORT_LS_COMP_MOD)) pls |= USB_PORT_STAT_CONNECTION; } @@ -666,7 +668,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, } /* Update Port Link State */ if (hcd->speed == HCD_USB3) { - xhci_hub_report_usb3_link_state(&status, raw_port_status); + xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status); /* * Verify if all USB3 Ports Have entered U0 already. * Delete Compliance Mode Timer if so. diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 8056d90690ee..8936211b161d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1812,6 +1812,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) if (xhci->lpm_command) xhci_free_command(xhci, xhci->lpm_command); + xhci->lpm_command = NULL; if (xhci->cmd_ring) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; @@ -1819,7 +1820,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci_cleanup_command_queue(xhci); num_ports = HCS_MAX_PORTS(xhci->hcs_params1); - for (i = 0; i < num_ports; i++) { + for (i = 0; i < num_ports && xhci->rh_bw; i++) { struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; for (j = 0; j < XHCI_MAX_INTERVAL; j++) { struct list_head *ep = &bwt->interval_bw[j].endpoints; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 687d36608155..c22a3e15a16e 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -101,6 +101,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) /* AMD PLL quirk */ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; + + if (pdev->vendor == PCI_VENDOR_ID_AMD) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; @@ -151,6 +155,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ + if (pdev->vendor == PCI_VENDOR_ID_VIA && + pdev->device == 0x3432) + xhci->quirks |= XHCI_BROKEN_STREAMS; + if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 60fb52ae864b..abed30b82905 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -364,32 +364,6 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, } } -/* - * Find the segment that trb is in. Start searching in start_seg. - * If we must move past a segment that has a link TRB with a toggle cycle state - * bit set, then we will toggle the value pointed at by cycle_state. - */ -static struct xhci_segment *find_trb_seg( - struct xhci_segment *start_seg, - union xhci_trb *trb, int *cycle_state) -{ - struct xhci_segment *cur_seg = start_seg; - struct xhci_generic_trb *generic_trb; - - while (cur_seg->trbs > trb || - &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { - generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; - if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE)) - *cycle_state ^= 0x1; - cur_seg = cur_seg->next; - if (cur_seg == start_seg) - /* Looped over the entire list. Oops! 
*/ - return NULL; - } - return cur_seg; -} - - static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id) @@ -459,9 +433,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, struct xhci_virt_device *dev = xhci->devs[slot_id]; struct xhci_virt_ep *ep = &dev->eps[ep_index]; struct xhci_ring *ep_ring; - struct xhci_generic_trb *trb; + struct xhci_segment *new_seg; + union xhci_trb *new_deq; dma_addr_t addr; u64 hw_dequeue; + bool cycle_found = false; + bool td_last_trb_found = false; ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, ep_index, stream_id); @@ -486,45 +463,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, hw_dequeue = le64_to_cpu(ep_ctx->deq); } - /* Find virtual address and segment of hardware dequeue pointer */ - state->new_deq_seg = ep_ring->deq_seg; - state->new_deq_ptr = ep_ring->dequeue; - while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr) - != (dma_addr_t)(hw_dequeue & ~0xf)) { - next_trb(xhci, ep_ring, &state->new_deq_seg, - &state->new_deq_ptr); - if (state->new_deq_ptr == ep_ring->dequeue) { - WARN_ON(1); - return; - } - } + new_seg = ep_ring->deq_seg; + new_deq = ep_ring->dequeue; + state->new_cycle_state = hw_dequeue & 0x1; + /* - * Find cycle state for last_trb, starting at old cycle state of - * hw_dequeue. If there is only one segment ring, find_trb_seg() will - * return immediately and cannot toggle the cycle state if this search - * wraps around, so add one more toggle manually in that case. + * We want to find the pointer, segment and cycle state of the new trb + * (the one after current TD's last_trb). We know the cycle state at + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are + * found. */ - state->new_cycle_state = hw_dequeue & 0x1; - if (ep_ring->first_seg == ep_ring->first_seg->next && - cur_td->last_trb < state->new_deq_ptr) - state->new_cycle_state ^= 0x1; + do { + if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + == (dma_addr_t)(hw_dequeue & ~0xf)) { + cycle_found = true; + if (td_last_trb_found) + break; + } + if (new_deq == cur_td->last_trb) + td_last_trb_found = true; - state->new_deq_ptr = cur_td->last_trb; - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Finding segment containing last TRB in TD."); - state->new_deq_seg = find_trb_seg(state->new_deq_seg, - state->new_deq_ptr, &state->new_cycle_state); - if (!state->new_deq_seg) { - WARN_ON(1); - return; - } + if (cycle_found && + TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) && + new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE)) + state->new_cycle_state ^= 0x1; + + next_trb(xhci, ep_ring, &new_seg, &new_deq); + + /* Search wrapped around, bail out */ + if (new_deq == ep->ring->dequeue) { + xhci_err(xhci, "Error: Failed finding new dequeue state\n"); + state->new_deq_seg = NULL; + state->new_deq_ptr = NULL; + return; + } + + } while (!cycle_found || !td_last_trb_found); - /* Increment to find next TRB after last_trb. Cycle if appropriate. */ - trb = &state->new_deq_ptr->generic; - if (TRB_TYPE_LINK_LE32(trb->field[3]) && - (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) - state->new_cycle_state ^= 0x1; - next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); + state->new_deq_seg = new_seg; + state->new_deq_ptr = new_deq; /* Don't update the ring cycle state for the producer (us). 
*/ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, @@ -2487,7 +2464,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, * last TRB of the previous TD. The command completion handle * will take care the rest. */ - if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { + if (!event_seg && (trb_comp_code == COMP_STOP || + trb_comp_code == COMP_STOP_INVAL)) { ret = 0; goto cleanup; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index b6f21175b872..c4a8fca8ae93 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -2880,6 +2880,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, ep_index, ep->stopped_stream, ep->stopped_td, &deq_state); + if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) + return; + /* HW with the reset endpoint quirk will use the saved dequeue state to * issue a configure endpoint command later. */ @@ -3968,13 +3971,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, int ret; spin_lock_irqsave(&xhci->lock, flags); - if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) { + + virt_dev = xhci->devs[udev->slot_id]; + + /* + * virt_dev might not exists yet if xHC resumed from hibernate (S4) and + * xHC was re-initialized. Exit latency will be set later after + * hub_port_finish_reset() is done and xhci->devs[] are re-allocated + */ + + if (!virt_dev || max_exit_latency == virt_dev->current_mel) { spin_unlock_irqrestore(&xhci->lock, flags); return 0; } /* Attempt to issue an Evaluate Context command to change the MEL. */ - virt_dev = xhci->devs[udev->slot_id]; command = xhci->lpm_command; ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); if (!ctrl_ctx) { diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 06b5d77cd9ad..633caf643122 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3250,6 +3250,7 @@ static const struct usb_device_id sisusb_table[] = { { USB_DEVICE(0x0711, 0x0918) }, { USB_DEVICE(0x0711, 0x0920) }, { USB_DEVICE(0x0711, 0x0950) }, + { USB_DEVICE(0x0711, 0x5200) }, { USB_DEVICE(0x182d, 0x021c) }, { USB_DEVICE(0x182d, 0x0269) }, { } diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index 47ae6455d073..3ee133f675ab 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -39,6 +39,7 @@ struct cppi41_dma_channel { u32 transferred; u32 packet_sz; struct list_head tx_check; + int tx_zlp; }; #define MUSB_DMA_NUM_CHANNELS 15 @@ -122,6 +123,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel) { struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; struct musb *musb = hw_ep->musb; + void __iomem *epio = hw_ep->regs; + u16 csr; if (!cppi41_channel->prog_len || (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) { @@ -131,15 +134,24 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel) cppi41_channel->transferred; cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE; cppi41_channel->channel.rx_packet_done = true; + + /* + * transmit ZLP using PIO mode for transfers which size is + * multiple of EP packet size. 
+ */ + if (cppi41_channel->tx_zlp && (cppi41_channel->transferred % + cppi41_channel->packet_sz) == 0) { + musb_ep_select(musb->mregs, hw_ep->epnum); + csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY; + musb_writew(epio, MUSB_TXCSR, csr); + } musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx); } else { /* next iteration, reload */ struct dma_chan *dc = cppi41_channel->dc; struct dma_async_tx_descriptor *dma_desc; enum dma_transfer_direction direction; - u16 csr; u32 remain_bytes; - void __iomem *epio = cppi41_channel->hw_ep->regs; cppi41_channel->buf_addr += cppi41_channel->packet_sz; @@ -363,6 +375,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel, cppi41_channel->total_len = len; cppi41_channel->transferred = 0; cppi41_channel->packet_sz = packet_sz; + cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0; /* * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index 9aad00f11bd5..221faed9f074 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c @@ -96,7 +96,7 @@ static bool ux500_configure_channel(struct dma_channel *channel, struct musb *musb = ux500_channel->controller->private_data; dev_dbg(musb->controller, - "packet_sz=%d, mode=%d, dma_addr=0x%llu, len=%d is_tx=%d\n", + "packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n", packet_sz, mode, (unsigned long long) dma_addr, len, ux500_channel->is_tx); diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c index ea9e705555df..f4b14bd97e14 100644 --- a/drivers/usb/phy/phy-gpio-vbus-usb.c +++ b/drivers/usb/phy/phy-gpio-vbus-usb.c @@ -260,10 +260,8 @@ static int gpio_vbus_probe(struct platform_device *pdev) gpio_vbus->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), GFP_KERNEL); - if (!gpio_vbus->phy.otg) { - kfree(gpio_vbus); + if (!gpio_vbus->phy.otg) return -ENOMEM; - } platform_set_drvdata(pdev, gpio_vbus); gpio_vbus->dev = &pdev->dev; diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index e4108eec5ef4..afc09087ec36 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -1601,8 +1601,8 @@ static int msm_otg_probe(struct platform_device *pdev) */ if (motg->phy_number) { phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4); - if (IS_ERR(phy_select)) - return PTR_ERR(phy_select); + if (!phy_select) + return -ENOMEM; /* Enable second PHY with the OTG port */ writel(0x1, phy_select); } diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index c42bdf0c4a1f..00972eca04e7 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -1,5 +1,5 @@ /* - * Copyright 2012-2013 Freescale Semiconductor, Inc. + * Copyright 2012-2014 Freescale Semiconductor, Inc. 
* Copyright (C) 2012 Marek Vasut <marex@denx.de> * on behalf of DENX Software Engineering GmbH * @@ -125,7 +125,13 @@ static const struct mxs_phy_data imx6sl_phy_data = { MXS_PHY_NEED_IP_FIX, }; +static const struct mxs_phy_data imx6sx_phy_data = { + .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | + MXS_PHY_NEED_IP_FIX, +}; + static const struct of_device_id mxs_phy_dt_ids[] = { + { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, }, { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, }, { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, }, { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, }, diff --git a/drivers/usb/phy/phy-samsung-usb.h b/drivers/usb/phy/phy-samsung-usb.h index 68771bfd1825..80eedd45a20a 100644 --- a/drivers/usb/phy/phy-samsung-usb.h +++ b/drivers/usb/phy/phy-samsung-usb.h @@ -216,7 +216,7 @@ #define EXYNOS5_DRD_PHYPARAM1 (0x20) -#define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0) +#define PHYPARAM1_PCS_TXDEEMPH_MASK (0x3f << 0) #define PHYPARAM1_PCS_TXDEEMPH (0x1c) #define EXYNOS5_DRD_PHYTERM (0x24) diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 13b4fa287da8..886f1807a67b 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -878,8 +878,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, return -ENOMEM; } - tegra_phy->config = devm_kzalloc(&pdev->dev, - sizeof(*tegra_phy->config), GFP_KERNEL); + tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config), + GFP_KERNEL); if (!tegra_phy->config) { dev_err(&pdev->dev, "unable to allocate memory for USB UTMIP config\n"); diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index 6d0f6080eceb..045cd309367a 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c @@ -232,6 +232,9 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index) phy = __usb_find_phy_dev(dev, &phy_bind_list, index); if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { dev_dbg(dev, "unable to find transceiver\n"); + if (!IS_ERR(phy)) + phy = ERR_PTR(-ENODEV); + goto err0; } diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 4fd36530bfa3..b0c97a3f1bfe 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -108,19 +108,45 @@ static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) return list_first_entry(&pipe->list, struct usbhs_pkt, node); } +static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, + struct usbhs_fifo *fifo); +static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe, + struct usbhs_fifo *fifo); +static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, + struct usbhs_pkt *pkt); +#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) +#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) +static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) { struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); unsigned long flags; /******************** spin lock ********************/ usbhs_lock(priv, flags); + usbhs_pipe_disable(pipe); + if (!pkt) pkt = __usbhsf_pkt_get(pipe); - if (pkt) + if (pkt) { + struct dma_chan *chan = NULL; + + if (fifo) + chan = usbhsf_dma_chan_get(fifo, pkt); + if (chan) { + dmaengine_terminate_all(chan); + usbhsf_fifo_clear(pipe, fifo); + usbhsf_dma_unmap(pkt); + } + __usbhsf_pkt_del(pkt); + } + + if (fifo) + usbhsf_fifo_unselect(pipe, fifo); 
usbhs_unlock(priv, flags); /******************** spin unlock ******************/ @@ -544,6 +570,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done) usbhsf_send_terminator(pipe, fifo); usbhsf_tx_irq_ctrl(pipe, !*is_done); + usbhs_pipe_running(pipe, !*is_done); usbhs_pipe_enable(pipe); dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n", @@ -570,12 +597,21 @@ usbhs_fifo_write_busy: * retry in interrupt */ usbhsf_tx_irq_ctrl(pipe, 1); + usbhs_pipe_running(pipe, 1); return ret; } +static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done) +{ + if (usbhs_pipe_is_running(pkt->pipe)) + return 0; + + return usbhsf_pio_try_push(pkt, is_done); +} + struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { - .prepare = usbhsf_pio_try_push, + .prepare = usbhsf_pio_prepare_push, .try_run = usbhsf_pio_try_push, }; @@ -589,6 +625,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) if (usbhs_pipe_is_busy(pipe)) return 0; + if (usbhs_pipe_is_running(pipe)) + return 0; + /* * pipe enable to prepare packet receive */ @@ -597,6 +636,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); usbhs_pipe_enable(pipe); + usbhs_pipe_running(pipe, 1); usbhsf_rx_irq_ctrl(pipe, 1); return 0; @@ -642,6 +682,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done) (total_len < maxp)) { /* short packet */ *is_done = 1; usbhsf_rx_irq_ctrl(pipe, 0); + usbhs_pipe_running(pipe, 0); usbhs_pipe_disable(pipe); /* disable pipe first */ } @@ -763,8 +804,6 @@ static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe, usbhs_bset(priv, fifo->sel, DREQE, dreqe); } -#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) -#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) { struct usbhs_pipe *pipe = pkt->pipe; @@ -805,6 +844,7 @@ static void xfer_work(struct work_struct *work) dev_dbg(dev, " %s %d (%d/ %d)\n", fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); + usbhs_pipe_running(pipe, 1); usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); usbhs_pipe_enable(pipe); usbhsf_dma_start(pipe, fifo); @@ -836,6 +876,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ goto usbhsf_pio_prepare_push; + /* return at this time if the pipe is running */ + if (usbhs_pipe_is_running(pipe)) + return 0; + /* get enable DMA fifo */ fifo = usbhsf_get_dma_fifo(priv, pkt); if (!fifo) @@ -869,15 +913,29 @@ usbhsf_pio_prepare_push: static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done) { struct usbhs_pipe *pipe = pkt->pipe; + int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe); + + pkt->actual += pkt->trans; - pkt->actual = pkt->trans; + if (pkt->actual < pkt->length) + *is_done = 0; /* there are remainder data */ + else if (is_short) + *is_done = 1; /* short packet */ + else + *is_done = !pkt->zero; /* send zero packet? */ - *is_done = !pkt->zero; /* send zero packet ? 
*/ + usbhs_pipe_running(pipe, !*is_done); usbhsf_dma_stop(pipe, pipe->fifo); usbhsf_dma_unmap(pkt); usbhsf_fifo_unselect(pipe, pipe->fifo); + if (!*is_done) { + /* change handler to PIO */ + pkt->handler = &usbhs_fifo_pio_push_handler; + return pkt->handler->try_run(pkt, is_done); + } + return 0; } @@ -972,8 +1030,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done) if ((pkt->actual == pkt->length) || /* receive all data */ (pkt->trans < maxp)) { /* short packet */ *is_done = 1; + usbhs_pipe_running(pipe, 0); } else { /* re-enable */ + usbhs_pipe_running(pipe, 0); usbhsf_prepare_pop(pkt, is_done); } diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c index 6a030b931a3b..9a705b15b3a1 100644 --- a/drivers/usb/renesas_usbhs/mod.c +++ b/drivers/usb/renesas_usbhs/mod.c @@ -213,7 +213,10 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv, { struct usbhs_mod *mod = usbhs_mod_get_current(priv); u16 intenb0, intenb1; + unsigned long flags; + /******************** spin lock ********************/ + usbhs_lock(priv, flags); state->intsts0 = usbhs_read(priv, INTSTS0); state->intsts1 = usbhs_read(priv, INTSTS1); @@ -229,6 +232,8 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv, state->bempsts &= mod->irq_bempsts; state->brdysts &= mod->irq_brdysts; } + usbhs_unlock(priv, flags); + /******************** spin unlock ******************/ /* * Check whether the irq enable registers and the irq status are set diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 75fbcf6b102e..040bcefcb040 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe) return usbhsp_flags_has(pipe, IS_DIR_HOST); } +int usbhs_pipe_is_running(struct usbhs_pipe *pipe) +{ + return usbhsp_flags_has(pipe, IS_RUNNING); +} + +void usbhs_pipe_running(struct usbhs_pipe *pipe, int running) +{ + if (running) + usbhsp_flags_set(pipe, IS_RUNNING); + else + usbhsp_flags_clr(pipe, IS_RUNNING); +} + void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence) { u16 mask = (SQCLR | SQSET); diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h index 406f36d050e4..d24a05972370 100644 --- a/drivers/usb/renesas_usbhs/pipe.h +++ b/drivers/usb/renesas_usbhs/pipe.h @@ -36,6 +36,7 @@ struct usbhs_pipe { #define USBHS_PIPE_FLAGS_IS_USED (1 << 0) #define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1) #define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2) +#define USBHS_PIPE_FLAGS_IS_RUNNING (1 << 3) struct usbhs_pkt_handle *handler; @@ -80,6 +81,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv); void usbhs_pipe_remove(struct usbhs_priv *priv); int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe); int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe); +int usbhs_pipe_is_running(struct usbhs_pipe *pipe); +void usbhs_pipe_running(struct usbhs_pipe *pipe, int running); + void usbhs_pipe_init(struct usbhs_priv *priv, int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map)); int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 216ce3078270..dc72b924c399 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -146,6 +146,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, + { USB_DEVICE(FTDI_VID, 
FTDI_BM_ATOM_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, @@ -727,6 +728,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, + { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, @@ -934,8 +936,12 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, + /* ekey Devices */ + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, /* Infineon Devices */ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, + /* GE Healthcare devices */ + { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 1e58d90a0b6c..5937b2d242f2 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -42,6 +42,8 @@ /* www.candapter.com Ewert Energy Systems CANdapter device */ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ +#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */ + /* * Texas Instruments XDS100v2 JTAG / BeagleBone A3 * http://processors.wiki.ti.com/index.php/XDS100 @@ -835,6 +837,12 @@ #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ /* + * NOVITUS printers + */ +#define NOVITUS_VID 0x1a28 +#define NOVITUS_BONO_E_PID 0x6010 + +/* * RT Systems programming cables for various ham radios */ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ @@ -1378,3 +1386,14 @@ #define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */ #define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */ #define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */ + +/* + * ekey biometric systems GmbH (http://ekey.net/) + */ +#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */ + +/* + * GE Healthcare devices + */ +#define GE_HEALTHCARE_VID 0x1901 +#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index a9688940543d..54a8120897a6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -275,8 +275,12 @@ static void option_instat_callback(struct urb *urb); #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 -#define ZTE_PRODUCT_MC2718 0xffe8 #define ZTE_PRODUCT_AC2726 0xfff1 +#define ZTE_PRODUCT_CDMA_TECH 0xfffe +#define ZTE_PRODUCT_AC8710T 0xffff +#define ZTE_PRODUCT_MC2718 0xffe8 +#define ZTE_PRODUCT_AD3812 0xffeb +#define ZTE_PRODUCT_MC2716 0xffed #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -494,6 +498,10 @@ static void option_instat_callback(struct urb *urb); #define INOVIA_VENDOR_ID 0x20a6 #define INOVIA_SEW858 0x1105 +/* VIA Telecom */ +#define VIATELECOM_VENDOR_ID 0x15eb +#define VIATELECOM_PRODUCT_CDS7 0x0001 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -527,10 +535,18 @@ static 
const struct option_blacklist_info zte_k3765_z_blacklist = { .reserved = BIT(4), }; +static const struct option_blacklist_info zte_ad3812_z_blacklist = { + .sendsetup = BIT(0) | BIT(1) | BIT(2), +}; + static const struct option_blacklist_info zte_mc2718_z_blacklist = { .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), }; +static const struct option_blacklist_info zte_mc2716_z_blacklist = { + .sendsetup = BIT(1) | BIT(2) | BIT(3), +}; + static const struct option_blacklist_info huawei_cdc12_blacklist = { .reserved = BIT(1) | BIT(2), }; @@ -1070,6 +1086,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ @@ -1544,13 +1561,18 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) }, - /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, @@ -1724,6 +1746,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -1916,6 +1939,8 @@ static void option_instat_callback(struct urb *urb) dev_dbg(dev, "%s: type %x req %x\n", __func__, req_pkt->bRequestType, req_pkt->bRequest); } + } else if (status == -ENOENT || status == -ESHUTDOWN) { + dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status); } else dev_err(dev, "%s: error %d\n", __func__, status); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index b3d5a35c0d4b..e9bad928039f 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, 
PL2303_PRODUCT_ID_GPRS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 42bc082896ac..71fd9da1d6e7 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -22,6 +22,7 @@ #define PL2303_PRODUCT_ID_GPRS 0x0609 #define PL2303_PRODUCT_ID_HCR331 0x331a #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 +#define PL2303_PRODUCT_ID_ZTEK 0xe1f1 #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 6f7f01eb556a..46179a0828eb 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = { /* Sierra Wireless HSPA Non-Composite Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ - { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ + /* Sierra Wireless Direct IP modems */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, /* AT&T Direct IP LTE modems */ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, - { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ + /* Airprime/Sierra Wireless Direct IP modems */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 02de3110fe94..475723c006f9 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -764,29 +764,39 @@ static int usb_serial_probe(struct usb_interface *interface, if (usb_endpoint_is_bulk_in(endpoint)) { /* we found a bulk in endpoint */ dev_dbg(ddev, "found bulk in on endpoint %d\n", i); - bulk_in_endpoint[num_bulk_in] = endpoint; - ++num_bulk_in; + if (num_bulk_in < MAX_NUM_PORTS) { + bulk_in_endpoint[num_bulk_in] = endpoint; + ++num_bulk_in; + } } if (usb_endpoint_is_bulk_out(endpoint)) { /* we found a bulk out endpoint */ dev_dbg(ddev, "found bulk out on endpoint %d\n", i); - bulk_out_endpoint[num_bulk_out] = endpoint; - ++num_bulk_out; + if (num_bulk_out < MAX_NUM_PORTS) { + bulk_out_endpoint[num_bulk_out] = endpoint; + ++num_bulk_out; + } } if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dev_dbg(ddev, "found interrupt in on endpoint %d\n", i); - interrupt_in_endpoint[num_interrupt_in] = endpoint; - ++num_interrupt_in; + if (num_interrupt_in < MAX_NUM_PORTS) { + interrupt_in_endpoint[num_interrupt_in] = + endpoint; + ++num_interrupt_in; + } } if (usb_endpoint_is_int_out(endpoint)) { /* we found an interrupt out endpoint */ dev_dbg(ddev, "found interrupt out on endpoint %d\n", i); - interrupt_out_endpoint[num_interrupt_out] = endpoint; - ++num_interrupt_out; + if (num_interrupt_out < MAX_NUM_PORTS) { + interrupt_out_endpoint[num_interrupt_out] = + 
endpoint; + ++num_interrupt_out; + } } } @@ -809,8 +819,10 @@ static int usb_serial_probe(struct usb_interface *interface, if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n"); - interrupt_in_endpoint[num_interrupt_in] = endpoint; - ++num_interrupt_in; + if (num_interrupt_in < MAX_NUM_PORTS) { + interrupt_in_endpoint[num_interrupt_in] = endpoint; + ++num_interrupt_in; + } } } } @@ -850,6 +862,11 @@ static int usb_serial_probe(struct usb_interface *interface, num_ports = type->num_ports; } + if (num_ports > MAX_NUM_PORTS) { + dev_warn(ddev, "too many ports requested: %d\n", num_ports); + num_ports = MAX_NUM_PORTS; + } + serial->num_ports = num_ports; serial->num_bulk_in = num_bulk_in; serial->num_bulk_out = num_bulk_out; diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index e62f2dff8b7d..6c3734d2b45a 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -514,6 +514,10 @@ static void command_port_read_callback(struct urb *urb) dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__); return; } + if (!urb->actual_length) { + dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__); + return; + } if (status) { dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status); if (status != -ENOENT) @@ -534,7 +538,8 @@ static void command_port_read_callback(struct urb *urb) /* These are unsolicited reports from the firmware, hence no waiting command to wakeup */ dev_dbg(&urb->dev->dev, "%s - event received\n", __func__); - } else if (data[0] == WHITEHEAT_GET_DTR_RTS) { + } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) && + (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) { memcpy(command_info->result_buffer, &data[1], urb->actual_length - 1); command_info->command_finished = WHITEHEAT_CMD_COMPLETE; diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c index e40ab739c4a6..c9bb107d5e5c 100644 --- a/drivers/usb/serial/zte_ev.c +++ b/drivers/usb/serial/zte_ev.c @@ -272,28 +272,16 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) } static const struct usb_device_id id_table[] = { - /* AC8710, AC8710T */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) }, - /* AC8700 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) }, - /* MG880 */ - { USB_DEVICE(0x19d2, 0xfffd) }, - { USB_DEVICE(0x19d2, 0xfffc) }, - { USB_DEVICE(0x19d2, 0xfffb) }, - /* AC8710_V3 */ + { USB_DEVICE(0x19d2, 0xffec) }, + { USB_DEVICE(0x19d2, 0xffee) }, { USB_DEVICE(0x19d2, 0xfff6) }, { USB_DEVICE(0x19d2, 0xfff7) }, { USB_DEVICE(0x19d2, 0xfff8) }, { USB_DEVICE(0x19d2, 0xfff9) }, - { USB_DEVICE(0x19d2, 0xffee) }, - /* AC2716, MC2716 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) }, - /* AD3812 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) }, - { USB_DEVICE(0x19d2, 0xffec) }, - { USB_DEVICE(0x05C6, 0x3197) }, - { USB_DEVICE(0x05C6, 0x6000) }, - { USB_DEVICE(0x05C6, 0x9008) }, + { USB_DEVICE(0x19d2, 0xfffb) }, + { USB_DEVICE(0x19d2, 0xfffc) }, + /* MG880 */ + { USB_DEVICE(0x19d2, 0xfffd) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 503ac5c8d80f..8a6f371ed6e7 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -59,10 +59,6 @@ static int uas_use_uas_driver(struct usb_interface 
*intf, unsigned long flags = id->driver_info; int r, alt; - usb_stor_adjust_quirks(udev, &flags); - - if (flags & US_FL_IGNORE_UAS) - return 0; alt = uas_find_uas_alt_setting(intf); if (alt < 0) @@ -72,6 +68,29 @@ static int uas_use_uas_driver(struct usb_interface *intf, if (r < 0) return 0; + /* + * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is + * broken on the ASM1051, use the number of streams to differentiate. + * New ASM1053-s also support 32 streams, but have a different prod-id. + */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && + le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) { + if (udev->speed < USB_SPEED_SUPER) { + /* No streams info, assume ASM1051 */ + flags |= US_FL_IGNORE_UAS; + } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { + flags |= US_FL_IGNORE_UAS; + } + } + + usb_stor_adjust_quirks(udev, &flags); + + if (flags & US_FL_IGNORE_UAS) { + dev_warn(&udev->dev, + "UAS is blacklisted for this device, using usb-storage instead\n"); + return 0; + } + if (udev->bus->sg_tablesize == 0) { dev_warn(&udev->dev, "The driver for the USB controller %s does not support scatter-gather which is\n", diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 3f42785f653c..9bfa7252f7f9 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -970,6 +970,13 @@ static struct scsi_host_template uas_host_template = { .cmd_per_lun = 1, /* until we override it */ .skip_settle_delay = 1, .ordered_tag = 1, + + /* + * The uas drivers expects tags not to be bigger than the maximum + * per-device queue depth, which is not true with the blk-mq tag + * allocator. + */ + .disable_blk_mq = true, }; #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 80a5b366255f..4a5c68a47e46 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -101,6 +101,12 @@ UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001, "PhotoSmart R707", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY), +UNUSUAL_DEV( 0x03f3, 0x0001, 0x0000, 0x9999, + "Adaptec", + "USBConnect 2000", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net> * and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product) * for USB floppies that need the SINGLE_LUN enforcement. 
@@ -741,6 +747,12 @@ UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_SINGLE_LUN ), +UNUSUAL_DEV( 0x059b, 0x0040, 0x0100, 0x0100, + "Iomega", + "Jaz USB Adapter", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SINGLE_LUN ), + /* Reported by <Hendryk.Pfeiffer@gmx.de> */ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, "LaCie", @@ -922,6 +934,12 @@ UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +UNUSUAL_DEV( 0x06ca, 0x2003, 0x0100, 0x0100, + "Newer Technology", + "uSCSI", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Reported by Adrian Pilchowiec <adi1981@epf.pl> */ UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000, "RockChip", @@ -1113,6 +1131,18 @@ UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE), +UNUSUAL_DEV( 0x085a, 0x0026, 0x0100, 0x0133, + "Xircom", + "PortGear USB-SCSI (Mac USB Dock)", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + +UNUSUAL_DEV( 0x085a, 0x0028, 0x0100, 0x0133, + "Xircom", + "PortGear USB to SCSI Converter", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Submitted by Jan De Luyck <lkml@kcore.org> */ UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000, "CITIZEN", @@ -1952,6 +1982,14 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), +/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) + * and Mac USB Dock USB-SCSI */ +UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, + "Entrega Technologies", + "USB to SCSI Converter", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Reported by Robert Schedel <r.schedel@yahoo.de> * Note: this is a 'super top' device like the above 14cd/6600 device */ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, @@ -1974,6 +2012,12 @@ UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), +UNUSUAL_DEV( 0x1822, 0x0001, 0x0000, 0x9999, + "Ariston Technologies", + "iConnect USB to SCSI adapter", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Reported by Hans de Goede <hdegoede@redhat.com> * These Appotech controllers are found in Picture Frames, they provide a * (buggy) emulation of a cdrom drive which contains the windows software diff --git a/drivers/staging/usbip/Kconfig b/drivers/usb/usbip/Kconfig index bd99e9e47e50..bd99e9e47e50 100644 --- a/drivers/staging/usbip/Kconfig +++ b/drivers/usb/usbip/Kconfig diff --git a/drivers/staging/usbip/Makefile b/drivers/usb/usbip/Makefile index 9ecd61545be1..9ecd61545be1 100644 --- a/drivers/staging/usbip/Makefile +++ b/drivers/usb/usbip/Makefile diff --git a/drivers/staging/usbip/README b/drivers/usb/usbip/README index 41a2cf2e77a6..41a2cf2e77a6 100644 --- a/drivers/staging/usbip/README +++ b/drivers/usb/usbip/README diff --git a/drivers/staging/usbip/stub.h b/drivers/usb/usbip/stub.h index 266e2b0ce9a8..266e2b0ce9a8 100644 --- a/drivers/staging/usbip/stub.h +++ b/drivers/usb/usbip/stub.h diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 51d0c7188738..fac20e0434c0 100644 --- a/drivers/staging/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -26,33 +26,6 @@ #include "stub.h" /* - * Define device IDs here if you want to explicitly limit exportable devices. 
- * In most cases, wildcard matching will be okay because driver binding can be - * changed dynamically by a userland program. - */ -static struct usb_device_id stub_table[] = { -#if 0 - /* just an example */ - { USB_DEVICE(0x05ac, 0x0301) }, /* Mac 1 button mouse */ - { USB_DEVICE(0x0430, 0x0009) }, /* Plat Home Keyboard */ - { USB_DEVICE(0x059b, 0x0001) }, /* Iomega USB Zip 100 */ - { USB_DEVICE(0x04b3, 0x4427) }, /* IBM USB CD-ROM */ - { USB_DEVICE(0x05a9, 0xa511) }, /* LifeView USB cam */ - { USB_DEVICE(0x55aa, 0x0201) }, /* Imation card reader */ - { USB_DEVICE(0x046d, 0x0870) }, /* Qcam Express(QV-30) */ - { USB_DEVICE(0x04bb, 0x0101) }, /* IO-DATA HD 120GB */ - { USB_DEVICE(0x04bb, 0x0904) }, /* IO-DATA USB-ET/TX */ - { USB_DEVICE(0x04bb, 0x0201) }, /* IO-DATA USB-ET/TX */ - { USB_DEVICE(0x08bb, 0x2702) }, /* ONKYO USB Speaker */ - { USB_DEVICE(0x046d, 0x08b2) }, /* Logicool Qcam 4000 Pro */ -#endif - /* magic for wild card */ - { .driver_info = 1 }, - { 0, } /* Terminating entry */ -}; -MODULE_DEVICE_TABLE(usb, stub_table); - -/* * usbip_status shows the status of usbip-host as long as this driver is bound * to the target device. */ diff --git a/drivers/staging/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 44ab43fc4fcc..44ab43fc4fcc 100644 --- a/drivers/staging/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 00e475c51a12..00e475c51a12 100644 --- a/drivers/staging/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index dbcabc9dbe0d..dbcabc9dbe0d 100644 --- a/drivers/staging/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index facaaf003f19..facaaf003f19 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index 4da3866a037d..86b08475c254 100644 --- a/drivers/staging/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h @@ -29,7 +29,7 @@ #include <linux/types.h> #include <linux/usb.h> #include <linux/wait.h> -#include "uapi/usbip.h" +#include <uapi/linux/usbip.h> #define USBIP_VERSION "1.0.0" diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c index 64933b993d7a..64933b993d7a 100644 --- a/drivers/staging/usbip/usbip_event.c +++ b/drivers/usb/usbip/usbip_event.c diff --git a/drivers/staging/usbip/usbip_protocol.txt b/drivers/usb/usbip/usbip_protocol.txt index 16b6fe27284c..16b6fe27284c 100644 --- a/drivers/staging/usbip/usbip_protocol.txt +++ b/drivers/usb/usbip/usbip_protocol.txt diff --git a/drivers/staging/usbip/vhci.h b/drivers/usb/usbip/vhci.h index a863a98a91ce..a863a98a91ce 100644 --- a/drivers/staging/usbip/vhci.h +++ b/drivers/usb/usbip/vhci.h diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index c02374b6049c..c02374b6049c 100644 --- a/drivers/staging/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 00e4a54308e4..00e4a54308e4 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 211f43f67ea2..211f43f67ea2 100644 --- a/drivers/staging/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c diff --git a/drivers/staging/usbip/vhci_tx.c 
b/drivers/usb/usbip/vhci_tx.c index 409fd99f3257..409fd99f3257 100644 --- a/drivers/staging/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 3e2e4ed20157..e279015be466 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -2602,6 +2602,7 @@ static void wa_buf_in_cb(struct urb *urb) dev = &wa->usb_iface->dev; --(wa->active_buf_in_urbs); active_buf_in_urbs = wa->active_buf_in_urbs; + rpipe = xfer->ep->hcpriv; if (usb_pipeisoc(xfer->urb->pipe)) { struct usb_iso_packet_descriptor *iso_frame_desc = @@ -2659,7 +2660,6 @@ static void wa_buf_in_cb(struct urb *urb) resubmit_dti = (isoc_data_frame_count == urb_frame_count); } else if (active_buf_in_urbs == 0) { - rpipe = xfer->ep->hcpriv; dev_dbg(dev, "xfer %p 0x%08X#%u: data in done (%zu bytes)\n", xfer, wa_xfer_id(xfer), seg->index, @@ -2685,7 +2685,6 @@ static void wa_buf_in_cb(struct urb *urb) */ resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING; spin_lock_irqsave(&xfer->lock, flags); - rpipe = xfer->ep->hcpriv; if (printk_ratelimit()) dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n", xfer, wa_xfer_id(xfer), seg->index, diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c index 80079b8fed15..d0303f0dbe15 100644 --- a/drivers/uwb/lc-dev.c +++ b/drivers/uwb/lc-dev.c @@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) uwb_dev->mac_addr = *bce->mac_addr; uwb_dev->dev_addr = bce->dev_addr; dev_set_name(&uwb_dev->dev, "%s", macbuf); + + /* plug the beacon cache */ + bce->uwb_dev = uwb_dev; + uwb_dev->bce = bce; + uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ + result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); if (result < 0) { dev_err(dev, "new device %s: cannot instantiate device\n", macbuf); goto error_dev_add; } - /* plug the beacon cache */ - bce->uwb_dev = uwb_dev; - uwb_dev->bce = bce; - uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ + dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, dev_name(rc->uwb_dev.dev.parent)); @@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) return; error_dev_add: + bce->uwb_dev = NULL; + uwb_bce_put(bce); kfree(uwb_dev); return; } diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index af7b204b9215..d8c57636b9ce 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -8,11 +8,17 @@ config VFIO_IOMMU_SPAPR_TCE depends on VFIO && SPAPR_TCE_IOMMU default n +config VFIO_SPAPR_EEH + tristate + depends on EEH && VFIO_IOMMU_SPAPR_TCE + default n + menuconfig VFIO tristate "VFIO Non-Privileged userspace driver framework" depends on IOMMU_API select VFIO_IOMMU_TYPE1 if X86 select VFIO_IOMMU_SPAPR_TCE if (PPC_POWERNV || PPC_PSERIES) + select VFIO_SPAPR_EEH if (PPC_POWERNV || PPC_PSERIES) select ANON_INODES help VFIO provides a framework for secure userspace device drivers. 
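The VFIO_SPAPR_EEH helper enabled by the Kconfig change above is driven from userspace through the container ioctls added later in this series (the VFIO_EEH_PE_OP handling in vfio_iommu_spapr_tce.c and the new vfio_spapr_eeh.c). As a rough, hypothetical sketch only — assuming the matching uAPI definitions (struct vfio_eeh_pe_op with argsz/flags/op fields, VFIO_EEH, VFIO_EEH_PE_OP and the VFIO_EEH_PE_* op codes) are exported through <linux/vfio.h> by the same series, and that "container_fd" is an sPAPR TCE container with a group already attached — a user could probe for EEH support and thaw a frozen PE's MMIO roughly like this:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/*
 * Hypothetical userspace sketch (not part of the patch): thaw the MMIO
 * window of a frozen EEH PE behind an sPAPR TCE container.  Assumes
 * container_fd already has VFIO_SPAPR_TCE_IOMMU set and a group attached.
 */
static int thaw_pe_mmio(int container_fd)
{
	struct vfio_eeh_pe_op op;

	/* tce_iommu_ioctl() forwards this to vfio_spapr_iommu_eeh_ioctl(),
	 * which reports 1 only when eeh_enabled(). */
	if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_EEH) != 1)
		return -1;

	memset(&op, 0, sizeof(op));
	op.argsz = sizeof(op);	/* must cover offsetofend(..., op) */
	op.flags = 0;		/* no flags are defined; non-zero is rejected */
	op.op = VFIO_EEH_PE_UNFREEZE_IO;

	return ioctl(container_fd, VFIO_EEH_PE_OP, &op);
}

Each op code is translated one-to-one onto the eeh_pe_set_option()/eeh_pe_reset()/eeh_pe_configure() calls in the new vfio_spapr_eeh.c, so the policy stays in the EEH core and the VFIO layer only forwards requests.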
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index 72bfabc8629e..0b035b12600a 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_VFIO) += vfio.o obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o +obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o obj-$(CONFIG_VFIO_PCI) += pci/ diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 010e0f8b8e4f..f7825332a325 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -37,6 +37,10 @@ module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(nointxmask, "Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag."); +static DEFINE_MUTEX(driver_lock); + +static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); + static int vfio_pci_enable(struct vfio_pci_device *vdev) { struct pci_dev *pdev = vdev->pdev; @@ -44,6 +48,9 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) u16 cmd; u8 msix_pos; + /* Don't allow our initial saved state to include busmaster */ + pci_clear_master(pdev); + ret = pci_enable_device(pdev); if (ret) return ret; @@ -99,7 +106,8 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) struct pci_dev *pdev = vdev->pdev; int bar; - pci_disable_device(pdev); + /* Stop the device from further DMA */ + pci_clear_master(pdev); vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER, @@ -117,6 +125,8 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) vdev->barmap[bar] = NULL; } + vdev->needs_reset = true; + /* * If we have saved state, restore it. If we can reset the device, * even better. 
Resetting with current state seems better than @@ -128,7 +138,7 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) __func__, dev_name(&pdev->dev)); if (!vdev->reset_works) - return; + goto out; pci_save_state(pdev); } @@ -148,17 +158,29 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev) if (ret) pr_warn("%s: Failed to reset device %s (%d)\n", __func__, dev_name(&pdev->dev), ret); + else + vdev->needs_reset = false; } pci_restore_state(pdev); +out: + pci_disable_device(pdev); + + vfio_pci_try_bus_reset(vdev); } static void vfio_pci_release(void *device_data) { struct vfio_pci_device *vdev = device_data; - if (atomic_dec_and_test(&vdev->refcnt)) + mutex_lock(&driver_lock); + + if (!(--vdev->refcnt)) { + vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + } + + mutex_unlock(&driver_lock); module_put(THIS_MODULE); } @@ -166,19 +188,26 @@ static void vfio_pci_release(void *device_data) static int vfio_pci_open(void *device_data) { struct vfio_pci_device *vdev = device_data; + int ret = 0; if (!try_module_get(THIS_MODULE)) return -ENODEV; - if (atomic_inc_return(&vdev->refcnt) == 1) { - int ret = vfio_pci_enable(vdev); - if (ret) { - module_put(THIS_MODULE); - return ret; - } - } + mutex_lock(&driver_lock); - return 0; + if (!vdev->refcnt) { + ret = vfio_pci_enable(vdev); + if (ret) + goto error; + + vfio_spapr_pci_eeh_open(vdev->pdev); + } + vdev->refcnt++; +error: + mutex_unlock(&driver_lock); + if (ret) + module_put(THIS_MODULE); + return ret; } static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) @@ -833,7 +862,6 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) vdev->irq_type = VFIO_PCI_NUM_IRQS; mutex_init(&vdev->igate); spin_lock_init(&vdev->irqlock); - atomic_set(&vdev->refcnt, 0); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { @@ -848,12 +876,15 @@ static void vfio_pci_remove(struct pci_dev *pdev) { struct vfio_pci_device *vdev; + mutex_lock(&driver_lock); + vdev = vfio_del_group_dev(&pdev->dev); - if (!vdev) - return; + if (vdev) { + iommu_group_put(pdev->dev.iommu_group); + kfree(vdev); + } - iommu_group_put(pdev->dev.iommu_group); - kfree(vdev); + mutex_unlock(&driver_lock); } static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev, @@ -896,6 +927,110 @@ static struct pci_driver vfio_pci_driver = { .err_handler = &vfio_err_handlers, }; +/* + * Test whether a reset is necessary and possible. We mark devices as + * needs_reset when they are released, but don't have a function-local reset + * available. If any of these exist in the affected devices, we want to do + * a bus/slot reset. We also need all of the affected devices to be unused, + * so we abort if any device has a non-zero refcnt. driver_lock prevents a + * device from being opened during the scan or unbound from vfio-pci. + */ +static int vfio_pci_test_bus_reset(struct pci_dev *pdev, void *data) +{ + bool *needs_reset = data; + struct pci_driver *pci_drv = ACCESS_ONCE(pdev->driver); + int ret = -EBUSY; + + if (pci_drv == &vfio_pci_driver) { + struct vfio_device *device; + struct vfio_pci_device *vdev; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return ret; + + vdev = vfio_device_data(device); + if (vdev) { + if (vdev->needs_reset) + *needs_reset = true; + + if (!vdev->refcnt) + ret = 0; + } + + vfio_device_put(device); + } + + /* + * TODO: vfio-core considers groups to be viable even if some devices + * are attached to known drivers, like pci-stub or pcieport. 
We can't + * freeze devices from being unbound to those drivers like we can + * here though, so it would be racy to test for them. We also can't + * use device_lock() to prevent changes as that would interfere with + * PCI-core taking device_lock during bus reset. For now, we require + * devices to be bound to vfio-pci to get a bus/slot reset on release. + */ + + return ret; +} + +/* Clear needs_reset on all affected devices after successful bus/slot reset */ +static int vfio_pci_clear_needs_reset(struct pci_dev *pdev, void *data) +{ + struct pci_driver *pci_drv = ACCESS_ONCE(pdev->driver); + + if (pci_drv == &vfio_pci_driver) { + struct vfio_device *device; + struct vfio_pci_device *vdev; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return 0; + + vdev = vfio_device_data(device); + if (vdev) + vdev->needs_reset = false; + + vfio_device_put(device); + } + + return 0; +} + +/* + * Attempt to do a bus/slot reset if there are devices affected by a reset for + * this device that are needs_reset and all of the affected devices are unused + * (!refcnt). Callers of this function are required to hold driver_lock such + * that devices can not be unbound from vfio-pci or opened by a user while we + * test for and perform a bus/slot reset. + */ +static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev) +{ + bool needs_reset = false, slot = false; + int ret; + + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return; + + if (vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_test_bus_reset, + &needs_reset, slot) || !needs_reset) + return; + + if (slot) + ret = pci_try_reset_slot(vdev->pdev->slot); + else + ret = pci_try_reset_bus(vdev->pdev->bus); + + if (ret) + return; + + vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_clear_needs_reset, NULL, slot); +} + static void __exit vfio_pci_cleanup(void) { pci_unregister_driver(&vfio_pci_driver); diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 9c6d5d0f3b02..671c17a6e6d0 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -54,8 +54,9 @@ struct vfio_pci_device { bool extended_caps; bool bardirty; bool has_vga; + bool needs_reset; struct pci_saved_state *pci_saved_state; - atomic_t refcnt; + int refcnt; struct eventfd_ctx *err_trigger; }; diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index a84788ba662c..730b4ef3e0cc 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -156,7 +156,16 @@ static long tce_iommu_ioctl(void *iommu_data, switch (cmd) { case VFIO_CHECK_EXTENSION: - return (arg == VFIO_SPAPR_TCE_IOMMU) ? 1 : 0; + switch (arg) { + case VFIO_SPAPR_TCE_IOMMU: + ret = 1; + break; + default: + ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg); + break; + } + + return (ret < 0) ? 
0 : ret; case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { struct vfio_iommu_spapr_tce_info info; @@ -283,6 +292,12 @@ static long tce_iommu_ioctl(void *iommu_data, tce_iommu_disable(container); mutex_unlock(&container->lock); return 0; + case VFIO_EEH_PE_OP: + if (!container->tbl || !container->tbl->it_group) + return -ENODEV; + + return vfio_spapr_iommu_eeh_ioctl(container->tbl->it_group, + cmd, arg); } return -ENOTTY; diff --git a/drivers/vfio/vfio_spapr_eeh.c b/drivers/vfio/vfio_spapr_eeh.c new file mode 100644 index 000000000000..86dfceb9201f --- /dev/null +++ b/drivers/vfio/vfio_spapr_eeh.c @@ -0,0 +1,100 @@ +/* + * EEH functionality support for VFIO devices. The feature is only + * available on sPAPR compatible platforms. + * + * Copyright Gavin Shan, IBM Corporation 2014. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/vfio.h> +#include <asm/eeh.h> + +#define DRIVER_VERSION "0.1" +#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation" +#define DRIVER_DESC "VFIO IOMMU SPAPR EEH" + +/* We might build address mapping here for "fast" path later */ +void vfio_spapr_pci_eeh_open(struct pci_dev *pdev) +{ + eeh_dev_open(pdev); +} +EXPORT_SYMBOL_GPL(vfio_spapr_pci_eeh_open); + +void vfio_spapr_pci_eeh_release(struct pci_dev *pdev) +{ + eeh_dev_release(pdev); +} +EXPORT_SYMBOL_GPL(vfio_spapr_pci_eeh_release); + +long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, + unsigned int cmd, unsigned long arg) +{ + struct eeh_pe *pe; + struct vfio_eeh_pe_op op; + unsigned long minsz; + long ret = -EINVAL; + + switch (cmd) { + case VFIO_CHECK_EXTENSION: + if (arg == VFIO_EEH) + ret = eeh_enabled() ? 1 : 0; + else + ret = 0; + break; + case VFIO_EEH_PE_OP: + pe = eeh_iommu_group_to_pe(group); + if (!pe) + return -ENODEV; + + minsz = offsetofend(struct vfio_eeh_pe_op, op); + if (copy_from_user(&op, (void __user *)arg, minsz)) + return -EFAULT; + if (op.argsz < minsz || op.flags) + return -EINVAL; + + switch (op.op) { + case VFIO_EEH_PE_DISABLE: + ret = eeh_pe_set_option(pe, EEH_OPT_DISABLE); + break; + case VFIO_EEH_PE_ENABLE: + ret = eeh_pe_set_option(pe, EEH_OPT_ENABLE); + break; + case VFIO_EEH_PE_UNFREEZE_IO: + ret = eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO); + break; + case VFIO_EEH_PE_UNFREEZE_DMA: + ret = eeh_pe_set_option(pe, EEH_OPT_THAW_DMA); + break; + case VFIO_EEH_PE_GET_STATE: + ret = eeh_pe_get_state(pe); + break; + case VFIO_EEH_PE_RESET_DEACTIVATE: + ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE); + break; + case VFIO_EEH_PE_RESET_HOT: + ret = eeh_pe_reset(pe, EEH_RESET_HOT); + break; + case VFIO_EEH_PE_RESET_FUNDAMENTAL: + ret = eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL); + break; + case VFIO_EEH_PE_CONFIGURE: + ret = eeh_pe_configure(pe); + break; + default: + ret = -EINVAL; + } + } + + return ret; +} +EXPORT_SYMBOL(vfio_spapr_iommu_eeh_ioctl); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 5d449059a556..8d03924749b8 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -178,17 +178,6 @@ config BACKLIGHT_ATMEL_LCDC If in doubt, it's safe to enable this option; it doesn't kick in unless the board's description says it's wired that way. 
-config BACKLIGHT_ATMEL_PWM - tristate "Atmel PWM backlight control" - depends on ATMEL_PWM - help - Say Y here if you want to use the PWM peripheral in Atmel AT91 and - AVR32 devices. This driver will need additional platform data to know - which PWM instance to use and how to configure it. - - To compile this driver as a module, choose M here: the module will be - called atmel-pwm-bl. - config BACKLIGHT_EP93XX tristate "Cirrus EP93xx Backlight Driver" depends on FB_EP93XX @@ -207,6 +196,15 @@ config BACKLIGHT_GENERIC known as the Corgi backlight driver. If you have a Sharp Zaurus SL-C7xx, SL-Cxx00 or SL-6000x say y. +config BACKLIGHT_IPAQ_MICRO + tristate "iPAQ microcontroller backlight driver" + depends on MFD_IPAQ_MICRO + default y + help + Say y to enable the backlight driver for Compaq iPAQ handheld + computers. Say yes if you have one of the h3100/h3600/h3700 + machines. + config BACKLIGHT_LM3533 tristate "Backlight Driver for LM3533" depends on BACKLIGHT_CLASS_DEVICE diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index bb820024f346..fcd50b732165 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -25,7 +25,6 @@ obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o obj-$(CONFIG_BACKLIGHT_AS3711) += as3711_bl.o -obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o obj-$(CONFIG_BACKLIGHT_BD6107) += bd6107.o obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o @@ -36,6 +35,7 @@ obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o +obj-$(CONFIG_BACKLIGHT_IPAQ_MICRO) += ipaq_micro_bl.o obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c index ec5350f2c28a..86234c31d79c 100644 --- a/drivers/video/backlight/aat2870_bl.c +++ b/drivers/video/backlight/aat2870_bl.c @@ -67,11 +67,6 @@ static inline int aat2870_bl_disable(struct aat2870_bl_driver_data *aat2870_bl) return aat2870->write(aat2870, AAT2870_BL_CH_EN, 0x0); } -static int aat2870_bl_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static int aat2870_bl_update_status(struct backlight_device *bd) { struct aat2870_bl_driver_data *aat2870_bl = bl_get_data(bd); @@ -120,7 +115,6 @@ static int aat2870_bl_check_fb(struct backlight_device *bd, struct fb_info *fi) static const struct backlight_ops aat2870_bl_ops = { .options = BL_CORE_SUSPENDRESUME, - .get_brightness = aat2870_bl_get_brightness, .update_status = aat2870_bl_update_status, .check_fb = aat2870_bl_check_fb, }; diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c index d8952c4aa689..4726c8be626f 100644 --- a/drivers/video/backlight/ams369fg06.c +++ b/drivers/video/backlight/ams369fg06.c @@ -410,11 +410,6 @@ static int ams369fg06_set_power(struct lcd_device *ld, int power) return ams369fg06_power(lcd, power); } -static int ams369fg06_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static int ams369fg06_set_brightness(struct backlight_device *bd) { int ret = 0; @@ -443,7 +438,6 @@ static struct lcd_ops ams369fg06_lcd_ops = { }; static const struct backlight_ops 
ams369fg06_backlight_ops = { - .get_brightness = ams369fg06_get_brightness, .update_status = ams369fg06_set_brightness, }; diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c deleted file mode 100644 index 261b1a4ec3d8..000000000000 --- a/drivers/video/backlight/atmel-pwm-bl.c +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (C) 2008 Atmel Corporation - * - * Backlight driver using Atmel PWM peripheral. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/fb.h> -#include <linux/gpio.h> -#include <linux/backlight.h> -#include <linux/atmel_pwm.h> -#include <linux/atmel-pwm-bl.h> -#include <linux/slab.h> - -struct atmel_pwm_bl { - const struct atmel_pwm_bl_platform_data *pdata; - struct backlight_device *bldev; - struct platform_device *pdev; - struct pwm_channel pwmc; - int gpio_on; -}; - -static void atmel_pwm_bl_set_gpio_on(struct atmel_pwm_bl *pwmbl, int on) -{ - if (!gpio_is_valid(pwmbl->gpio_on)) - return; - - gpio_set_value(pwmbl->gpio_on, on ^ pwmbl->pdata->on_active_low); -} - -static int atmel_pwm_bl_set_intensity(struct backlight_device *bd) -{ - struct atmel_pwm_bl *pwmbl = bl_get_data(bd); - int intensity = bd->props.brightness; - int pwm_duty; - - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - if (bd->props.fb_blank != FB_BLANK_UNBLANK) - intensity = 0; - - if (pwmbl->pdata->pwm_active_low) - pwm_duty = pwmbl->pdata->pwm_duty_min + intensity; - else - pwm_duty = pwmbl->pdata->pwm_duty_max - intensity; - - if (pwm_duty > pwmbl->pdata->pwm_duty_max) - pwm_duty = pwmbl->pdata->pwm_duty_max; - if (pwm_duty < pwmbl->pdata->pwm_duty_min) - pwm_duty = pwmbl->pdata->pwm_duty_min; - - if (!intensity) { - atmel_pwm_bl_set_gpio_on(pwmbl, 0); - pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty); - pwm_channel_disable(&pwmbl->pwmc); - } else { - pwm_channel_enable(&pwmbl->pwmc); - pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty); - atmel_pwm_bl_set_gpio_on(pwmbl, 1); - } - - return 0; -} - -static int atmel_pwm_bl_get_intensity(struct backlight_device *bd) -{ - struct atmel_pwm_bl *pwmbl = bl_get_data(bd); - u32 cdty; - u32 intensity; - - cdty = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY); - if (pwmbl->pdata->pwm_active_low) - intensity = cdty - pwmbl->pdata->pwm_duty_min; - else - intensity = pwmbl->pdata->pwm_duty_max - cdty; - - return intensity & 0xffff; -} - -static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl) -{ - unsigned long pwm_rate = pwmbl->pwmc.mck; - unsigned long prescale = DIV_ROUND_UP(pwm_rate, - (pwmbl->pdata->pwm_frequency * - pwmbl->pdata->pwm_compare_max)) - 1; - - /* - * Prescale must be power of two and maximum 0xf in size because of - * hardware limit. PWM speed will be: - * PWM module clock speed / (2 ^ prescale). 
- */ - prescale = fls(prescale); - if (prescale > 0xf) - prescale = 0xf; - - pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale); - pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY, - pwmbl->pdata->pwm_duty_min + - pwmbl->bldev->props.brightness); - pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD, - pwmbl->pdata->pwm_compare_max); - - dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver (%lu Hz)\n", - pwmbl->pwmc.mck / pwmbl->pdata->pwm_compare_max / - (1 << prescale)); - - return pwm_channel_enable(&pwmbl->pwmc); -} - -static const struct backlight_ops atmel_pwm_bl_ops = { - .get_brightness = atmel_pwm_bl_get_intensity, - .update_status = atmel_pwm_bl_set_intensity, -}; - -static int atmel_pwm_bl_probe(struct platform_device *pdev) -{ - struct backlight_properties props; - const struct atmel_pwm_bl_platform_data *pdata; - struct backlight_device *bldev; - struct atmel_pwm_bl *pwmbl; - unsigned long flags; - int retval; - - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) - return -ENODEV; - - if (pdata->pwm_compare_max < pdata->pwm_duty_max || - pdata->pwm_duty_min > pdata->pwm_duty_max || - pdata->pwm_frequency == 0) - return -EINVAL; - - pwmbl = devm_kzalloc(&pdev->dev, sizeof(struct atmel_pwm_bl), - GFP_KERNEL); - if (!pwmbl) - return -ENOMEM; - - pwmbl->pdev = pdev; - pwmbl->pdata = pdata; - pwmbl->gpio_on = pdata->gpio_on; - - retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc); - if (retval) - return retval; - - if (gpio_is_valid(pwmbl->gpio_on)) { - /* Turn display off by default. */ - if (pdata->on_active_low) - flags = GPIOF_OUT_INIT_HIGH; - else - flags = GPIOF_OUT_INIT_LOW; - - retval = devm_gpio_request_one(&pdev->dev, pwmbl->gpio_on, - flags, "gpio_atmel_pwm_bl"); - if (retval) - goto err_free_pwm; - } - - memset(&props, 0, sizeof(struct backlight_properties)); - props.type = BACKLIGHT_RAW; - props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min; - bldev = devm_backlight_device_register(&pdev->dev, "atmel-pwm-bl", - &pdev->dev, pwmbl, &atmel_pwm_bl_ops, - &props); - if (IS_ERR(bldev)) { - retval = PTR_ERR(bldev); - goto err_free_pwm; - } - - pwmbl->bldev = bldev; - - platform_set_drvdata(pdev, pwmbl); - - /* Power up the backlight by default at middle intesity. 
*/ - bldev->props.power = FB_BLANK_UNBLANK; - bldev->props.brightness = bldev->props.max_brightness / 2; - - retval = atmel_pwm_bl_init_pwm(pwmbl); - if (retval) - goto err_free_pwm; - - atmel_pwm_bl_set_intensity(bldev); - - return 0; - -err_free_pwm: - pwm_channel_free(&pwmbl->pwmc); - - return retval; -} - -static int atmel_pwm_bl_remove(struct platform_device *pdev) -{ - struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev); - - atmel_pwm_bl_set_gpio_on(pwmbl, 0); - pwm_channel_disable(&pwmbl->pwmc); - pwm_channel_free(&pwmbl->pwmc); - - return 0; -} - -static struct platform_driver atmel_pwm_bl_driver = { - .driver = { - .name = "atmel-pwm-bl", - }, - /* REVISIT add suspend() and resume() */ - .probe = atmel_pwm_bl_probe, - .remove = atmel_pwm_bl_remove, -}; - -module_platform_driver(atmel_pwm_bl_driver); - -MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>"); -MODULE_DESCRIPTION("Atmel PWM backlight driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:atmel-pwm-bl"); diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 428089009cd5..bddc8b17a4d8 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -223,6 +223,8 @@ static ssize_t actual_brightness_show(struct device *dev, mutex_lock(&bd->ops_lock); if (bd->ops && bd->ops->get_brightness) rc = sprintf(buf, "%d\n", bd->ops->get_brightness(bd)); + else + rc = sprintf(buf, "%d\n", bd->props.brightness); mutex_unlock(&bd->ops_lock); return rc; diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c index 16dd9bc625bd..fdb2f7e2c6b5 100644 --- a/drivers/video/backlight/bd6107.c +++ b/drivers/video/backlight/bd6107.c @@ -105,11 +105,6 @@ static int bd6107_backlight_update_status(struct backlight_device *backlight) return 0; } -static int bd6107_backlight_get_brightness(struct backlight_device *backlight) -{ - return backlight->props.brightness; -} - static int bd6107_backlight_check_fb(struct backlight_device *backlight, struct fb_info *info) { @@ -121,7 +116,6 @@ static int bd6107_backlight_check_fb(struct backlight_device *backlight, static const struct backlight_ops bd6107_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = bd6107_backlight_update_status, - .get_brightness = bd6107_backlight_get_brightness, .check_fb = bd6107_backlight_check_fb, }; diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c index 1cea68848f1a..aaead04a2d54 100644 --- a/drivers/video/backlight/gpio_backlight.c +++ b/drivers/video/backlight/gpio_backlight.c @@ -44,11 +44,6 @@ static int gpio_backlight_update_status(struct backlight_device *bl) return 0; } -static int gpio_backlight_get_brightness(struct backlight_device *bl) -{ - return bl->props.brightness; -} - static int gpio_backlight_check_fb(struct backlight_device *bl, struct fb_info *info) { @@ -60,7 +55,6 @@ static int gpio_backlight_check_fb(struct backlight_device *bl, static const struct backlight_ops gpio_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = gpio_backlight_update_status, - .get_brightness = gpio_backlight_get_brightness, .check_fb = gpio_backlight_check_fb, }; diff --git a/drivers/video/backlight/ipaq_micro_bl.c b/drivers/video/backlight/ipaq_micro_bl.c new file mode 100644 index 000000000000..347dc11d4ceb --- /dev/null +++ b/drivers/video/backlight/ipaq_micro_bl.c @@ -0,0 +1,83 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the 
terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * iPAQ microcontroller backlight support + * Author : Linus Walleij <linus.walleij@linaro.org> + */ + +#include <linux/backlight.h> +#include <linux/err.h> +#include <linux/fb.h> +#include <linux/init.h> +#include <linux/mfd/ipaq-micro.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +static int micro_bl_update_status(struct backlight_device *bd) +{ + struct ipaq_micro *micro = dev_get_drvdata(&bd->dev); + int intensity = bd->props.brightness; + struct ipaq_micro_msg msg = { + .id = MSG_BACKLIGHT, + .tx_len = 3, + }; + + if (bd->props.power != FB_BLANK_UNBLANK) + intensity = 0; + if (bd->props.state & (BL_CORE_FBBLANK | BL_CORE_SUSPENDED)) + intensity = 0; + + /* + * Message format: + * Byte 0: backlight instance (usually 1) + * Byte 1: on/off + * Byte 2: intensity, 0-255 + */ + msg.tx_data[0] = 0x01; + msg.tx_data[1] = intensity > 0 ? 1 : 0; + msg.tx_data[2] = intensity; + return ipaq_micro_tx_msg_sync(micro, &msg); +} + +static const struct backlight_ops micro_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .update_status = micro_bl_update_status, +}; + +static struct backlight_properties micro_bl_props = { + .type = BACKLIGHT_RAW, + .max_brightness = 255, + .power = FB_BLANK_UNBLANK, + .brightness = 64, +}; + +static int micro_backlight_probe(struct platform_device *pdev) +{ + struct backlight_device *bd; + struct ipaq_micro *micro = dev_get_drvdata(pdev->dev.parent); + + bd = devm_backlight_device_register(&pdev->dev, "ipaq-micro-backlight", + &pdev->dev, micro, &micro_bl_ops, + &micro_bl_props); + if (IS_ERR(bd)) + return PTR_ERR(bd); + + platform_set_drvdata(pdev, bd); + backlight_update_status(bd); + + return 0; +} + +static struct platform_driver micro_backlight_device_driver = { + .driver = { + .name = "ipaq-micro-backlight", + }, + .probe = micro_backlight_probe, +}; +module_platform_driver(micro_backlight_device_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("driver for iPAQ Atmel micro backlight"); +MODULE_ALIAS("platform:ipaq-micro-backlight"); diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c index da3876c9b3ae..228bc319de19 100644 --- a/drivers/video/backlight/jornada720_lcd.c +++ b/drivers/video/backlight/jornada720_lcd.c @@ -43,37 +43,38 @@ static int jornada_lcd_get_contrast(struct lcd_device *ld) jornada_ssp_start(); - if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) { - dev_err(&ld->dev, "get contrast failed\n"); - jornada_ssp_end(); - return -ETIMEDOUT; - } else { + if (jornada_ssp_byte(GETCONTRAST) == TXDUMMY) { ret = jornada_ssp_byte(TXDUMMY); - jornada_ssp_end(); - return ret; + goto success; } + + dev_err(&ld->dev, "failed to get contrast\n"); + ret = -ETIMEDOUT; + +success: + jornada_ssp_end(); + return ret; } static int jornada_lcd_set_contrast(struct lcd_device *ld, int value) { - int ret; + int ret = 0; jornada_ssp_start(); /* start by sending our set contrast cmd to mcu */ - ret = jornada_ssp_byte(SETCONTRAST); - - /* push the new value */ - if (jornada_ssp_byte(value) != TXDUMMY) { - dev_err(&ld->dev, "set contrast failed\n"); - jornada_ssp_end(); - return -ETIMEDOUT; + if (jornada_ssp_byte(SETCONTRAST) == TXDUMMY) { + /* if successful push the new value */ + if (jornada_ssp_byte(value) == TXDUMMY) + goto success; } - /* if we get here we can assume everything went well */ - jornada_ssp_end(); + dev_err(&ld->dev, "failed to set contrast\n"); + ret = -ETIMEDOUT; - return 0; +success: + 
jornada_ssp_end(); + return ret; } static int jornada_lcd_set_power(struct lcd_device *ld, int power) diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c index 506a6c236039..ccb44e8e4927 100644 --- a/drivers/video/backlight/ld9040.c +++ b/drivers/video/backlight/ld9040.c @@ -642,11 +642,6 @@ static int ld9040_get_power(struct lcd_device *ld) return lcd->power; } -static int ld9040_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static int ld9040_set_brightness(struct backlight_device *bd) { int ret = 0, brightness = bd->props.brightness; @@ -674,7 +669,6 @@ static struct lcd_ops ld9040_lcd_ops = { }; static const struct backlight_ops ld9040_backlight_ops = { - .get_brightness = ld9040_get_brightness, .update_status = ld9040_set_brightness, }; diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c index 2ca3a040007b..dcdd5443efcf 100644 --- a/drivers/video/backlight/lp855x_bl.c +++ b/drivers/video/backlight/lp855x_bl.c @@ -274,15 +274,9 @@ static int lp855x_bl_update_status(struct backlight_device *bl) return 0; } -static int lp855x_bl_get_brightness(struct backlight_device *bl) -{ - return bl->props.brightness; -} - static const struct backlight_ops lp855x_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = lp855x_bl_update_status, - .get_brightness = lp855x_bl_get_brightness, }; static int lp855x_backlight_register(struct lp855x *lp) diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c index daba34dc46d4..d6c4f6a2d43e 100644 --- a/drivers/video/backlight/lp8788_bl.c +++ b/drivers/video/backlight/lp8788_bl.c @@ -176,15 +176,9 @@ static int lp8788_bl_update_status(struct backlight_device *bl_dev) return 0; } -static int lp8788_bl_get_brightness(struct backlight_device *bl_dev) -{ - return bl_dev->props.brightness; -} - static const struct backlight_ops lp8788_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = lp8788_bl_update_status, - .get_brightness = lp8788_bl_get_brightness, }; static int lp8788_backlight_register(struct lp8788_bl *bl) diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c index 1802b2d1357d..8ab7297b118a 100644 --- a/drivers/video/backlight/lv5207lp.c +++ b/drivers/video/backlight/lv5207lp.c @@ -70,11 +70,6 @@ static int lv5207lp_backlight_update_status(struct backlight_device *backlight) return 0; } -static int lv5207lp_backlight_get_brightness(struct backlight_device *backlight) -{ - return backlight->props.brightness; -} - static int lv5207lp_backlight_check_fb(struct backlight_device *backlight, struct fb_info *info) { @@ -86,7 +81,6 @@ static int lv5207lp_backlight_check_fb(struct backlight_device *backlight, static const struct backlight_ops lv5207lp_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = lv5207lp_backlight_update_status, - .get_brightness = lv5207lp_backlight_get_brightness, .check_fb = lv5207lp_backlight_check_fb, }; diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c index 2098c5d6efb9..2e3f82063c03 100644 --- a/drivers/video/backlight/pandora_bl.c +++ b/drivers/video/backlight/pandora_bl.c @@ -100,15 +100,9 @@ done: return 0; } -static int pandora_backlight_get_brightness(struct backlight_device *bl) -{ - return bl->props.brightness; -} - static const struct backlight_ops pandora_backlight_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = pandora_backlight_update_status, - .get_brightness = 
pandora_backlight_get_brightness, }; static int pandora_backlight_probe(struct platform_device *pdev) diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 38ca88bc5c3e..b85983e97f0a 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -115,11 +115,6 @@ static int pwm_backlight_update_status(struct backlight_device *bl) return 0; } -static int pwm_backlight_get_brightness(struct backlight_device *bl) -{ - return bl->props.brightness; -} - static int pwm_backlight_check_fb(struct backlight_device *bl, struct fb_info *info) { @@ -130,7 +125,6 @@ static int pwm_backlight_check_fb(struct backlight_device *bl, static const struct backlight_ops pwm_backlight_ops = { .update_status = pwm_backlight_update_status, - .get_brightness = pwm_backlight_get_brightness, .check_fb = pwm_backlight_check_fb, }; @@ -179,6 +173,7 @@ static int pwm_backlight_parse_dt(struct device *dev, data->max_brightness--; } + data->enable_gpio = -EINVAL; return 0; } @@ -245,13 +240,10 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->dev = &pdev->dev; pb->enabled = false; - pb->enable_gpio = devm_gpiod_get(&pdev->dev, "enable"); + pb->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable"); if (IS_ERR(pb->enable_gpio)) { ret = PTR_ERR(pb->enable_gpio); - if (ret == -ENOENT) - pb->enable_gpio = NULL; - else - goto err_alloc; + goto err_alloc; } /* diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c index 2d6d48196c6d..f3a65c8940ed 100644 --- a/drivers/video/backlight/s6e63m0.c +++ b/drivers/video/backlight/s6e63m0.c @@ -597,11 +597,6 @@ static int s6e63m0_get_power(struct lcd_device *ld) return lcd->power; } -static int s6e63m0_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static int s6e63m0_set_brightness(struct backlight_device *bd) { int ret = 0, brightness = bd->props.brightness; @@ -629,7 +624,6 @@ static struct lcd_ops s6e63m0_lcd_ops = { }; static const struct backlight_ops s6e63m0_backlight_ops = { - .get_brightness = s6e63m0_get_brightness, .update_status = s6e63m0_set_brightness, }; diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c index 595dcf561020..2e04d93aa0ef 100644 --- a/drivers/video/backlight/tps65217_bl.c +++ b/drivers/video/backlight/tps65217_bl.c @@ -109,15 +109,9 @@ static int tps65217_bl_update_status(struct backlight_device *bl) return rc; } -static int tps65217_bl_get_brightness(struct backlight_device *bl) -{ - return bl->props.brightness; -} - static const struct backlight_ops tps65217_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = tps65217_bl_update_status, - .get_brightness = tps65217_bl_get_brightness }; static int tps65217_bl_hw_init(struct tps65217_bl *tps65217_bl, diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c index 552258c8f99d..17f21cedff9b 100644 --- a/drivers/video/fbdev/68328fb.c +++ b/drivers/video/fbdev/68328fb.c @@ -49,12 +49,6 @@ #error wrong architecture for the MC68x328 frame buffer device #endif -#if defined(CONFIG_FB_68328_INVERT) -#define MC68X328FB_MONO_VISUAL FB_VISUAL_MONO01 -#else -#define MC68X328FB_MONO_VISUAL FB_VISUAL_MONO10 -#endif - static u_long videomemory; static u_long videomemorysize; @@ -462,7 +456,7 @@ int __init mc68x328fb_init(void) fb_info.fix.line_length = get_line_length(mc68x328fb_default.xres_virtual, mc68x328fb_default.bits_per_pixel); fb_info.fix.visual = (mc68x328fb_default.bits_per_pixel) == 1 ? 
- MC68X328FB_MONO_VISUAL : FB_VISUAL_PSEUDOCOLOR; + FB_VISUAL_MONO10 : FB_VISUAL_PSEUDOCOLOR; if (fb_info.var.bits_per_pixel == 1) { fb_info.var.red.length = fb_info.var.green.length = fb_info.var.blue.length = 1; fb_info.var.red.offset = fb_info.var.green.offset = fb_info.var.blue.offset = 0; diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 59c98bfd5a8a..e911b9c96e19 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -280,6 +280,8 @@ config FB_ARMCLCD select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT + select FB_MODE_HELPERS if OF + select VIDEOMODE_HELPERS if OF help This framebuffer device driver is for the ARM PrimeCell PL110 Colour LCD controller. ARM PrimeCells provide the building @@ -290,6 +292,12 @@ config FB_ARMCLCD here and read <file:Documentation/kbuild/modules.txt>. The module will be called amba-clcd. +# Helper logic selected only by the ARM Versatile platform family. +config PLAT_VERSATILE_CLCD + def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS + depends on ARM + depends on FB_ARMCLCD && FB=y + config FB_ACORN bool "Acorn VIDC support" depends on (FB = y) && ARM && ARCH_ACORN @@ -301,15 +309,26 @@ config FB_ACORN hardware found in Acorn RISC PCs and other ARM-based machines. If unsure, say N. -config FB_CLPS711X - bool "CLPS711X LCD support" - depends on (FB = y) && ARM && ARCH_CLPS711X +config FB_CLPS711X_OLD + tristate select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT + +config FB_CLPS711X + tristate "CLPS711X LCD support" + depends on FB && (ARCH_CLPS711X || COMPILE_TEST) + select FB_CLPS711X_OLD if ARCH_CLPS711X && !ARCH_MULTIPLATFORM + select BACKLIGHT_LCD_SUPPORT + select FB_MODE_HELPERS + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + select LCD_CLASS_DEVICE + select VIDEOMODE_HELPERS help - Say Y to enable the Framebuffer driver for the CLPS7111 and - EP7212 processors. + Say Y to enable the Framebuffer driver for the Cirrus Logic + CLPS711X CPUs. 
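A note on the pattern running through the backlight drivers above (aat2870, ams369fg06, bd6107, gpio_backlight, and more below): they can delete their trivial .get_brightness callbacks because of the backlight.c change earlier in this series, which makes actual_brightness_show() fall back to props.brightness when no callback is provided. A minimal sketch of a driver relying on that fallback; the names here are illustrative, not taken from any of the patches:

#include <linux/backlight.h>

static int example_bl_update_status(struct backlight_device *bd)
{
	/* Push bd->props.brightness to the hardware here. */
	return 0;
}

/*
 * No .get_brightness needed: the sysfs "actual_brightness" attribute
 * now reports bd->props.brightness via the core fallback.
 */
static const struct backlight_ops example_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= example_bl_update_status,
};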
config FB_SA1100 bool "SA-1100 LCD support" @@ -2018,8 +2037,8 @@ config FB_TMIO_ACCELL config FB_S3C tristate "Samsung S3C framebuffer support" - depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || ARCH_S5P64X0 || \ - ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS) + depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || \ + ARCH_S5PV210 || ARCH_EXYNOS) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 0284f2a12538..1979afffccfe 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -14,7 +14,8 @@ obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o # Hardware specific drivers go first obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o obj-$(CONFIG_FB_ARC) += arcfb.o -obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o +obj-$(CONFIG_FB_CLPS711X) += clps711x-fb.o +obj-$(CONFIG_FB_CLPS711X_OLD) += clps711xfb.o obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o obj-$(CONFIG_FB_GRVGA) += grvga.o obj-$(CONFIG_FB_PM2) += pm2fb.o @@ -78,6 +79,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o obj-$(CONFIG_FB_PVR2) += pvr2fb.o obj-$(CONFIG_FB_VOODOO1) += sstfb.o obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o +obj-$(CONFIG_PLAT_VERSATILE_CLCD) += amba-clcd-versatile.o obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o obj-$(CONFIG_FB_68328) += 68328fb.o obj-$(CONFIG_FB_GBE) += gbefb.o diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c new file mode 100644 index 000000000000..7a8afcd4573e --- /dev/null +++ b/drivers/video/fbdev/amba-clcd-versatile.c @@ -0,0 +1,182 @@ +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/amba/bus.h> +#include <linux/amba/clcd.h> +#include <linux/platform_data/video-clcd-versatile.h> + +static struct clcd_panel vga = { + .mode = { + .name = "VGA", + .refresh = 60, + .xres = 640, + .yres = 480, + .pixclock = 39721, + .left_margin = 40, + .right_margin = 24, + .upper_margin = 32, + .lower_margin = 11, + .hsync_len = 96, + .vsync_len = 2, + .sync = 0, + .vmode = FB_VMODE_NONINTERLACED, + }, + .width = -1, + .height = -1, + .tim2 = TIM2_BCD | TIM2_IPC, + .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1), + .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888, + .bpp = 16, +}; + +static struct clcd_panel xvga = { + .mode = { + .name = "XVGA", + .refresh = 60, + .xres = 1024, + .yres = 768, + .pixclock = 15748, + .left_margin = 152, + .right_margin = 48, + .upper_margin = 23, + .lower_margin = 3, + .hsync_len = 104, + .vsync_len = 4, + .sync = 0, + .vmode = FB_VMODE_NONINTERLACED, + }, + .width = -1, + .height = -1, + .tim2 = TIM2_BCD | TIM2_IPC, + .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1), + .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888, + .bpp = 16, +}; + +/* Sanyo TM38QV67A02A - 3.8 inch QVGA (320x240) Color TFT */ +static struct clcd_panel sanyo_tm38qv67a02a = { + .mode = { + .name = "Sanyo TM38QV67A02A", + .refresh = 116, + .xres = 320, + .yres = 240, + .pixclock = 100000, + .left_margin = 6, + .right_margin = 6, + .upper_margin = 5, + .lower_margin = 5, + .hsync_len = 6, + .vsync_len = 6, + .sync = 0, + .vmode = FB_VMODE_NONINTERLACED, + }, + .width = -1, + .height = -1, + .tim2 = TIM2_BCD, + .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1), + .caps = CLCD_CAP_5551, + .bpp = 16, +}; + +static struct clcd_panel sanyo_2_5_in = { + .mode = { + .name = "Sanyo QVGA Portrait", + .refresh = 116, + .xres = 240, + .yres = 320, + .pixclock = 100000, + .left_margin = 20, + .right_margin = 10, + .upper_margin = 2, + .lower_margin = 2, 
+ .hsync_len = 10, + .vsync_len = 2, + .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + .vmode = FB_VMODE_NONINTERLACED, + }, + .width = -1, + .height = -1, + .tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC, + .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1), + .caps = CLCD_CAP_5551, + .bpp = 16, +}; + +/* Epson L2F50113T00 - 2.2 inch 176x220 Color TFT */ +static struct clcd_panel epson_l2f50113t00 = { + .mode = { + .name = "Epson L2F50113T00", + .refresh = 390, + .xres = 176, + .yres = 220, + .pixclock = 62500, + .left_margin = 3, + .right_margin = 2, + .upper_margin = 1, + .lower_margin = 0, + .hsync_len = 3, + .vsync_len = 2, + .sync = 0, + .vmode = FB_VMODE_NONINTERLACED, + }, + .width = -1, + .height = -1, + .tim2 = TIM2_BCD | TIM2_IPC, + .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1), + .caps = CLCD_CAP_5551, + .bpp = 16, +}; + +static struct clcd_panel *panels[] = { + &vga, + &xvga, + &sanyo_tm38qv67a02a, + &sanyo_2_5_in, + &epson_l2f50113t00, +}; + +struct clcd_panel *versatile_clcd_get_panel(const char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(panels); i++) + if (strcmp(panels[i]->mode.name, name) == 0) + break; + + if (i < ARRAY_SIZE(panels)) + return panels[i]; + + pr_err("CLCD: couldn't get parameters for panel %s\n", name); + + return NULL; +} + +int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize) +{ + dma_addr_t dma; + + fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize, + &dma, GFP_KERNEL); + if (!fb->fb.screen_base) { + pr_err("CLCD: unable to map framebuffer\n"); + return -ENOMEM; + } + + fb->fb.fix.smem_start = dma; + fb->fb.fix.smem_len = framesize; + + return 0; +} + +int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma) +{ + return dma_mmap_writecombine(&fb->dev->dev, vma, + fb->fb.screen_base, + fb->fb.fix.smem_start, + fb->fb.fix.smem_len); +} + +void versatile_clcd_remove_dma(struct clcd_fb *fb) +{ + dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len, + fb->fb.screen_base, fb->fb.fix.smem_start); +} diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 14d6b3793e0a..6ad23bd3523a 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -24,8 +24,16 @@ #include <linux/list.h> #include <linux/amba/bus.h> #include <linux/amba/clcd.h> +#include <linux/bitops.h> #include <linux/clk.h> #include <linux/hardirq.h> +#include <linux/dma-mapping.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_graph.h> +#include <video/display_timing.h> +#include <video/of_display_timing.h> +#include <video/videomode.h> #include <asm/sizes.h> @@ -543,6 +551,269 @@ static int clcdfb_register(struct clcd_fb *fb) return ret; } +#ifdef CONFIG_OF +static int clcdfb_of_get_dpi_panel_mode(struct device_node *node, + struct fb_videomode *mode) +{ + int err; + struct display_timing timing; + struct videomode video; + + err = of_get_display_timing(node, "panel-timing", &timing); + if (err) + return err; + + videomode_from_timing(&timing, &video); + + err = fb_videomode_from_videomode(&video, mode); + if (err) + return err; + + return 0; +} + +static int clcdfb_snprintf_mode(char *buf, int size, struct fb_videomode *mode) +{ + return snprintf(buf, size, "%ux%u@%u", mode->xres, mode->yres, + mode->refresh); +} + +static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, + struct fb_videomode *mode) +{ + int err; + struct device_node *panel; + char *name; + int len; + + panel = of_graph_get_remote_port_parent(endpoint); + 
if (!panel) + return -ENODEV; + + /* Only directly connected DPI panels supported for now */ + if (of_device_is_compatible(panel, "panel-dpi")) + err = clcdfb_of_get_dpi_panel_mode(panel, mode); + else + err = -ENOENT; + if (err) + return err; + + len = clcdfb_snprintf_mode(NULL, 0, mode); + name = devm_kzalloc(dev, len + 1, GFP_KERNEL); + clcdfb_snprintf_mode(name, len + 1, mode); + mode->name = name; + + return 0; +} + +static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0) +{ + static struct { + unsigned int part; + u32 r0, g0, b0; + u32 caps; + } panels[] = { + { 0x110, 1, 7, 13, CLCD_CAP_5551 }, + { 0x110, 0, 8, 16, CLCD_CAP_888 }, + { 0x111, 4, 14, 20, CLCD_CAP_444 }, + { 0x111, 3, 11, 19, CLCD_CAP_444 | CLCD_CAP_5551 }, + { 0x111, 3, 10, 19, CLCD_CAP_444 | CLCD_CAP_5551 | + CLCD_CAP_565 }, + { 0x111, 0, 8, 16, CLCD_CAP_444 | CLCD_CAP_5551 | + CLCD_CAP_565 | CLCD_CAP_888 }, + }; + int i; + + /* Bypass pixel clock divider, data output on the falling edge */ + fb->panel->tim2 = TIM2_BCD | TIM2_IPC; + + /* TFT display, vert. comp. interrupt at the start of the back porch */ + fb->panel->cntl |= CNTL_LCDTFT | CNTL_LCDVCOMP(1); + + fb->panel->caps = 0; + + /* Match the setup with known variants */ + for (i = 0; i < ARRAY_SIZE(panels) && !fb->panel->caps; i++) { + if (amba_part(fb->dev) != panels[i].part) + continue; + if (g0 != panels[i].g0) + continue; + if (r0 == panels[i].r0 && b0 == panels[i].b0) + fb->panel->caps = panels[i].caps; + } + + return fb->panel->caps ? 0 : -EINVAL; +} + +static int clcdfb_of_init_display(struct clcd_fb *fb) +{ + struct device_node *endpoint; + int err; + unsigned int bpp; + u32 max_bandwidth; + u32 tft_r0b0g0[3]; + + fb->panel = devm_kzalloc(&fb->dev->dev, sizeof(*fb->panel), GFP_KERNEL); + if (!fb->panel) + return -ENOMEM; + + endpoint = of_graph_get_next_endpoint(fb->dev->dev.of_node, NULL); + if (!endpoint) + return -ENODEV; + + err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, &fb->panel->mode); + if (err) + return err; + + err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth", + &max_bandwidth); + if (!err) { + /* + * max_bandwidth is in bytes per second and pixclock in + * pico-seconds, so the maximum allowed bits per pixel is + * 8 * max_bandwidth / (PICOS2KHZ(pixclock) * 1000) + * Rearrange this calculation to avoid overflow and then ensure + * result is a valid format. 
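+ * As an illustrative worked example (values assumed here, not taken
+ * from any binding): with max-memory-bandwidth = 50000000 bytes/s and
+ * the 25.175 MHz VGA pixel clock above (pixclock = 39721, so
+ * PICOS2KHZ() gives 25175):
+ *   bpp = 50000000 / (1000 / 8) / 25175 = 15
+ * and rounddown_pow_of_two() below reduces that to 8 bits per pixel.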
+ */ + bpp = max_bandwidth / (1000 / 8) + / PICOS2KHZ(fb->panel->mode.pixclock); + bpp = rounddown_pow_of_two(bpp); + if (bpp > 32) + bpp = 32; + } else + bpp = 32; + fb->panel->bpp = bpp; + +#ifdef CONFIG_CPU_BIG_ENDIAN + fb->panel->cntl |= CNTL_BEBO; +#endif + fb->panel->width = -1; + fb->panel->height = -1; + + if (of_property_read_u32_array(endpoint, + "arm,pl11x,tft-r0g0b0-pads", + tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) == 0) + return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0], + tft_r0b0g0[1], tft_r0b0g0[2]); + + return -ENOENT; +} + +static int clcdfb_of_vram_setup(struct clcd_fb *fb) +{ + int err; + struct device_node *memory; + u64 size; + + err = clcdfb_of_init_display(fb); + if (err) + return err; + + memory = of_parse_phandle(fb->dev->dev.of_node, "memory-region", 0); + if (!memory) + return -ENODEV; + + fb->fb.screen_base = of_iomap(memory, 0); + if (!fb->fb.screen_base) + return -ENOMEM; + + fb->fb.fix.smem_start = of_translate_address(memory, + of_get_address(memory, 0, &size, NULL)); + fb->fb.fix.smem_len = size; + + return 0; +} + +static int clcdfb_of_vram_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) +{ + unsigned long off, user_size, kernel_size; + + + off = vma->vm_pgoff << PAGE_SHIFT; + user_size = vma->vm_end - vma->vm_start; + kernel_size = fb->fb.fix.smem_len; + + if (off >= kernel_size || user_size > (kernel_size - off)) + return -ENXIO; + + return remap_pfn_range(vma, vma->vm_start, + __phys_to_pfn(fb->fb.fix.smem_start) + vma->vm_pgoff, + user_size, + pgprot_writecombine(vma->vm_page_prot)); +} + +static void clcdfb_of_vram_remove(struct clcd_fb *fb) +{ + iounmap(fb->fb.screen_base); +} + +static int clcdfb_of_dma_setup(struct clcd_fb *fb) +{ + unsigned long framesize; + dma_addr_t dma; + int err; + + err = clcdfb_of_init_display(fb); + if (err) + return err; + + framesize = fb->panel->mode.xres * fb->panel->mode.yres * + fb->panel->bpp / 8; + fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize, + &dma, GFP_KERNEL); + if (!fb->fb.screen_base) + return -ENOMEM; + + fb->fb.fix.smem_start = dma; + fb->fb.fix.smem_len = framesize; + + return 0; +} + +static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) +{ + return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base, + fb->fb.fix.smem_start, fb->fb.fix.smem_len); +} + +static void clcdfb_of_dma_remove(struct clcd_fb *fb) +{ + dma_free_coherent(&fb->dev->dev, fb->fb.fix.smem_len, + fb->fb.screen_base, fb->fb.fix.smem_start); +} + +static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev) +{ + struct clcd_board *board = devm_kzalloc(&dev->dev, sizeof(*board), + GFP_KERNEL); + struct device_node *node = dev->dev.of_node; + + if (!board) + return NULL; + + board->name = of_node_full_name(node); + board->caps = CLCD_CAP_ALL; + board->check = clcdfb_check; + board->decode = clcdfb_decode; + if (of_find_property(node, "memory-region", NULL)) { + board->setup = clcdfb_of_vram_setup; + board->mmap = clcdfb_of_vram_mmap; + board->remove = clcdfb_of_vram_remove; + } else { + board->setup = clcdfb_of_dma_setup; + board->mmap = clcdfb_of_dma_mmap; + board->remove = clcdfb_of_dma_remove; + } + + return board; +} +#else +static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev) +{ + return NULL; +} +#endif + static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) { struct clcd_board *board = dev_get_platdata(&dev->dev); @@ -550,6 +821,9 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) int ret; if (!board) + 
board = clcdfb_of_get_board(dev); + + if (!board) return -EINVAL; ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index d36e830d6fc6..1d8bdb92939b 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -290,7 +290,7 @@ static void init_contrast(struct atmel_lcdfb_info *sinfo) /* contrast pwm can be 'inverted' */ if (pdata->lcdcon_pol_negative) - contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE); + contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE); /* have some default contrast/backlight settings */ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr); @@ -1097,16 +1097,19 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo) pdata->lcd_wiring_mode = ret; pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight"); + pdata->lcdcon_pol_negative = of_property_read_bool(display_np, "atmel,lcdcon-backlight-inverted"); timings = of_get_display_timings(display_np); if (!timings) { dev_err(dev, "failed to get display timings\n"); + ret = -EINVAL; goto put_display_node; } timings_np = of_find_node_by_name(display_np, "display-timings"); if (!timings_np) { dev_err(dev, "failed to find display-timings node\n"); + ret = -ENODEV; goto put_display_node; } diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c index 52108be69e77..ff6070170d01 100644 --- a/drivers/video/fbdev/aty/aty128fb.c +++ b/drivers/video/fbdev/aty/aty128fb.c @@ -1802,13 +1802,7 @@ static int aty128_bl_update_status(struct backlight_device *bd) return 0; } -static int aty128_bl_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static const struct backlight_ops aty128_bl_data = { - .get_brightness = aty128_bl_get_brightness, .update_status = aty128_bl_update_status, }; diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index c3d0074a32db..37ec09b3fffd 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -2211,13 +2211,7 @@ static int aty_bl_update_status(struct backlight_device *bd) return 0; } -static int aty_bl_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static const struct backlight_ops aty_bl_data = { - .get_brightness = aty_bl_get_brightness, .update_status = aty_bl_update_status, }; diff --git a/drivers/video/fbdev/aty/radeon_backlight.c b/drivers/video/fbdev/aty/radeon_backlight.c index db572df7e1ef..301d6d6aeead 100644 --- a/drivers/video/fbdev/aty/radeon_backlight.c +++ b/drivers/video/fbdev/aty/radeon_backlight.c @@ -123,13 +123,7 @@ static int radeon_bl_update_status(struct backlight_device *bd) return 0; } -static int radeon_bl_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static const struct backlight_ops radeon_bl_data = { - .get_brightness = radeon_bl_get_brightness, .update_status = radeon_bl_update_status, }; diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 372d4aea9d1c..0676746ec68c 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -41,6 +41,7 @@ * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/clk.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -113,7 +114,7 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) case VESA_NO_BLANKING: /* Turn on panel */ fbdev->regs->lcd_control |= LCD_CONTROL_GO; - au_sync(); + wmb(); /* drain writebuffer */ break; case VESA_VSYNC_SUSPEND: @@ -121,7 +122,7 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) case VESA_POWERDOWN: /* Turn off panel */ fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; - au_sync(); + wmb(); /* drain writebuffer */ break; default: break; @@ -434,7 +435,7 @@ static int au1100fb_drv_probe(struct platform_device *dev) struct au1100fb_device *fbdev = NULL; struct resource *regs_res; unsigned long page; - u32 sys_clksrc; + struct clk *c; /* Allocate new device private */ fbdev = devm_kzalloc(&dev->dev, sizeof(struct au1100fb_device), @@ -473,6 +474,13 @@ static int au1100fb_drv_probe(struct platform_device *dev) print_dbg("Register memory map at %p", fbdev->regs); print_dbg("phys=0x%08x, size=%d", fbdev->regs_phys, fbdev->regs_len); + c = clk_get(NULL, "lcd_intclk"); + if (!IS_ERR(c)) { + fbdev->lcdclk = c; + clk_set_rate(c, 48000000); + clk_prepare_enable(c); + } + /* Allocate the framebuffer to the maximum screen size * nbr of video buffers */ fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres * (fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS; @@ -506,10 +514,6 @@ static int au1100fb_drv_probe(struct platform_device *dev) print_dbg("Framebuffer memory map at %p", fbdev->fb_mem); print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024); - /* Setup LCD clock to AUX (48 MHz) */ - sys_clksrc = au_readl(SYS_CLKSRC) & ~(SYS_CS_ML_MASK | SYS_CS_DL | SYS_CS_CL); - au_writel((sys_clksrc | (1 << SYS_CS_ML_BIT)), SYS_CLKSRC); - /* load the panel info into the var struct */ au1100fb_var.bits_per_pixel = fbdev->panel->bpp; au1100fb_var.xres = fbdev->panel->xres; @@ -546,6 +550,10 @@ static int au1100fb_drv_probe(struct platform_device *dev) return 0; failed: + if (fbdev->lcdclk) { + clk_disable_unprepare(fbdev->lcdclk); + clk_put(fbdev->lcdclk); + } if (fbdev->fb_mem) { dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem, fbdev->fb_phys); @@ -576,11 +584,15 @@ int au1100fb_drv_remove(struct platform_device *dev) fb_dealloc_cmap(&fbdev->info.cmap); + if (fbdev->lcdclk) { + clk_disable_unprepare(fbdev->lcdclk); + clk_put(fbdev->lcdclk); + } + return 0; } #ifdef CONFIG_PM -static u32 sys_clksrc; static struct au1100fb_regs fbregs; int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state) @@ -590,14 +602,11 @@ int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state) if (!fbdev) return 0; - /* Save the clock source state */ - sys_clksrc = au_readl(SYS_CLKSRC); - /* Blank the LCD */ au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info); - /* Stop LCD clocking */ - au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC); + if (fbdev->lcdclk) + clk_disable(fbdev->lcdclk); memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs)); @@ -613,8 +622,8 @@ int au1100fb_drv_resume(struct platform_device *dev) memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs)); - /* Restart LCD clocking */ - au_writel(sys_clksrc, SYS_CLKSRC); + if (fbdev->lcdclk) + clk_enable(fbdev->lcdclk); /* Unblank the LCD */ au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info); diff --git a/drivers/video/fbdev/au1100fb.h b/drivers/video/fbdev/au1100fb.h index 12d9642d5465..9af19939a9c6 100644 --- a/drivers/video/fbdev/au1100fb.h +++ 
b/drivers/video/fbdev/au1100fb.h @@ -109,6 +109,7 @@ struct au1100fb_device { size_t fb_len; dma_addr_t fb_phys; int panel_idx; + struct clk *lcdclk; }; /********************************************************************/ diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 4cfba78a1458..40494dbdf519 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -30,6 +30,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/clk.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/kernel.h> @@ -330,9 +331,8 @@ struct panel_settings uint32 mode_pwmhi; uint32 mode_outmask; uint32 mode_fifoctrl; - uint32 mode_toyclksrc; uint32 mode_backlight; - uint32 mode_auxpll; + uint32 lcdclk; #define Xres min_xres #define Yres min_yres u32 min_xres; /* Minimum horizontal resolution */ @@ -379,9 +379,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x00000000, .mode_outmask = 0x00FFFFFF, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 8, /* 96MHz AUXPLL */ + .lcdclk = 96, 320, 320, 240, 240, }, @@ -407,9 +406,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x00000000, .mode_outmask = 0x00FFFFFF, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 8, /* 96MHz AUXPLL */ + .lcdclk = 96, 640, 480, 640, 480, }, @@ -435,9 +433,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x00000000, .mode_outmask = 0x00FFFFFF, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 8, /* 96MHz AUXPLL */ + .lcdclk = 96, 800, 800, 600, 600, }, @@ -463,9 +460,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x00000000, .mode_outmask = 0x00FFFFFF, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 6, /* 72MHz AUXPLL */ + .lcdclk = 72, 1024, 1024, 768, 768, }, @@ -491,9 +487,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x00000000, .mode_outmask = 0x00FFFFFF, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 10, /* 120MHz AUXPLL */ + .lcdclk = 120, 1280, 1280, 1024, 1024, }, @@ -519,9 +514,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x03400000, /* SCB 0x0 */ .mode_outmask = 0x00FFFFFF, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 8, /* 96MHz AUXPLL */ + .lcdclk = 96, 1024, 1024, 768, 768, }, @@ -550,9 +544,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x03400000, .mode_outmask = 0x00fcfcfc, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 8, /* 96MHz AUXPLL */ + .lcdclk = 96, 640, 480, 640, 480, }, @@ -581,9 +574,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x03400000, .mode_outmask = 0x00fcfcfc, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 8, /* 96MHz AUXPLL */ + .lcdclk = 96, /* 96MHz AUXPLL */ 320, 320, 240, 240, }, @@ -612,9 +604,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x03400000, .mode_outmask = 
0x00fcfcfc, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = 8, /* 96MHz AUXPLL */ + .lcdclk = 96, 856, 856, 480, 480, }, @@ -646,9 +637,8 @@ static struct panel_settings known_lcd_panels[] = .mode_pwmhi = 0x00000000, .mode_outmask = 0x00FFFFFF, .mode_fifoctrl = 0x2f2f2f2f, - .mode_toyclksrc = 0x00000004, /* AUXPLL directly */ .mode_backlight = 0x00000000, - .mode_auxpll = (48/12) * 2, + .lcdclk = 96, 800, 800, 480, 480, }, @@ -764,7 +754,7 @@ static int au1200_setlocation (struct au1200fb_device *fbdev, int plane, /* Disable the window while making changes, then restore WINEN */ winenable = lcd->winenable & (1 << plane); - au_sync(); + wmb(); /* drain writebuffer */ lcd->winenable &= ~(1 << plane); lcd->window[plane].winctrl0 = winctrl0; lcd->window[plane].winctrl1 = winctrl1; @@ -772,7 +762,7 @@ static int au1200_setlocation (struct au1200fb_device *fbdev, int plane, lcd->window[plane].winbuf1 = fbdev->fb_phys; lcd->window[plane].winbufctrl = 0; /* select winbuf0 */ lcd->winenable |= winenable; - au_sync(); + wmb(); /* drain writebuffer */ return 0; } @@ -788,22 +778,21 @@ static void au1200_setpanel(struct panel_settings *newpanel, /* Make sure all windows disabled */ winenable = lcd->winenable; lcd->winenable = 0; - au_sync(); + wmb(); /* drain writebuffer */ /* * Ensure everything is disabled before reconfiguring */ if (lcd->screen & LCD_SCREEN_SEN) { /* Wait for vertical sync period */ lcd->intstatus = LCD_INT_SS; - while ((lcd->intstatus & LCD_INT_SS) == 0) { - au_sync(); - } + while ((lcd->intstatus & LCD_INT_SS) == 0) + ; lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/ do { lcd->intstatus = lcd->intstatus; /*clear interrupts*/ - au_sync(); + wmb(); /* drain writebuffer */ /*wait for controller to shut down*/ } while ((lcd->intstatus & LCD_INT_SD) == 0); @@ -829,11 +818,17 @@ static void au1200_setpanel(struct panel_settings *newpanel, */ if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT)) { - uint32 sys_clksrc; - au_writel(panel->mode_auxpll, SYS_AUXPLL); - sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f; - sys_clksrc |= panel->mode_toyclksrc; - au_writel(sys_clksrc, SYS_CLKSRC); + struct clk *c = clk_get(NULL, "lcd_intclk"); + long r, pc = panel->lcdclk * 1000000; + + if (!IS_ERR(c)) { + r = clk_round_rate(c, pc); + if ((pc - r) < (pc / 10)) { /* 10% slack */ + clk_set_rate(c, r); + clk_prepare_enable(c); + } + clk_put(c); + } } /* @@ -847,7 +842,7 @@ static void au1200_setpanel(struct panel_settings *newpanel, lcd->pwmhi = panel->mode_pwmhi; lcd->outmask = panel->mode_outmask; lcd->fifoctrl = panel->mode_fifoctrl; - au_sync(); + wmb(); /* drain writebuffer */ /* fixme: Check window settings to make sure still valid * for new geometry */ @@ -863,7 +858,7 @@ static void au1200_setpanel(struct panel_settings *newpanel, * Re-enable screen now that it is configured */ lcd->screen |= LCD_SCREEN_SEN; - au_sync(); + wmb(); /* drain writebuffer */ /* Call init of panel */ if (pd->panel_init) @@ -956,7 +951,7 @@ static void au1200_setmode(struct au1200fb_device *fbdev) | LCD_WINCTRL2_SCY_1 ) ; lcd->winenable |= win->w[plane].mode_winenable; - au_sync(); + wmb(); /* drain writebuffer */ } @@ -1270,7 +1265,7 @@ static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) if (pdata->flags & SCREEN_MASK) lcd->colorkeymsk = pdata->mask; - au_sync(); + wmb(); /* drain writebuffer */ } static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) @@ -1288,7 +1283,7 @@ static void 
get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata) hi1 = (lcd->pwmhi >> 16) + 1; divider = (lcd->pwmdiv & 0x3FFFF) + 1; pdata->brightness = ((hi1 << 8) / divider) - 1; - au_sync(); + wmb(); /* drain writebuffer */ } static void set_window(unsigned int plane, @@ -1387,7 +1382,7 @@ static void set_window(unsigned int plane, val |= (pdata->enable & 1) << plane; lcd->winenable = val; } - au_sync(); + wmb(); /* drain writebuffer */ } static void get_window(unsigned int plane, @@ -1414,7 +1409,7 @@ static void get_window(unsigned int plane, pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21; pdata->enable = (lcd->winenable >> plane) & 1; - au_sync(); + wmb(); /* drain writebuffer */ } static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd, @@ -1511,7 +1506,7 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id) { /* Nothing to do for now, just clear any pending interrupt */ lcd->intstatus = lcd->intstatus; - au_sync(); + wmb(); /* drain writebuffer */ return IRQ_HANDLED; } @@ -1809,7 +1804,7 @@ static int au1200fb_drv_suspend(struct device *dev) au1200_setpanel(NULL, pd); lcd->outmask = 0; - au_sync(); + wmb(); /* drain writebuffer */ return 0; } diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 206a66b61072..59abdc6a97f6 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -273,7 +273,7 @@ static struct chips_init_reg chips_init_xr[] = { { 0xa8, 0x00 } }; -static void __init chips_hw_init(void) +static void chips_hw_init(void) { int i; diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c new file mode 100644 index 000000000000..49a7bb4ef02f --- /dev/null +++ b/drivers/video/fbdev/clps711x-fb.c @@ -0,0 +1,397 @@ +/* + * Cirrus Logic CLPS711X FB driver + * + * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru> + * Based on driver by Russell King <rmk@arm.linux.org.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/clk.h> +#include <linux/fb.h> +#include <linux/io.h> +#include <linux/lcd.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/mfd/syscon/clps711x.h> +#include <linux/regulator/consumer.h> +#include <video/of_display_timing.h> + +#define CLPS711X_FB_NAME "clps711x-fb" +#define CLPS711X_FB_BPP_MAX (4) + +/* Registers relative to LCDCON */ +#define CLPS711X_LCDCON (0x0000) +# define LCDCON_GSEN BIT(30) +# define LCDCON_GSMD BIT(31) +#define CLPS711X_PALLSW (0x0280) +#define CLPS711X_PALMSW (0x02c0) +#define CLPS711X_FBADDR (0x0d40) + +struct clps711x_fb_info { + struct clk *clk; + void __iomem *base; + struct regmap *syscon; + resource_size_t buffsize; + struct fb_videomode mode; + struct regulator *lcd_pwr; + u32 ac_prescale; + bool cmap_invert; +}; + +static int clps711x_fb_setcolreg(u_int regno, u_int red, u_int green, + u_int blue, u_int transp, struct fb_info *info) +{ + struct clps711x_fb_info *cfb = info->par; + u32 level, mask, shift; + + if (regno >= BIT(info->var.bits_per_pixel)) + return -EINVAL; + + shift = 4 * (regno & 7); + mask = 0xf << shift; + /* gray = 0.30*R + 0.58*G + 0.11*B */ + level = (((red * 77 + green * 151 + blue * 28) >> 20) << shift) & mask; + if (cfb->cmap_invert) + level = 0xf - level; + + regno = (regno < 8) ? CLPS711X_PALLSW : CLPS711X_PALMSW; + + writel((readl(cfb->base + regno) & ~mask) | level, cfb->base + regno); + + return 0; +} + +static int clps711x_fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + u32 val; + + if (var->bits_per_pixel < 1 || + var->bits_per_pixel > CLPS711X_FB_BPP_MAX) + return -EINVAL; + + if (!var->pixclock) + return -EINVAL; + + val = DIV_ROUND_UP(var->xres, 16) - 1; + if (val < 0x01 || val > 0x3f) + return -EINVAL; + + val = DIV_ROUND_UP(var->yres * var->xres * var->bits_per_pixel, 128); + val--; + if (val < 0x001 || val > 0x1fff) + return -EINVAL; + + var->transp.msb_right = 0; + var->transp.offset = 0; + var->transp.length = 0; + var->red.msb_right = 0; + var->red.offset = 0; + var->red.length = var->bits_per_pixel; + var->green = var->red; + var->blue = var->red; + var->grayscale = var->bits_per_pixel > 1; + + return 0; +} + +static int clps711x_fb_set_par(struct fb_info *info) +{ + struct clps711x_fb_info *cfb = info->par; + resource_size_t size; + u32 lcdcon, pps; + + size = (info->var.xres * info->var.yres * info->var.bits_per_pixel) / 8; + if (size > cfb->buffsize) + return -EINVAL; + + switch (info->var.bits_per_pixel) { + case 1: + info->fix.visual = FB_VISUAL_MONO01; + break; + case 2: + case 4: + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; + break; + default: + return -EINVAL; + } + + info->fix.line_length = info->var.xres * info->var.bits_per_pixel / 8; + info->fix.smem_len = size; + + lcdcon = (info->var.xres * info->var.yres * + info->var.bits_per_pixel) / 128 - 1; + lcdcon |= ((info->var.xres / 16) - 1) << 13; + lcdcon |= (cfb->ac_prescale & 0x1f) << 25; + + pps = clk_get_rate(cfb->clk) / (PICOS2KHZ(info->var.pixclock) * 1000); + if (pps) + pps--; + lcdcon |= (pps & 0x3f) << 19; + + if (info->var.bits_per_pixel == 4) + lcdcon |= LCDCON_GSMD; + if (info->var.bits_per_pixel >= 2) + lcdcon |= LCDCON_GSEN; + + /* LCDCON must only be changed while the LCD is disabled */ + regmap_update_bits(cfb->syscon, SYSCON_OFFSET, SYSCON1_LCDEN, 0); + writel(lcdcon, cfb->base + CLPS711X_LCDCON); + regmap_update_bits(cfb->syscon, SYSCON_OFFSET, + SYSCON1_LCDEN, SYSCON1_LCDEN); + + 
return 0; +} + +static int clps711x_fb_blank(int blank, struct fb_info *info) +{ + /* Return happy */ + return 0; +} + +static struct fb_ops clps711x_fb_ops = { + .owner = THIS_MODULE, + .fb_setcolreg = clps711x_fb_setcolreg, + .fb_check_var = clps711x_fb_check_var, + .fb_set_par = clps711x_fb_set_par, + .fb_blank = clps711x_fb_blank, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, +}; + +static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi) +{ + struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev); + + return (!fi || fi->par == cfb) ? 1 : 0; +} + +static int clps711x_lcd_get_power(struct lcd_device *lcddev) +{ + struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev); + + if (!IS_ERR_OR_NULL(cfb->lcd_pwr)) + if (!regulator_is_enabled(cfb->lcd_pwr)) + return FB_BLANK_NORMAL; + + return FB_BLANK_UNBLANK; +} + +static int clps711x_lcd_set_power(struct lcd_device *lcddev, int blank) +{ + struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev); + + if (!IS_ERR_OR_NULL(cfb->lcd_pwr)) { + if (blank == FB_BLANK_UNBLANK) { + if (!regulator_is_enabled(cfb->lcd_pwr)) + return regulator_enable(cfb->lcd_pwr); + } else { + if (regulator_is_enabled(cfb->lcd_pwr)) + return regulator_disable(cfb->lcd_pwr); + } + } + + return 0; +} + +static struct lcd_ops clps711x_lcd_ops = { + .check_fb = clps711x_lcd_check_fb, + .get_power = clps711x_lcd_get_power, + .set_power = clps711x_lcd_set_power, +}; + +static int clps711x_fb_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *disp, *np = dev->of_node; + struct clps711x_fb_info *cfb; + struct lcd_device *lcd; + struct fb_info *info; + struct resource *res; + int ret = -ENOENT; + u32 val; + + if (fb_get_options(CLPS711X_FB_NAME, NULL)) + return -ENODEV; + + info = framebuffer_alloc(sizeof(*cfb), dev); + if (!info) + return -ENOMEM; + + cfb = info->par; + platform_set_drvdata(pdev, info); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto out_fb_release; + cfb->base = devm_ioremap(dev, res->start, resource_size(res)); + if (!cfb->base) { + ret = -ENOMEM; + goto out_fb_release; + } + + info->fix.mmio_start = res->start; + info->fix.mmio_len = resource_size(res); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + info->screen_base = devm_ioremap_resource(dev, res); + if (IS_ERR(info->screen_base)) { + ret = PTR_ERR(info->screen_base); + goto out_fb_release; + } + + /* Physical address should be aligned to 256 MiB */ + if (res->start & 0x0fffffff) { + ret = -EINVAL; + goto out_fb_release; + } + + info->apertures = alloc_apertures(1); + if (!info->apertures) { + ret = -ENOMEM; + goto out_fb_release; + } + + cfb->buffsize = resource_size(res); + info->fix.smem_start = res->start; + info->apertures->ranges[0].base = info->fix.smem_start; + info->apertures->ranges[0].size = cfb->buffsize; + + cfb->clk = devm_clk_get(dev, NULL); + if (IS_ERR(cfb->clk)) { + ret = PTR_ERR(cfb->clk); + goto out_fb_release; + } + + cfb->syscon = + syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1"); + if (IS_ERR(cfb->syscon)) { + ret = PTR_ERR(cfb->syscon); + goto out_fb_release; + } + + disp = of_parse_phandle(np, "display", 0); + if (!disp) { + dev_err(&pdev->dev, "No display defined\n"); + ret = -ENODATA; + goto out_fb_release; + } + + ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE); + if (ret) + goto out_fb_release; + + of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale); + 
cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert"); + + ret = of_property_read_u32(disp, "bits-per-pixel", + &info->var.bits_per_pixel); + if (ret) + goto out_fb_release; + + /* Force disable LCD on any mismatch */ + if (info->fix.smem_start != (readb(cfb->base + CLPS711X_FBADDR) << 28)) + regmap_update_bits(cfb->syscon, SYSCON_OFFSET, + SYSCON1_LCDEN, 0); + + ret = regmap_read(cfb->syscon, SYSCON_OFFSET, &val); + if (ret) + goto out_fb_release; + + if (!(val & SYSCON1_LCDEN)) { + /* Setup start FB address */ + writeb(info->fix.smem_start >> 28, cfb->base + CLPS711X_FBADDR); + /* Clean FB memory */ + memset_io(info->screen_base, 0, cfb->buffsize); + } + + cfb->lcd_pwr = devm_regulator_get(dev, "lcd"); + if (PTR_ERR(cfb->lcd_pwr) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_fb_release; + } + + info->fbops = &clps711x_fb_ops; + info->flags = FBINFO_DEFAULT; + info->var.activate = FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW; + info->var.height = -1; + info->var.width = -1; + info->var.vmode = FB_VMODE_NONINTERLACED; + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.accel = FB_ACCEL_NONE; + strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id)); + fb_videomode_to_var(&info->var, &cfb->mode); + + ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0); + if (ret) + goto out_fb_release; + + ret = fb_set_var(info, &info->var); + if (ret) + goto out_fb_dealloc_cmap; + + ret = register_framebuffer(info); + if (ret) + goto out_fb_dealloc_cmap; + + lcd = devm_lcd_device_register(dev, "clps711x-lcd", dev, cfb, + &clps711x_lcd_ops); + if (!IS_ERR(lcd)) + return 0; + + ret = PTR_ERR(lcd); + unregister_framebuffer(info); + +out_fb_dealloc_cmap: + regmap_update_bits(cfb->syscon, SYSCON_OFFSET, SYSCON1_LCDEN, 0); + fb_dealloc_cmap(&info->cmap); + +out_fb_release: + framebuffer_release(info); + + return ret; +} + +static int clps711x_fb_remove(struct platform_device *pdev) +{ + struct fb_info *info = platform_get_drvdata(pdev); + struct clps711x_fb_info *cfb = info->par; + + regmap_update_bits(cfb->syscon, SYSCON_OFFSET, SYSCON1_LCDEN, 0); + + unregister_framebuffer(info); + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); + + return 0; +} + +static const struct of_device_id clps711x_fb_dt_ids[] = { + { .compatible = "cirrus,clps711x-fb", }, + { } +}; +MODULE_DEVICE_TABLE(of, clps711x_fb_dt_ids); + +static struct platform_driver clps711x_fb_driver = { + .driver = { + .name = CLPS711X_FB_NAME, + .owner = THIS_MODULE, + .of_match_table = clps711x_fb_dt_ids, + }, + .probe = clps711x_fb_probe, + .remove = clps711x_fb_remove, +}; +module_platform_driver(clps711x_fb_driver); + +MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>"); +MODULE_DESCRIPTION("Cirrus Logic CLPS711X FB driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index a8484f768d04..10c876c95772 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -419,7 +419,7 @@ static void lcd_cfg_horizontal_sync(int back_porch, int pulse_width, { u32 reg; - reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0xf; + reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0x3ff; reg |= (((back_porch-1) & 0xff) << 24) | (((front_porch-1) & 0xff) << 16) | (((pulse_width-1) & 0x3f) << 10); @@ -1447,18 +1447,15 @@ static int fb_probe(struct platform_device *device) da8xx_fb_fix.line_length - 1; /* allocate palette buffer */ - par->v_palette_base = dma_alloc_coherent(NULL, - PALETTE_SIZE, - (resource_size_t *) - &par->p_palette_base, - GFP_KERNEL | GFP_DMA); + 
par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, + (resource_size_t *)&par->p_palette_base, + GFP_KERNEL | GFP_DMA); if (!par->v_palette_base) { dev_err(&device->dev, "GLCD: kmalloc for palette buffer failed\n"); ret = -EINVAL; goto err_release_fb_mem; } - memset(par->v_palette_base, 0, PALETTE_SIZE); par->irq = platform_get_irq(device, 0); if (par->irq < 0) { diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c index 29e70ed3f154..95873f26e39c 100644 --- a/drivers/video/fbdev/exynos/s6e8ax0.c +++ b/drivers/video/fbdev/exynos/s6e8ax0.c @@ -704,11 +704,6 @@ static int s6e8ax0_get_power(struct lcd_device *ld) return lcd->power; } -static int s6e8ax0_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static int s6e8ax0_set_brightness(struct backlight_device *bd) { int ret = 0, brightness = bd->props.brightness; @@ -736,7 +731,6 @@ static struct lcd_ops s6e8ax0_lcd_ops = { }; static const struct backlight_ops s6e8ax0_backlight_ops = { - .get_brightness = s6e8ax0_get_brightness, .update_status = s6e8ax0_set_brightness, }; diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index e23392ec5af3..42543362f163 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -224,6 +224,11 @@ struct hvfb_par { u32 pseudo_palette[16]; u8 init_buf[MAX_VMBUS_PKT_SIZE]; u8 recv_buf[MAX_VMBUS_PKT_SIZE]; + + /* If true, the VSC notifies the VSP on every framebuffer change */ + bool synchronous_fb; + + struct notifier_block hvfb_panic_nb; }; static uint screen_width = HVFB_WIDTH; @@ -532,6 +537,19 @@ static void hvfb_update_work(struct work_struct *w) schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY); } +static int hvfb_on_panic(struct notifier_block *nb, + unsigned long e, void *p) +{ + struct hvfb_par *par; + struct fb_info *info; + + par = container_of(nb, struct hvfb_par, hvfb_panic_nb); + par->synchronous_fb = true; + info = par->info; + synthvid_update(info); + + return NOTIFY_DONE; +} /* Framebuffer operation handlers */ @@ -582,14 +600,44 @@ static int hvfb_blank(int blank, struct fb_info *info) return 1; /* get fb_blank to set the colormap to all black */ } +static void hvfb_cfb_fillrect(struct fb_info *p, + const struct fb_fillrect *rect) +{ + struct hvfb_par *par = p->par; + + cfb_fillrect(p, rect); + if (par->synchronous_fb) + synthvid_update(p); +} + +static void hvfb_cfb_copyarea(struct fb_info *p, + const struct fb_copyarea *area) +{ + struct hvfb_par *par = p->par; + + cfb_copyarea(p, area); + if (par->synchronous_fb) + synthvid_update(p); +} + +static void hvfb_cfb_imageblit(struct fb_info *p, + const struct fb_image *image) +{ + struct hvfb_par *par = p->par; + + cfb_imageblit(p, image); + if (par->synchronous_fb) + synthvid_update(p); +} + static struct fb_ops hvfb_ops = { .owner = THIS_MODULE, .fb_check_var = hvfb_check_var, .fb_set_par = hvfb_set_par, .fb_setcolreg = hvfb_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, + .fb_fillrect = hvfb_cfb_fillrect, + .fb_copyarea = hvfb_cfb_copyarea, + .fb_imageblit = hvfb_cfb_imageblit, .fb_blank = hvfb_blank, }; @@ -801,6 +849,11 @@ static int hvfb_probe(struct hv_device *hdev, par->fb_ready = true; + par->synchronous_fb = false; + par->hvfb_panic_nb.notifier_call = hvfb_on_panic; + atomic_notifier_chain_register(&panic_notifier_list, + &par->hvfb_panic_nb); + return 0; error: @@ -820,6 +873,9 @@ static int hvfb_remove(struct hv_device *hdev) struct fb_info 
*info = hv_get_drvdata(hdev); struct hvfb_par *par = info->par; + atomic_notifier_chain_unregister(&panic_notifier_list, + &par->hvfb_panic_nb); + par->update = false; par->fb_ready = false; @@ -836,7 +892,7 @@ static int hvfb_remove(struct hv_device *hdev) } -static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = { +static const struct pci_device_id pci_stub_id_table[] = { { .vendor = PCI_VENDOR_ID_MICROSOFT, .device = PCI_DEVICE_ID_HYPERV_VIDEO, diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c index ca7c9df193b0..a2b4204b42bb 100644 --- a/drivers/video/fbdev/i740fb.c +++ b/drivers/video/fbdev/i740fb.c @@ -1260,7 +1260,7 @@ fail: #define I740_ID_PCI 0x00d1 #define I740_ID_AGP 0x7800 -static DEFINE_PCI_DEVICE_TABLE(i740fb_id_table) = { +static const struct pci_device_id i740fb_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, I740_ID_PCI) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, I740_ID_AGP) }, { 0 } diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c index 2bd52ed8832c..698df9543e30 100644 --- a/drivers/video/fbdev/mbx/mbxfb.c +++ b/drivers/video/fbdev/mbx/mbxfb.c @@ -628,14 +628,14 @@ static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd, case MBXFB_IOCS_PLANEORDER: if (copy_from_user(&porder, (void __user*)arg, sizeof(struct mbxfb_planeorder))) - return -EFAULT; + return -EFAULT; return mbxfb_ioctl_planeorder(&porder); case MBXFB_IOCS_ALPHA: if (copy_from_user(&alpha, (void __user*)arg, sizeof(struct mbxfb_alphaCtl))) - return -EFAULT; + return -EFAULT; return mbxfb_ioctl_alphactl(&alpha); diff --git a/drivers/video/fbdev/msm/mddi_client_dummy.c b/drivers/video/fbdev/msm/mddi_client_dummy.c index f1b0dfcc9717..cdb8f69a5d88 100644 --- a/drivers/video/fbdev/msm/mddi_client_dummy.c +++ b/drivers/video/fbdev/msm/mddi_client_dummy.c @@ -15,6 +15,7 @@ * GNU General Public License for more details. 
*/ +#include <linux/device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> @@ -51,8 +52,7 @@ static int mddi_dummy_probe(struct platform_device *pdev) { struct msm_mddi_client_data *client_data = pdev->dev.platform_data; struct panel_info *panel = - kzalloc(sizeof(struct panel_info), GFP_KERNEL); - int ret; + devm_kzalloc(&pdev->dev, sizeof(struct panel_info), GFP_KERNEL); if (!panel) return -ENOMEM; platform_set_drvdata(pdev, panel); @@ -67,24 +67,11 @@ static int mddi_dummy_probe(struct platform_device *pdev) client_data->fb_resource, 1); panel->panel_data.fb_data = client_data->private_client_data; panel->pdev.dev.platform_data = &panel->panel_data; - ret = platform_device_register(&panel->pdev); - if (ret) { - kfree(panel); - return ret; - } - return 0; -} - -static int mddi_dummy_remove(struct platform_device *pdev) -{ - struct panel_info *panel = platform_get_drvdata(pdev); - kfree(panel); - return 0; + return platform_device_register(&panel->pdev); } static struct platform_driver mddi_client_dummy = { .probe = mddi_dummy_probe, - .remove = mddi_dummy_remove, .driver = { .name = "mddi_c_dummy" }, }; diff --git a/drivers/video/fbdev/nvidia/nv_backlight.c b/drivers/video/fbdev/nvidia/nv_backlight.c index 8471008aa6ff..5c151b2ea683 100644 --- a/drivers/video/fbdev/nvidia/nv_backlight.c +++ b/drivers/video/fbdev/nvidia/nv_backlight.c @@ -82,13 +82,7 @@ static int nvidia_bl_update_status(struct backlight_device *bd) return 0; } -static int nvidia_bl_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static const struct backlight_ops nvidia_bl_ops = { - .get_brightness = nvidia_bl_get_brightness, .update_status = nvidia_bl_update_status, }; diff --git a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c index 4420ccb69aa9..131c6e260898 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c @@ -262,6 +262,23 @@ static int hdmic_audio_config(struct omap_dss_device *dssdev, return 0; } +static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); +} + +static int hdmic_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_infoframe(in, avi); +} + static struct omap_dss_driver hdmic_driver = { .connect = hdmic_connect, .disconnect = hdmic_disconnect, @@ -277,6 +294,8 @@ static struct omap_dss_driver hdmic_driver = { .read_edid = hdmic_read_edid, .detect = hdmic_detect, + .set_hdmi_mode = hdmic_set_hdmi_mode, + .set_hdmi_infoframe = hdmic_set_infoframe, .audio_enable = hdmic_audio_enable, .audio_disable = hdmic_audio_disable, diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c index 7e33686171e3..c891d8f84cb2 100644 --- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c +++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c @@ -242,6 +242,24 @@ static int tpd_audio_config(struct omap_dss_device *dssdev, return in->ops.hdmi->audio_config(in, audio); } +static int tpd_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + struct 
panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_infoframe(in, avi); +} + +static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev, + bool hdmi_mode) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); + struct omap_dss_device *in = ddata->in; + + return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); +} + static const struct omapdss_hdmi_ops tpd_hdmi_ops = { .connect = tpd_connect, .disconnect = tpd_disconnect, @@ -255,6 +273,8 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = { .read_edid = tpd_read_edid, .detect = tpd_detect, + .set_infoframe = tpd_set_infoframe, + .set_hdmi_mode = tpd_set_hdmi_mode, .audio_enable = tpd_audio_enable, .audio_disable = tpd_audio_disable, diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c index c7ba4d8b928a..617f8d2f5127 100644 --- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c @@ -817,6 +817,10 @@ static int acx565akm_probe(struct spi_device *spi) bldev = backlight_device_register("acx565akm", &ddata->spi->dev, ddata, &acx565akm_bl_ops, &props); + if (IS_ERR(bldev)) { + r = PTR_ERR(bldev); + goto err_reg_bl; + } ddata->bl_dev = bldev; if (ddata->has_cabc) { r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group); @@ -862,6 +866,7 @@ err_reg: sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group); err_sysfs: backlight_device_unregister(bldev); +err_reg_bl: err_detect: err_gpio: omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/dss/Kconfig b/drivers/video/fbdev/omap2/dss/Kconfig index 285bcd103dce..3d5eb6c36c22 100644 --- a/drivers/video/fbdev/omap2/dss/Kconfig +++ b/drivers/video/fbdev/omap2/dss/Kconfig @@ -5,6 +5,7 @@ menuconfig OMAP2_DSS tristate "OMAP2+ Display Subsystem support" select VIDEOMODE_HELPERS select OMAP2_DSS_INIT + select HDMI help OMAP2+ Display Subsystem support. 
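
The omapdss hunks that follow drop the driver-private hdmi_core_infoframe_avi packing and instead build a struct hdmi_avi_infoframe and pack it with the generic helpers from <linux/hdmi.h>, which is why OMAP2_DSS now selects HDMI above. Below is a minimal sketch of that pattern, assuming only the common hdmi_avi_infoframe_* helpers; write_byte() is a hypothetical stand-in for the driver's own register writer (hdmi_write_reg() in the real hunks), and the video code shown is purely illustrative.

/*
 * Sketch of the <linux/hdmi.h> AVI infoframe flow adopted by the
 * hunks below: init a frame, set the fields the mode needs, pack
 * header + payload (checksum included), then push the bytes to the
 * encoder through a driver-supplied writer.
 */
#include <linux/hdmi.h>
#include <linux/kernel.h>

static void example_send_avi(void (*write_byte)(unsigned int offset, u8 val))
{
	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
	ssize_t len;
	int i;

	/* Start from a valid, all-defaults AVI infoframe */
	hdmi_avi_infoframe_init(&frame);

	/* Fill in whatever the mode requires, e.g. a CEA video code */
	frame.video_code = 16;		/* 1080p60, for illustration only */

	/* Pack header and payload; the checksum is computed here */
	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return;

	/* Hand the packed bytes to the encoder, one register per byte */
	for (i = 0; i < len; i++)
		write_byte(i, buf[i]);
}

The real hdmi4/hdmi5 code in the later hunks differs only in how the packed bytes reach the hardware: the OMAP4 core writes them byte-by-byte starting at HDMI_CORE_AV_AVI_BASE, while the OMAP5 core unpacks the payload fields into the HDMI_CORE_FC_AVICONFx registers.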
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c index 7aa33b0f4a1f..be053aa80880 100644 --- a/drivers/video/fbdev/omap2/dss/dispc.c +++ b/drivers/video/fbdev/omap2/dss/dispc.c @@ -2879,19 +2879,24 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel, bool dispc_mgr_timings_ok(enum omap_channel channel, const struct omap_video_timings *timings) { - bool timings_ok; - - timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res); + if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res)) + return false; - timings_ok &= _dispc_mgr_pclk_ok(channel, timings->pixelclock); + if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock)) + return false; if (dss_mgr_is_lcd(channel)) { - timings_ok &= _dispc_lcd_timings_ok(timings->hsw, timings->hfp, + /* TODO: OMAP4+ supports interlace for LCD outputs */ + if (timings->interlace) + return false; + + if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp, timings->hbp, timings->vsw, timings->vfp, - timings->vbp); + timings->vbp)) + return false; } - return timings_ok; + return true; } static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, @@ -3257,13 +3262,10 @@ static void dispc_dump_regs(struct seq_file *s) if (i == OMAP_DSS_CHANNEL_DIGIT) continue; - DUMPREG(i, DISPC_DEFAULT_COLOR); - DUMPREG(i, DISPC_TRANS_COLOR); DUMPREG(i, DISPC_TIMING_H); DUMPREG(i, DISPC_TIMING_V); DUMPREG(i, DISPC_POL_FREQ); DUMPREG(i, DISPC_DIVISORo); - DUMPREG(i, DISPC_SIZE_MGR); DUMPREG(i, DISPC_DATA_CYCLE1); DUMPREG(i, DISPC_DATA_CYCLE2); diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c index 4755a34a5422..56b92444c54f 100644 --- a/drivers/video/fbdev/omap2/dss/dsi.c +++ b/drivers/video/fbdev/omap2/dss/dsi.c @@ -5658,18 +5658,11 @@ err_runtime_get: return r; } -static int dsi_unregister_child(struct device *dev, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - platform_device_unregister(pdev); - return 0; -} - static int __exit omap_dsihw_remove(struct platform_device *dsidev) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); - device_for_each_child(&dsidev->dev, NULL, dsi_unregister_child); + of_platform_depopulate(&dsidev->dev); WARN_ON(dsi->scp_clk_refcount > 0); diff --git a/drivers/video/fbdev/omap2/dss/hdmi.h b/drivers/video/fbdev/omap2/dss/hdmi.h index fbee07816337..262771b9b76b 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi.h +++ b/drivers/video/fbdev/omap2/dss/hdmi.h @@ -22,6 +22,7 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/hdmi.h> #include <video/omapdss.h> #include "dss.h" @@ -142,7 +143,7 @@ enum hdmi_audio_samples_perword { HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1 }; -enum hdmi_audio_sample_size { +enum hdmi_audio_sample_size_omap { HDMI_AUDIO_SAMPLE_16BITS = 0, HDMI_AUDIO_SAMPLE_24BITS = 1 }; @@ -178,59 +179,6 @@ enum hdmi_audio_mclk_mode { HDMI_AUDIO_MCLK_192FS = 7 }; -/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */ -enum hdmi_core_infoframe { - HDMI_INFOFRAME_AVI_DB1Y_RGB = 0, - HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1, - HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2, - HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0, - HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1, - HDMI_INFOFRAME_AVI_DB1B_NO = 0, - HDMI_INFOFRAME_AVI_DB1B_VERT = 1, - HDMI_INFOFRAME_AVI_DB1B_HORI = 2, - HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3, - HDMI_INFOFRAME_AVI_DB1S_0 = 0, - HDMI_INFOFRAME_AVI_DB1S_1 = 1, - HDMI_INFOFRAME_AVI_DB1S_2 = 2, - HDMI_INFOFRAME_AVI_DB2C_NO = 0, - HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1, - 
HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2, - HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3, - HDMI_INFOFRAME_AVI_DB2M_NO = 0, - HDMI_INFOFRAME_AVI_DB2M_43 = 1, - HDMI_INFOFRAME_AVI_DB2M_169 = 2, - HDMI_INFOFRAME_AVI_DB2R_SAME = 8, - HDMI_INFOFRAME_AVI_DB2R_43 = 9, - HDMI_INFOFRAME_AVI_DB2R_169 = 10, - HDMI_INFOFRAME_AVI_DB2R_149 = 11, - HDMI_INFOFRAME_AVI_DB3ITC_NO = 0, - HDMI_INFOFRAME_AVI_DB3ITC_YES = 1, - HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0, - HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1, - HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0, - HDMI_INFOFRAME_AVI_DB3Q_LR = 1, - HDMI_INFOFRAME_AVI_DB3Q_FR = 2, - HDMI_INFOFRAME_AVI_DB3SC_NO = 0, - HDMI_INFOFRAME_AVI_DB3SC_HORI = 1, - HDMI_INFOFRAME_AVI_DB3SC_VERT = 2, - HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3, - HDMI_INFOFRAME_AVI_DB5PR_NO = 0, - HDMI_INFOFRAME_AVI_DB5PR_2 = 1, - HDMI_INFOFRAME_AVI_DB5PR_3 = 2, - HDMI_INFOFRAME_AVI_DB5PR_4 = 3, - HDMI_INFOFRAME_AVI_DB5PR_5 = 4, - HDMI_INFOFRAME_AVI_DB5PR_6 = 5, - HDMI_INFOFRAME_AVI_DB5PR_7 = 6, - HDMI_INFOFRAME_AVI_DB5PR_8 = 7, - HDMI_INFOFRAME_AVI_DB5PR_9 = 8, - HDMI_INFOFRAME_AVI_DB5PR_10 = 9, -}; - -struct hdmi_cm { - int code; - int mode; -}; - struct hdmi_video_format { enum hdmi_packing_mode packing_mode; u32 y_res; /* Line per panel */ @@ -239,7 +187,8 @@ struct hdmi_video_format { struct hdmi_config { struct omap_video_timings timings; - struct hdmi_cm cm; + struct hdmi_avi_infoframe infoframe; + enum hdmi_core_hdmi_dvi hdmi_dvi_mode; }; /* HDMI PLL structure */ @@ -260,7 +209,7 @@ struct hdmi_audio_format { enum hdmi_audio_justify justification; enum hdmi_audio_sample_order sample_order; enum hdmi_audio_samples_perword samples_per_word; - enum hdmi_audio_sample_size sample_size; + enum hdmi_audio_sample_size_omap sample_size; enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end; }; @@ -298,47 +247,6 @@ struct hdmi_core_audio_config { bool en_spdif; }; -/* - * Refer to section 8.2 in HDMI 1.3 specification for - * details about infoframe databytes - */ -struct hdmi_core_infoframe_avi { - /* Y0, Y1 rgb,yCbCr */ - u8 db1_format; - /* A0 Active information Present */ - u8 db1_active_info; - /* B0, B1 Bar info data valid */ - u8 db1_bar_info_dv; - /* S0, S1 scan information */ - u8 db1_scan_info; - /* C0, C1 colorimetry */ - u8 db2_colorimetry; - /* M0, M1 Aspect ratio (4:3, 16:9) */ - u8 db2_aspect_ratio; - /* R0...R3 Active format aspect ratio */ - u8 db2_active_fmt_ar; - /* ITC IT content. 
*/ - u8 db3_itc; - /* EC0, EC1, EC2 Extended colorimetry */ - u8 db3_ec; - /* Q1, Q0 Quantization range */ - u8 db3_q_range; - /* SC1, SC0 Non-uniform picture scaling */ - u8 db3_nup_scaling; - /* VIC0..6 Video format identification */ - u8 db4_videocode; - /* PR0..PR3 Pixel repetition factor */ - u8 db5_pixel_repeat; - /* Line number end of top bar */ - u16 db6_7_line_eoftop; - /* Line number start of bottom bar */ - u16 db8_9_line_sofbottom; - /* Pixel number end of left bar */ - u16 db10_11_pixel_eofleft; - /* Pixel number start of right bar */ - u16 db12_13_pixel_sofright; -}; - struct hdmi_wp_data { void __iomem *base; }; @@ -358,8 +266,6 @@ struct hdmi_phy_data { struct hdmi_core_data { void __iomem *base; - - struct hdmi_core_infoframe_avi avi_cfg; }; static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx, @@ -425,9 +331,6 @@ int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy); int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes); /* HDMI common funcs */ -const struct hdmi_config *hdmi_default_timing(void); -const struct hdmi_config *hdmi_get_timings(int mode, int code); -struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing); int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, struct hdmi_phy_data *phy); diff --git a/drivers/video/fbdev/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c index 626aad2bef46..6a8550cf43e5 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/dss/hdmi4.c @@ -281,29 +281,11 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev, static void hdmi_display_set_timing(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - struct hdmi_cm cm; - const struct hdmi_config *t; - mutex_lock(&hdmi.lock); - cm = hdmi_get_code(timings); - hdmi.cfg.cm = cm; - - t = hdmi_get_timings(cm.mode, cm.code); - if (t != NULL) { - hdmi.cfg = *t; - - dispc_set_tv_pclk(t->timings.pixelclock); - } else { - hdmi.cfg.timings = *timings; - hdmi.cfg.cm.code = 0; - hdmi.cfg.cm.mode = HDMI_DVI; - - dispc_set_tv_pclk(timings->pixelclock); - } + hdmi.cfg.timings = *timings; - DSSDBG("using mode: %s, code %d\n", hdmi.cfg.cm.mode == HDMI_DVI ? 
- "DVI" : "HDMI", hdmi.cfg.cm.code); + dispc_set_tv_pclk(timings->pixelclock); mutex_unlock(&hdmi.lock); } @@ -311,14 +293,7 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev, static void hdmi_display_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - const struct hdmi_config *cfg; - struct hdmi_cm cm = hdmi.cfg.cm; - - cfg = hdmi_get_timings(cm.mode, cm.code); - if (cfg == NULL) - cfg = hdmi_default_timing(); - - memcpy(timings, &cfg->timings, sizeof(cfg->timings)); + *timings = hdmi.cfg.timings; } static void hdmi_dump_regs(struct seq_file *s) @@ -516,7 +491,7 @@ static int hdmi_audio_enable(struct omap_dss_device *dssdev) mutex_lock(&hdmi.lock); - if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) { + if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) { r = -EPERM; goto err; } @@ -554,7 +529,7 @@ static bool hdmi_audio_supported(struct omap_dss_device *dssdev) mutex_lock(&hdmi.lock); - r = hdmi_mode_has_audio(hdmi.cfg.cm.mode); + r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode); mutex_unlock(&hdmi.lock); return r; @@ -568,7 +543,7 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev, mutex_lock(&hdmi.lock); - if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) { + if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) { r = -EPERM; goto err; } @@ -615,6 +590,20 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev, } #endif +static int hdmi_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + hdmi.cfg.infoframe = *avi; + return 0; +} + +static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, + bool hdmi_mode) +{ + hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI; + return 0; +} + static const struct omapdss_hdmi_ops hdmi_ops = { .connect = hdmi_connect, .disconnect = hdmi_disconnect, @@ -627,6 +616,8 @@ static const struct omapdss_hdmi_ops hdmi_ops = { .get_timings = hdmi_display_get_timings, .read_edid = hdmi_read_edid, + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, .audio_enable = hdmi_audio_enable, .audio_disable = hdmi_audio_disable, diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/dss/hdmi4_core.c index 8bde7b7e95ff..4ad39cfce254 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi4_core.c +++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.c @@ -197,9 +197,7 @@ int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len) return l; } -static void hdmi_core_init(struct hdmi_core_video_config *video_cfg, - struct hdmi_core_infoframe_avi *avi_cfg, - struct hdmi_core_packet_enable_repeat *repeat_cfg) +static void hdmi_core_init(struct hdmi_core_video_config *video_cfg) { DSSDBG("Enter hdmi_core_init\n"); @@ -210,35 +208,6 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg, video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE; video_cfg->hdmi_dvi = HDMI_DVI; video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK; - - /* info frame */ - avi_cfg->db1_format = 0; - avi_cfg->db1_active_info = 0; - avi_cfg->db1_bar_info_dv = 0; - avi_cfg->db1_scan_info = 0; - avi_cfg->db2_colorimetry = 0; - avi_cfg->db2_aspect_ratio = 0; - avi_cfg->db2_active_fmt_ar = 0; - avi_cfg->db3_itc = 0; - avi_cfg->db3_ec = 0; - avi_cfg->db3_q_range = 0; - avi_cfg->db3_nup_scaling = 0; - avi_cfg->db4_videocode = 0; - avi_cfg->db5_pixel_repeat = 0; - avi_cfg->db6_7_line_eoftop = 0; - avi_cfg->db8_9_line_sofbottom = 0; - avi_cfg->db10_11_pixel_eofleft = 0; - avi_cfg->db12_13_pixel_sofright = 0; - - /* packet enable and repeat */ - 
repeat_cfg->audio_pkt = 0; - repeat_cfg->audio_pkt_repeat = 0; - repeat_cfg->avi_infoframe = 0; - repeat_cfg->avi_infoframe_repeat = 0; - repeat_cfg->gen_cntrl_pkt = 0; - repeat_cfg->gen_cntrl_pkt_repeat = 0; - repeat_cfg->generic_pkt = 0; - repeat_cfg->generic_pkt_repeat = 0; } static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) @@ -303,80 +272,22 @@ static void hdmi_core_video_config(struct hdmi_core_data *core, HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5); } -static void hdmi_core_aux_infoframe_avi_config(struct hdmi_core_data *core) +static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, + struct hdmi_avi_infoframe *frame) { - u32 val; - char sum = 0, checksum = 0; void __iomem *av_base = hdmi_av_base(core); - struct hdmi_core_infoframe_avi info_avi = core->avi_cfg; - - sum += 0x82 + 0x002 + 0x00D; - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_TYPE, 0x082); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_VERS, 0x002); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_LEN, 0x00D); - - val = (info_avi.db1_format << 5) | - (info_avi.db1_active_info << 4) | - (info_avi.db1_bar_info_dv << 2) | - (info_avi.db1_scan_info); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(0), val); - sum += val; - - val = (info_avi.db2_colorimetry << 6) | - (info_avi.db2_aspect_ratio << 4) | - (info_avi.db2_active_fmt_ar); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(1), val); - sum += val; - - val = (info_avi.db3_itc << 7) | - (info_avi.db3_ec << 4) | - (info_avi.db3_q_range << 2) | - (info_avi.db3_nup_scaling); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(2), val); - sum += val; - - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(3), - info_avi.db4_videocode); - sum += info_avi.db4_videocode; - - val = info_avi.db5_pixel_repeat; - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(4), val); - sum += val; - - val = info_avi.db6_7_line_eoftop & 0x00FF; - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(5), val); - sum += val; - - val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(6), val); - sum += val; - - val = info_avi.db8_9_line_sofbottom & 0x00FF; - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(7), val); - sum += val; - - val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(8), val); - sum += val; - - val = info_avi.db10_11_pixel_eofleft & 0x00FF; - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(9), val); - sum += val; - - val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(10), val); - sum += val; - - val = info_avi.db12_13_pixel_sofright & 0x00FF; - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(11), val); - sum += val; - - val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF); - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(12), val); - sum += val; + u8 data[HDMI_INFOFRAME_SIZE(AVI)]; + int i; - checksum = 0x100 - sum; - hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_CHSUM, checksum); + hdmi_avi_infoframe_pack(frame, data, sizeof(data)); + + print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data, + HDMI_INFOFRAME_SIZE(AVI), false); + + for (i = 0; i < sizeof(data); ++i) { + hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_BASE + i * 4, + data[i]); + } } static void hdmi_core_av_packet_config(struct hdmi_core_data *core, @@ -404,11 +315,10 @@ void hdmi4_configure(struct hdmi_core_data *core, struct omap_video_timings video_timing; struct hdmi_video_format video_format; /* HDMI core */ - struct hdmi_core_infoframe_avi *avi_cfg = 
&core->avi_cfg; struct hdmi_core_video_config v_core_cfg; - struct hdmi_core_packet_enable_repeat repeat_cfg; + struct hdmi_core_packet_enable_repeat repeat_cfg = { 0 }; - hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg); + hdmi_core_init(&v_core_cfg); hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg); @@ -431,44 +341,24 @@ void hdmi4_configure(struct hdmi_core_data *core, hdmi_core_powerdown_disable(core); v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL; - v_core_cfg.hdmi_dvi = cfg->cm.mode; + v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode; hdmi_core_video_config(core, &v_core_cfg); /* release software reset in the core */ hdmi_core_swreset_release(core); - /* - * configure packet - * info frame video see doc CEA861-D page 65 - */ - avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB; - avi_cfg->db1_active_info = - HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF; - avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO; - avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0; - avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO; - avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO; - avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME; - avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO; - avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601; - avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT; - avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO; - avi_cfg->db4_videocode = cfg->cm.code; - avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO; - avi_cfg->db6_7_line_eoftop = 0; - avi_cfg->db8_9_line_sofbottom = 0; - avi_cfg->db10_11_pixel_eofleft = 0; - avi_cfg->db12_13_pixel_sofright = 0; - - hdmi_core_aux_infoframe_avi_config(core); + if (cfg->hdmi_dvi_mode == HDMI_HDMI) { + hdmi_core_write_avi_infoframe(core, &cfg->infoframe); + + /* enable/repeat the infoframe */ + repeat_cfg.avi_infoframe = HDMI_PACKETENABLE; + repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON; + /* wakeup */ + repeat_cfg.audio_pkt = HDMI_PACKETENABLE; + repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON; + } - /* enable/repeat the infoframe */ - repeat_cfg.avi_infoframe = HDMI_PACKETENABLE; - repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON; - /* wakeup */ - repeat_cfg.audio_pkt = HDMI_PACKETENABLE; - repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON; hdmi_core_av_packet_config(core, repeat_cfg); } diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.h b/drivers/video/fbdev/omap2/dss/hdmi4_core.h index bb646896fa82..827909eb6c50 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi4_core.h +++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.h @@ -145,6 +145,7 @@ #define HDMI_CORE_AV_DPD 0xF4 #define HDMI_CORE_AV_PB_CTRL1 0xF8 #define HDMI_CORE_AV_PB_CTRL2 0xFC +#define HDMI_CORE_AV_AVI_BASE 0x100 #define HDMI_CORE_AV_AVI_TYPE 0x100 #define HDMI_CORE_AV_AVI_VERS 0x104 #define HDMI_CORE_AV_AVI_LEN 0x108 diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c index c468b9e1f295..32d02ec34d23 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/dss/hdmi5.c @@ -299,29 +299,11 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev, static void hdmi_display_set_timing(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - struct hdmi_cm cm; - const struct hdmi_config *t; - mutex_lock(&hdmi.lock); - cm = hdmi_get_code(timings); - hdmi.cfg.cm = cm; - - t = hdmi_get_timings(cm.mode, cm.code); - if (t != NULL) { - hdmi.cfg = *t; - - dispc_set_tv_pclk(t->timings.pixelclock); - } else { - hdmi.cfg.timings = 
*timings; - hdmi.cfg.cm.code = 0; - hdmi.cfg.cm.mode = HDMI_DVI; - - dispc_set_tv_pclk(timings->pixelclock); - } + hdmi.cfg.timings = *timings; - DSSDBG("using mode: %s, code %d\n", hdmi.cfg.cm.mode == HDMI_DVI ? - "DVI" : "HDMI", hdmi.cfg.cm.code); + dispc_set_tv_pclk(timings->pixelclock); mutex_unlock(&hdmi.lock); } @@ -329,14 +311,7 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev, static void hdmi_display_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - const struct hdmi_config *cfg; - struct hdmi_cm cm = hdmi.cfg.cm; - - cfg = hdmi_get_timings(cm.mode, cm.code); - if (cfg == NULL) - cfg = hdmi_default_timing(); - - memcpy(timings, &cfg->timings, sizeof(cfg->timings)); + *timings = hdmi.cfg.timings; } static void hdmi_dump_regs(struct seq_file *s) @@ -541,7 +516,7 @@ static int hdmi_audio_enable(struct omap_dss_device *dssdev) mutex_lock(&hdmi.lock); - if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) { + if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) { r = -EPERM; goto err; } @@ -579,7 +554,7 @@ static bool hdmi_audio_supported(struct omap_dss_device *dssdev) mutex_lock(&hdmi.lock); - r = hdmi_mode_has_audio(hdmi.cfg.cm.mode); + r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode); mutex_unlock(&hdmi.lock); return r; @@ -593,7 +568,7 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev, mutex_lock(&hdmi.lock); - if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) { + if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) { r = -EPERM; goto err; } @@ -640,6 +615,20 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev, } #endif +static int hdmi_set_infoframe(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi) +{ + hdmi.cfg.infoframe = *avi; + return 0; +} + +static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, + bool hdmi_mode) +{ + hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? 
HDMI_HDMI : HDMI_DVI; + return 0; +} + static const struct omapdss_hdmi_ops hdmi_ops = { .connect = hdmi_connect, .disconnect = hdmi_disconnect, @@ -652,6 +641,8 @@ static const struct omapdss_hdmi_ops hdmi_ops = { .get_timings = hdmi_display_get_timings, .read_edid = hdmi_read_edid, + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, .audio_enable = hdmi_audio_enable, .audio_disable = hdmi_audio_disable, diff --git a/drivers/video/fbdev/omap2/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/dss/hdmi5_core.c index 7528c7a42aa5..83acbf7a8c89 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi5_core.c +++ b/drivers/video/fbdev/omap2/dss/hdmi5_core.c @@ -290,7 +290,6 @@ void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s) } static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg, - struct hdmi_core_infoframe_avi *avi_cfg, struct hdmi_config *cfg) { DSSDBG("hdmi_core_init\n"); @@ -312,27 +311,8 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg, video_cfg->vblank_osc = 0; /* Always 0 - need to confirm */ video_cfg->vblank = cfg->timings.vsw + cfg->timings.vfp + cfg->timings.vbp; - video_cfg->v_fc_config.cm.mode = cfg->cm.mode; + video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode; video_cfg->v_fc_config.timings.interlace = cfg->timings.interlace; - - /* info frame */ - avi_cfg->db1_format = 0; - avi_cfg->db1_active_info = 0; - avi_cfg->db1_bar_info_dv = 0; - avi_cfg->db1_scan_info = 0; - avi_cfg->db2_colorimetry = 0; - avi_cfg->db2_aspect_ratio = 0; - avi_cfg->db2_active_fmt_ar = 0; - avi_cfg->db3_itc = 0; - avi_cfg->db3_ec = 0; - avi_cfg->db3_q_range = 0; - avi_cfg->db3_nup_scaling = 0; - avi_cfg->db4_videocode = 0; - avi_cfg->db5_pixel_repeat = 0; - avi_cfg->db6_7_line_eoftop = 0; - avi_cfg->db8_9_line_sofbottom = 0; - avi_cfg->db10_11_pixel_eofleft = 0; - avi_cfg->db12_13_pixel_sofright = 0; } /* DSS_HDMI_CORE_VIDEO_CONFIG */ @@ -398,7 +378,7 @@ static void hdmi_core_video_config(struct hdmi_core_data *core, /* select DVI mode */ REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF, - cfg->v_fc_config.cm.mode, 3, 3); + cfg->v_fc_config.hdmi_dvi_mode, 3, 3); } static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core) @@ -438,24 +418,60 @@ static void hdmi_core_config_video_sampler(struct hdmi_core_data *core) REG_FLD_MOD(core->base, HDMI_CORE_TX_INVID0, video_mapping, 4, 0); } -static void hdmi_core_aux_infoframe_avi_config(struct hdmi_core_data *core) +static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, + struct hdmi_avi_infoframe *frame) { void __iomem *base = core->base; - struct hdmi_core_infoframe_avi avi = core->avi_cfg; - - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_format, 1, 0); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_active_info, 6, 6); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_bar_info_dv, 3, 2); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_scan_info, 5, 4); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF1, avi.db2_colorimetry, 7, 6); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF1, avi.db2_aspect_ratio, 5, 4); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF1, avi.db2_active_fmt_ar, 3, 0); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_itc, 7, 7); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_ec, 6, 4); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_q_range, 3, 2); - REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_nup_scaling, 1, 0); - REG_FLD_MOD(base, HDMI_CORE_FC_AVIVID, avi.db4_videocode, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, avi.db5_pixel_repeat, 3, 
0); + u8 data[HDMI_INFOFRAME_SIZE(AVI)]; + u8 *ptr; + unsigned y, a, b, s; + unsigned c, m, r; + unsigned itc, ec, q, sc; + unsigned vic; + unsigned yq, cn, pr; + + hdmi_avi_infoframe_pack(frame, data, sizeof(data)); + + print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data, + HDMI_INFOFRAME_SIZE(AVI), false); + + ptr = data + HDMI_INFOFRAME_HEADER_SIZE; + + y = (ptr[0] >> 5) & 0x3; + a = (ptr[0] >> 4) & 0x1; + b = (ptr[0] >> 2) & 0x3; + s = (ptr[0] >> 0) & 0x3; + + c = (ptr[1] >> 6) & 0x3; + m = (ptr[1] >> 4) & 0x3; + r = (ptr[1] >> 0) & 0x3; + + itc = (ptr[2] >> 7) & 0x1; + ec = (ptr[2] >> 4) & 0x7; + q = (ptr[2] >> 2) & 0x3; + sc = (ptr[2] >> 0) & 0x3; + + vic = ptr[3]; + + yq = (ptr[4] >> 6) & 0x3; + cn = (ptr[4] >> 4) & 0x3; + pr = (ptr[4] >> 0) & 0xf; + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF0, + (a << 6) | (s << 4) | (b << 2) | (y << 0)); + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF1, + (c << 6) | (m << 4) | (r << 0)); + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF2, + (itc << 7) | (ec << 4) | (q << 2) | (sc << 0)); + + hdmi_write_reg(base, HDMI_CORE_FC_AVIVID, vic); + + hdmi_write_reg(base, HDMI_CORE_FC_AVICONF3, + (yq << 2) | (cn << 0)); + + REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0); } static void hdmi_core_csc_config(struct hdmi_core_data *core, @@ -497,10 +513,8 @@ static void hdmi_core_configure_range(struct hdmi_core_data *core) /* support limited range with 24 bit color depth for now */ csc_coeff = csc_table_deepcolor[0]; - core->avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_LR; hdmi_core_csc_config(core, csc_coeff); - hdmi_core_aux_infoframe_avi_config(core); } static void hdmi_core_enable_video_path(struct hdmi_core_data *core) @@ -591,11 +605,10 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, struct omap_video_timings video_timing; struct hdmi_video_format video_format; struct hdmi_core_vid_config v_core_cfg; - struct hdmi_core_infoframe_avi *avi_cfg = &core->avi_cfg; hdmi_core_mask_interrupts(core); - hdmi_core_init(&v_core_cfg, avi_cfg, cfg); + hdmi_core_init(&v_core_cfg, cfg); hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg); @@ -608,7 +621,9 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, hdmi_wp_video_config_interface(wp, &video_timing); + /* support limited range with 24 bit color depth for now */ hdmi_core_configure_range(core); + cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; /* * configure core video part, set software reset in the core @@ -621,29 +636,8 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, hdmi_core_config_csc(core); hdmi_core_config_video_sampler(core); - /* - * configure packet info frame video see doc CEA861-D page 65 - */ - avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB; - avi_cfg->db1_active_info = - HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF; - avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO; - avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0; - avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO; - avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO; - avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME; - avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO; - avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601; - avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT; - avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO; - avi_cfg->db4_videocode = cfg->cm.code; - avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO; - avi_cfg->db6_7_line_eoftop = 0; - 
avi_cfg->db8_9_line_sofbottom = 0; - avi_cfg->db10_11_pixel_eofleft = 0; - avi_cfg->db12_13_pixel_sofright = 0; - - hdmi_core_aux_infoframe_avi_config(core); + if (cfg->hdmi_dvi_mode == HDMI_HDMI) + hdmi_core_write_avi_infoframe(core, &cfg->infoframe); hdmi_core_enable_video_path(core); diff --git a/drivers/video/fbdev/omap2/dss/hdmi_common.c b/drivers/video/fbdev/omap2/dss/hdmi_common.c index 9a2c39cf297f..7d5f1039de9f 100644 --- a/drivers/video/fbdev/omap2/dss/hdmi_common.c +++ b/drivers/video/fbdev/omap2/dss/hdmi_common.c @@ -1,18 +1,4 @@ -/* - * Logic for the below structure : - * user enters the CEA or VESA timings by specifying the HDMI/DVI code. - * There is a correspondence between CEA/VESA timing and code, please - * refer to section 6.3 in HDMI 1.3 specification for timing code. - * - * In the below structure, cea_vesa_timings corresponds to all OMAP4 - * supported CEA and VESA timing values.code_cea corresponds to the CEA - * code, It is used to get the timing from cea_vesa_timing array.Similarly - * with code_vesa. Code_index is used for back mapping, that is once EDID - * is read from the TV, EDID is parsed to find the timing values and then - * map it to corresponding CEA or VESA index. - */ - #define DSS_SUBSYS_NAME "HDMI" #include <linux/kernel.h> @@ -22,308 +8,6 @@ #include "hdmi.h" -static const struct hdmi_config cea_timings[] = { - { - { 640, 480, 25200000, 96, 16, 48, 2, 10, 33, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 1, HDMI_HDMI }, - }, - { - { 720, 480, 27027000, 62, 16, 60, 6, 9, 30, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 2, HDMI_HDMI }, - }, - { - { 1280, 720, 74250000, 40, 110, 220, 5, 5, 20, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 4, HDMI_HDMI }, - }, - { - { 1920, 540, 74250000, 44, 88, 148, 5, 2, 15, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - true, }, - { 5, HDMI_HDMI }, - }, - { - { 1440, 240, 27027000, 124, 38, 114, 3, 4, 15, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - true, }, - { 6, HDMI_HDMI }, - }, - { - { 1920, 1080, 148500000, 44, 88, 148, 5, 4, 36, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 16, HDMI_HDMI }, - }, - { - { 720, 576, 27000000, 64, 12, 68, 5, 5, 39, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 17, HDMI_HDMI }, - }, - { - { 1280, 720, 74250000, 40, 440, 220, 5, 5, 20, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 19, HDMI_HDMI }, - }, - { - { 1920, 540, 74250000, 44, 528, 148, 5, 2, 15, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - true, }, - { 20, HDMI_HDMI }, - }, - { - { 1440, 288, 27000000, 126, 24, 138, 3, 2, 19, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - true, }, - { 21, HDMI_HDMI }, - }, - { - { 1440, 576, 54000000, 128, 24, 136, 5, 5, 39, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 29, HDMI_HDMI }, - }, - { - { 1920, 1080, 148500000, 44, 528, 148, 5, 4, 36, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 31, HDMI_HDMI }, - }, - { - { 1920, 1080, 74250000, 44, 638, 148, 5, 4, 36, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 32, HDMI_HDMI }, - }, - { - { 2880, 480, 108108000, 248, 64, 240, 6, 9, 30, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 35, HDMI_HDMI }, - }, - { - { 2880, 576, 108000000, 256, 48, 272, 5, 5, 39, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 37, HDMI_HDMI }, - }, -}; - -static const struct hdmi_config vesa_timings[] = { -/* 
VESA From Here */ - { - { 640, 480, 25175000, 96, 16, 48, 2, 11, 31, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 4, HDMI_DVI }, - }, - { - { 800, 600, 40000000, 128, 40, 88, 4, 1, 23, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 9, HDMI_DVI }, - }, - { - { 848, 480, 33750000, 112, 16, 112, 8, 6, 23, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0xE, HDMI_DVI }, - }, - { - { 1280, 768, 79500000, 128, 64, 192, 7, 3, 20, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 0x17, HDMI_DVI }, - }, - { - { 1280, 800, 83500000, 128, 72, 200, 6, 3, 22, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 0x1C, HDMI_DVI }, - }, - { - { 1360, 768, 85500000, 112, 64, 256, 6, 3, 18, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x27, HDMI_DVI }, - }, - { - { 1280, 960, 108000000, 112, 96, 312, 3, 1, 36, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x20, HDMI_DVI }, - }, - { - { 1280, 1024, 108000000, 112, 48, 248, 3, 1, 38, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x23, HDMI_DVI }, - }, - { - { 1024, 768, 65000000, 136, 24, 160, 6, 3, 29, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 0x10, HDMI_DVI }, - }, - { - { 1400, 1050, 121750000, 144, 88, 232, 4, 3, 32, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 0x2A, HDMI_DVI }, - }, - { - { 1440, 900, 106500000, 152, 80, 232, 6, 3, 25, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 0x2F, HDMI_DVI }, - }, - { - { 1680, 1050, 146250000, 176 , 104, 280, 6, 3, 30, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, - false, }, - { 0x3A, HDMI_DVI }, - }, - { - { 1366, 768, 85500000, 143, 70, 213, 3, 3, 24, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x51, HDMI_DVI }, - }, - { - { 1920, 1080, 148500000, 44, 148, 80, 5, 4, 36, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x52, HDMI_DVI }, - }, - { - { 1280, 768, 68250000, 32, 48, 80, 7, 3, 12, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x16, HDMI_DVI }, - }, - { - { 1400, 1050, 101000000, 32, 48, 80, 4, 3, 23, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x29, HDMI_DVI }, - }, - { - { 1680, 1050, 119000000, 32, 48, 80, 6, 3, 21, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x39, HDMI_DVI }, - }, - { - { 1280, 800, 79500000, 32, 48, 80, 6, 3, 14, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x1B, HDMI_DVI }, - }, - { - { 1280, 720, 74250000, 40, 110, 220, 5, 5, 20, - OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x55, HDMI_DVI }, - }, - { - { 1920, 1200, 154000000, 32, 48, 80, 6, 3, 26, - OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, - false, }, - { 0x44, HDMI_DVI }, - }, -}; - -const struct hdmi_config *hdmi_default_timing(void) -{ - return &vesa_timings[0]; -} - -static const struct hdmi_config *hdmi_find_timing(int code, - const struct hdmi_config *timings_arr, int len) -{ - int i; - - for (i = 0; i < len; i++) { - if (timings_arr[i].cm.code == code) - return &timings_arr[i]; - } - - return NULL; -} - -const struct hdmi_config *hdmi_get_timings(int mode, int code) -{ - const struct hdmi_config *arr; - int len; - - if (mode == HDMI_DVI) { - arr = vesa_timings; - len = ARRAY_SIZE(vesa_timings); - } else { - arr = cea_timings; - len = ARRAY_SIZE(cea_timings); - } - - return hdmi_find_timing(code, arr, len); -} - -static bool 
hdmi_timings_compare(struct omap_video_timings *timing1, - const struct omap_video_timings *timing2) -{ - int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync; - - if ((DIV_ROUND_CLOSEST(timing2->pixelclock, 1000000) == - DIV_ROUND_CLOSEST(timing1->pixelclock, 1000000)) && - (timing2->x_res == timing1->x_res) && - (timing2->y_res == timing1->y_res)) { - - timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp; - timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp; - timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp; - timing1_vsync = timing1->vfp + timing1->vsw + timing1->vbp; - - DSSDBG("timing1_hsync = %d timing1_vsync = %d"\ - "timing2_hsync = %d timing2_vsync = %d\n", - timing1_hsync, timing1_vsync, - timing2_hsync, timing2_vsync); - - if ((timing1_hsync == timing2_hsync) && - (timing1_vsync == timing2_vsync)) { - return true; - } - } - return false; -} - -struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing) -{ - int i; - struct hdmi_cm cm = {-1}; - DSSDBG("hdmi_get_code\n"); - - for (i = 0; i < ARRAY_SIZE(cea_timings); i++) { - if (hdmi_timings_compare(timing, &cea_timings[i].timings)) { - cm = cea_timings[i].cm; - goto end; - } - } - for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) { - if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) { - cm = vesa_timings[i].cm; - goto end; - } - } - -end: - return cm; -} - int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, struct hdmi_phy_data *phy) { diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index 8a8d7f060784..be73727c7227 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -326,13 +326,7 @@ static int riva_bl_update_status(struct backlight_device *bd) return 0; } -static int riva_bl_get_brightness(struct backlight_device *bd) -{ - return bd->props.brightness; -} - static const struct backlight_ops riva_bl_ops = { - .get_brightness = riva_bl_get_brightness, .update_status = riva_bl_update_status, }; diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c index 62acae2694a9..b33abb0a433d 100644 --- a/drivers/video/fbdev/s3c-fb.c +++ b/drivers/video/fbdev/s3c-fb.c @@ -1805,38 +1805,6 @@ static struct s3c_fb_driverdata s3c_fb_data_64xx = { .win[4] = &s3c_fb_data_64xx_wins[4], }; -static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = { - .variant = { - .nr_windows = 5, - .vidtcon = VIDTCON0, - .wincon = WINCON(0), - .winmap = WINxMAP(0), - .keycon = WKEYCON, - .osd = VIDOSD_BASE, - .osd_stride = 16, - .buf_start = VIDW_BUF_START(0), - .buf_size = VIDW_BUF_SIZE(0), - .buf_end = VIDW_BUF_END(0), - - .palette = { - [0] = 0x2400, - [1] = 0x2800, - [2] = 0x2c00, - [3] = 0x3000, - [4] = 0x3400, - }, - - .has_prtcon = 1, - .has_blendcon = 1, - .has_clksel = 1, - }, - .win[0] = &s3c_fb_data_s5p_wins[0], - .win[1] = &s3c_fb_data_s5p_wins[1], - .win[2] = &s3c_fb_data_s5p_wins[2], - .win[3] = &s3c_fb_data_s5p_wins[3], - .win[4] = &s3c_fb_data_s5p_wins[4], -}; - static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = { .variant = { .nr_windows = 5, @@ -1970,41 +1938,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s3c2443 = { }, }; -static struct s3c_fb_driverdata s3c_fb_data_s5p64x0 = { - .variant = { - .nr_windows = 3, - .vidtcon = VIDTCON0, - .wincon = WINCON(0), - .winmap = WINxMAP(0), - .keycon = WKEYCON, - .osd = VIDOSD_BASE, - .osd_stride = 16, - .buf_start = VIDW_BUF_START(0), - .buf_size = VIDW_BUF_SIZE(0), - .buf_end = VIDW_BUF_END(0), - - .palette = { - [0] = 0x2400, - [1] = 0x2800, - 
[2] = 0x2c00, - }, - - .has_blendcon = 1, - .has_fixvclk = 1, - }, - .win[0] = &s3c_fb_data_s5p_wins[0], - .win[1] = &s3c_fb_data_s5p_wins[1], - .win[2] = &s3c_fb_data_s5p_wins[2], -}; - static struct platform_device_id s3c_fb_driver_ids[] = { { .name = "s3c-fb", .driver_data = (unsigned long)&s3c_fb_data_64xx, }, { - .name = "s5pc100-fb", - .driver_data = (unsigned long)&s3c_fb_data_s5pc100, - }, { .name = "s5pv210-fb", .driver_data = (unsigned long)&s3c_fb_data_s5pv210, }, { @@ -2016,9 +1954,6 @@ static struct platform_device_id s3c_fb_driver_ids[] = { }, { .name = "s3c2443-fb", .driver_data = (unsigned long)&s3c_fb_data_s3c2443, - }, { - .name = "s5p64x0-fb", - .driver_data = (unsigned long)&s3c_fb_data_s5p64x0, }, {}, }; diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c index 81af5a63e9e1..43c63a4f3178 100644 --- a/drivers/video/fbdev/s3c2410fb.c +++ b/drivers/video/fbdev/s3c2410fb.c @@ -616,7 +616,7 @@ static int s3c2410fb_debug_store(struct device *dev, return len; } -static DEVICE_ATTR(debug, 0666, s3c2410fb_debug_show, s3c2410fb_debug_store); +static DEVICE_ATTR(debug, 0664, s3c2410fb_debug_show, s3c2410fb_debug_store); static struct fb_ops s3c2410fb_ops = { .owner = THIS_MODULE, @@ -932,7 +932,7 @@ static int s3c24xxfb_probe(struct platform_device *pdev, goto release_irq; } - clk_enable(info->clk); + clk_prepare_enable(info->clk); dprintk("got and enabled clock\n"); usleep_range(1000, 1100); @@ -996,7 +996,7 @@ static int s3c24xxfb_probe(struct platform_device *pdev, free_video_memory: s3c2410fb_unmap_video_memory(fbinfo); release_clock: - clk_disable(info->clk); + clk_disable_unprepare(info->clk); clk_put(info->clk); release_irq: free_irq(irq, info); @@ -1038,7 +1038,7 @@ static int s3c2410fb_remove(struct platform_device *pdev) s3c2410fb_unmap_video_memory(fbinfo); if (info->clk) { - clk_disable(info->clk); + clk_disable_unprepare(info->clk); clk_put(info->clk); info->clk = NULL; } @@ -1070,7 +1070,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state) * before the clock goes off again (bjd) */ usleep_range(1000, 1100); - clk_disable(info->clk); + clk_disable_unprepare(info->clk); return 0; } @@ -1080,7 +1080,7 @@ static int s3c2410fb_resume(struct platform_device *dev) struct fb_info *fbinfo = platform_get_drvdata(dev); struct s3c2410fb_info *info = fbinfo->par; - clk_enable(info->clk); + clk_prepare_enable(info->clk); usleep_range(1000, 1100); s3c2410fb_init_registers(fbinfo); diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c index bd40f5ecd901..dfe3eb769638 100644 --- a/drivers/video/fbdev/sis/init.c +++ b/drivers/video/fbdev/sis/init.c @@ -1511,7 +1511,7 @@ SiS_Get310DRAMType(struct SiS_Private *SiS_Pr) } else if(SiS_Pr->ChipType >= SIS_340) { /* TODO */ data = 0; - } if(SiS_Pr->ChipType >= SIS_661) { + } else if(SiS_Pr->ChipType >= SIS_661) { if(SiS_Pr->SiS_ROMNew) { data = ((SiS_GetReg(SiS_Pr->SiS_P3d4,0x78) & 0xc0) >> 6); } else { diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c index 22ad028bf123..3f12a2dd959a 100644 --- a/drivers/video/fbdev/sis/sis_main.c +++ b/drivers/video/fbdev/sis/sis_main.c @@ -1572,10 +1572,6 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) /* Adapt RGB settings */ sisfb_bpp_to_var(ivideo, var); - /* Sanity check for offsets */ - if(var->xoffset < 0) var->xoffset = 0; - if(var->yoffset < 0) var->yoffset = 0; - if(var->xres > var->xres_virtual) var->xres_virtual = var->xres; diff --git 
a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 987edf110038..5c098d5b4043 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -236,6 +236,7 @@ timingfail: if (native_mode) of_node_put(native_mode); display_timings_release(disp); + disp = NULL; entryfail: kfree(disp); dispfail: diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 101db3faf5d4..3d1463c6b120 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -91,7 +91,7 @@ struct virtio_pci_vq_info }; /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ -static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = { +static const struct pci_device_id virtio_pci_id_table[] = { { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, { 0 } }; diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index bfb2d3f06738..18078ecbfcc6 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c @@ -1555,16 +1555,14 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge, } /* Allocate mem for CR/CSR image */ - bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE, - &bridge->crcsr_bus); + bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE, + &bridge->crcsr_bus); if (bridge->crcsr_kernel == NULL) { dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR " "image\n"); return -ENOMEM; } - memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE); - crcsr_addr = slot * (512 * 1024); iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO); diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 61e706c0e00c..e07cfa8001bb 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -2275,16 +2275,14 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, bridge = tsi148_bridge->driver_priv; /* Allocate mem for CR/CSR image */ - bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE, - &bridge->crcsr_bus); + bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE, + &bridge->crcsr_bus); if (bridge->crcsr_kernel == NULL) { dev_err(tsi148_bridge->parent, "Failed to allocate memory for " "CR/CSR image\n"); return -ENOMEM; } - memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE); - reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low); iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 76dd54122f76..f57312fced80 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1293,7 +1293,7 @@ config DIAG288_WATCHDOG both. To compile this driver as a module, choose M here. The module - will be called vmwatchdog. + will be called diag288_wdt. 
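
The vme_ca91cx42 and vme_tsi148 hunks above drop an explicit memset() by switching from pci_alloc_consistent() to pci_zalloc_consistent(), which hands back DMA-coherent memory that is already zeroed. A minimal sketch of that pattern follows; my_bridge, my_bridge_alloc() and BUF_SIZE are hypothetical names, not part of the patch — only the pci_zalloc_consistent() call mirrors the real change.

	#include <linux/pci.h>

	#define BUF_SIZE (512 * 1024)		/* hypothetical buffer size */

	struct my_bridge {			/* hypothetical driver state */
		void *buf;
		dma_addr_t buf_bus;
	};

	static int my_bridge_alloc(struct pci_dev *pdev, struct my_bridge *bridge)
	{
		/*
		 * Before: pci_alloc_consistent() followed by a manual
		 *         memset(bridge->buf, 0, BUF_SIZE).
		 * After:  pci_zalloc_consistent() returns zeroed coherent
		 *         memory, so the separate memset() goes away.
		 */
		bridge->buf = pci_zalloc_consistent(pdev, BUF_SIZE, &bridge->buf_bus);
		if (!bridge->buf)
			return -ENOMEM;
		return 0;
	}

The same "allocate then zero" collapse is what both CR/CSR image allocations above reduce to; error handling around the NULL check is unchanged.
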
# SUPERH (sh + sh64) Architecture diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c index 4baf2d788920..8453531545df 100644 --- a/drivers/watchdog/octeon-wdt-main.c +++ b/drivers/watchdog/octeon-wdt-main.c @@ -145,35 +145,39 @@ static void __init octeon_wdt_build_stage1(void) uasm_i_mfc0(&p, K0, C0_STATUS); #ifdef CONFIG_HOTPLUG_CPU - uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI), label_enter_bootloader); + if (octeon_bootloader_entry_addr) + uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI), + label_enter_bootloader); #endif /* Force 64-bit addressing enabled */ uasm_i_ori(&p, K0, K0, ST0_UX | ST0_SX | ST0_KX); uasm_i_mtc0(&p, K0, C0_STATUS); #ifdef CONFIG_HOTPLUG_CPU - uasm_i_mfc0(&p, K0, C0_EBASE); - /* Coreid number in K0 */ - uasm_i_andi(&p, K0, K0, 0xf); - /* 8 * coreid in bits 16-31 */ - uasm_i_dsll_safe(&p, K0, K0, 3 + 16); - uasm_i_ori(&p, K0, K0, 0x8001); - uasm_i_dsll_safe(&p, K0, K0, 16); - uasm_i_ori(&p, K0, K0, 0x0700); - uasm_i_drotr_safe(&p, K0, K0, 32); - /* - * Should result in: 0x8001,0700,0000,8*coreid which is - * CVMX_CIU_WDOGX(coreid) - 0x0500 - * - * Now ld K0, CVMX_CIU_WDOGX(coreid) - */ - uasm_i_ld(&p, K0, 0x500, K0); - /* - * If bit one set handle the NMI as a watchdog event. - * otherwise transfer control to bootloader. - */ - uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader); - uasm_i_nop(&p); + if (octeon_bootloader_entry_addr) { + uasm_i_mfc0(&p, K0, C0_EBASE); + /* Coreid number in K0 */ + uasm_i_andi(&p, K0, K0, 0xf); + /* 8 * coreid in bits 16-31 */ + uasm_i_dsll_safe(&p, K0, K0, 3 + 16); + uasm_i_ori(&p, K0, K0, 0x8001); + uasm_i_dsll_safe(&p, K0, K0, 16); + uasm_i_ori(&p, K0, K0, 0x0700); + uasm_i_drotr_safe(&p, K0, K0, 32); + /* + * Should result in: 0x8001,0700,0000,8*coreid which is + * CVMX_CIU_WDOGX(coreid) - 0x0500 + * + * Now ld K0, CVMX_CIU_WDOGX(coreid) + */ + uasm_i_ld(&p, K0, 0x500, K0); + /* + * If bit one set handle the NMI as a watchdog event. + * otherwise transfer control to bootloader. + */ + uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader); + uasm_i_nop(&p); + } #endif /* Clear Dcache so cvmseg works right. 
*/ @@ -194,11 +198,13 @@ static void __init octeon_wdt_build_stage1(void) uasm_i_dmfc0(&p, K0, C0_DESAVE); #ifdef CONFIG_HOTPLUG_CPU - uasm_build_label(&l, p, label_enter_bootloader); - /* Jump to the bootloader and restore K0 */ - UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr); - uasm_i_jr(&p, K0); - uasm_i_dmfc0(&p, K0, C0_DESAVE); + if (octeon_bootloader_entry_addr) { + uasm_build_label(&l, p, label_enter_bootloader); + /* Jump to the bootloader and restore K0 */ + UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr); + uasm_i_jr(&p, K0); + uasm_i_dmfc0(&p, K0, C0_DESAVE); + } #endif uasm_resolve_relocs(relocs, labels); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 5c660c77f03b..1e0a317d3dcd 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit) rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); if (rc) { - pr_info("%s: add_memory() failed: %i\n", __func__, rc); - return BP_EAGAIN; + pr_warn("Cannot add additional memory (%i)\n", rc); + return BP_ECANCELED; } balloon_hotplug -= credit; diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index c919d3d5c845..5b5c5ff273fd 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -246,7 +246,7 @@ static void xen_irq_info_cleanup(struct irq_info *info) */ unsigned int evtchn_from_irq(unsigned irq) { - if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq))) + if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))) return 0; return info_for_irq(irq)->evtchn; diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 84b4bfb84344..417415d738d0 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -67,10 +67,9 @@ static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; static unsigned event_array_pages __read_mostly; /* - * sync_set_bit() and friends must be unsigned long aligned on non-x86 - * platforms. + * sync_set_bit() and friends must be unsigned long aligned. */ -#if !defined(CONFIG_X86) && BITS_PER_LONG > 32 +#if BITS_PER_LONG > 32 #define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL) #define EVTCHN_FIFO_BIT(b, w) \ @@ -100,6 +99,25 @@ static unsigned evtchn_fifo_nr_channels(void) return event_array_pages * EVENT_WORDS_PER_PAGE; } +static int init_control_block(int cpu, + struct evtchn_fifo_control_block *control_block) +{ + struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); + struct evtchn_init_control init_control; + unsigned int i; + + /* Reset the control block and the local HEADs. 
*/ + clear_page(control_block); + for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++) + q->head[i] = 0; + + init_control.control_gfn = virt_to_mfn(control_block); + init_control.offset = 0; + init_control.vcpu = cpu; + + return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control); +} + static void free_unused_array_pages(void) { unsigned i; @@ -312,7 +330,7 @@ static void evtchn_fifo_handle_events(unsigned cpu) ready = xchg(&control_block->ready, 0); while (ready) { - q = find_first_bit(BM(&ready), EVTCHN_FIFO_MAX_QUEUES); + q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); consume_one_event(cpu, control_block, q, &ready); ready |= xchg(&control_block->ready, 0); } @@ -324,7 +342,6 @@ static void evtchn_fifo_resume(void) for_each_possible_cpu(cpu) { void *control_block = per_cpu(cpu_control_block, cpu); - struct evtchn_init_control init_control; int ret; if (!control_block) @@ -341,12 +358,7 @@ static void evtchn_fifo_resume(void) continue; } - init_control.control_gfn = virt_to_mfn(control_block); - init_control.offset = 0; - init_control.vcpu = cpu; - - ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, - &init_control); + ret = init_control_block(cpu, control_block); if (ret < 0) BUG(); } @@ -374,30 +386,25 @@ static const struct evtchn_ops evtchn_ops_fifo = { .resume = evtchn_fifo_resume, }; -static int evtchn_fifo_init_control_block(unsigned cpu) +static int evtchn_fifo_alloc_control_block(unsigned cpu) { - struct page *control_block = NULL; - struct evtchn_init_control init_control; + void *control_block = NULL; int ret = -ENOMEM; - control_block = alloc_page(GFP_KERNEL|__GFP_ZERO); + control_block = (void *)__get_free_page(GFP_KERNEL); if (control_block == NULL) goto error; - init_control.control_gfn = virt_to_mfn(page_address(control_block)); - init_control.offset = 0; - init_control.vcpu = cpu; - - ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control); + ret = init_control_block(cpu, control_block); if (ret < 0) goto error; - per_cpu(cpu_control_block, cpu) = page_address(control_block); + per_cpu(cpu_control_block, cpu) = control_block; return 0; error: - __free_page(control_block); + free_page((unsigned long)control_block); return ret; } @@ -411,7 +418,7 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: if (!per_cpu(cpu_control_block, cpu)) - ret = evtchn_fifo_init_control_block(cpu); + ret = evtchn_fifo_alloc_control_block(cpu); break; default: break; @@ -428,7 +435,7 @@ int __init xen_evtchn_fifo_init(void) int cpu = get_cpu(); int ret; - ret = evtchn_fifo_init_control_block(cpu); + ret = evtchn_fifo_alloc_control_block(cpu); if (ret < 0) goto out; diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 787d17945418..e53fe191738c 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, int i, rc, readonly; LIST_HEAD(queue_gref); LIST_HEAD(queue_file); - struct gntalloc_gref *gref; + struct gntalloc_gref *gref, *next; readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE); rc = -ENOMEM; @@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, goto undo; /* Grant foreign access to the page. 
*/ - gref->gref_id = gnttab_grant_foreign_access(op->domid, + rc = gnttab_grant_foreign_access(op->domid, pfn_to_mfn(page_to_pfn(gref->page)), readonly); - if ((int)gref->gref_id < 0) { - rc = gref->gref_id; + if (rc < 0) goto undo; - } - gref_ids[i] = gref->gref_id; + gref_ids[i] = gref->gref_id = rc; } /* Add to gref lists. */ @@ -162,8 +160,8 @@ undo: mutex_lock(&gref_mutex); gref_size -= (op->count - i); - list_for_each_entry(gref, &queue_file, next_file) { - /* __del_gref does not remove from queue_file */ + list_for_each_entry_safe(gref, next, &queue_file, next_file) { + list_del(&gref->next_file); __del_gref(gref); } @@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref) gref->notify.flags = 0; - if (gref->gref_id > 0) { + if (gref->gref_id) { if (gnttab_query_foreign_access(gref->gref_id)) return; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index eeba7544f0cd..c254ae036f18 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -69,7 +69,6 @@ struct grant_frames xen_auto_xlat_grant_frames; static union { struct grant_entry_v1 *v1; - union grant_entry_v2 *v2; void *addr; } gnttab_shared; @@ -120,36 +119,10 @@ struct gnttab_ops { * by bit operations. */ int (*query_foreign_access)(grant_ref_t ref); - /* - * Grant a domain to access a range of bytes within the page referred by - * an available grant entry. Ref parameter is reference of a grant entry - * which will be sub-page accessed, domid is id of grantee domain, frame - * is frame address of subpage grant, flags is grant type and flag - * information, page_off is offset of the range of bytes, and length is - * length of bytes to be accessed. - */ - void (*update_subpage_entry)(grant_ref_t ref, domid_t domid, - unsigned long frame, int flags, - unsigned page_off, unsigned length); - /* - * Redirect an available grant entry on domain A to another grant - * reference of domain B, then allow domain C to use grant reference - * of domain B transitively. Ref parameter is an available grant entry - * reference on domain A, domid is id of domain C which accesses grant - * entry transitively, flags is grant type and flag information, - * trans_domid is id of domain B whose grant entry is finally accessed - * transitively, trans_gref is grant entry transitive reference of - * domain B. - */ - void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags, - domid_t trans_domid, grant_ref_t trans_gref); }; static struct gnttab_ops *gnttab_interface; -/*This reflects status of grant entries, so act as a global value*/ -static grant_status_t *grstatus; - static int grant_table_version; static int grefs_per_grant_frame; @@ -231,7 +204,7 @@ static void put_free_entry(grant_ref_t ref) } /* - * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. + * Following applies to gnttab_update_entry_v1. * Introducing a valid entry into the grant table: * 1. Write ent->domid. * 2. 
Write ent->frame: @@ -250,15 +223,6 @@ static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid, gnttab_shared.v1[ref].flags = flags; } -static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid, - unsigned long frame, unsigned flags) -{ - gnttab_shared.v2[ref].hdr.domid = domid; - gnttab_shared.v2[ref].full_page.frame = frame; - wmb(); - gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags; -} - /* * Public grant-issuing interface functions */ @@ -285,132 +249,11 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); -static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid, - unsigned long frame, int flags, - unsigned page_off, unsigned length) -{ - gnttab_shared.v2[ref].sub_page.frame = frame; - gnttab_shared.v2[ref].sub_page.page_off = page_off; - gnttab_shared.v2[ref].sub_page.length = length; - gnttab_shared.v2[ref].hdr.domid = domid; - wmb(); - gnttab_shared.v2[ref].hdr.flags = - GTF_permit_access | GTF_sub_page | flags; -} - -int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid, - unsigned long frame, int flags, - unsigned page_off, - unsigned length) -{ - if (flags & (GTF_accept_transfer | GTF_reading | - GTF_writing | GTF_transitive)) - return -EPERM; - - if (gnttab_interface->update_subpage_entry == NULL) - return -ENOSYS; - - gnttab_interface->update_subpage_entry(ref, domid, frame, flags, - page_off, length); - - return 0; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref); - -int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame, - int flags, unsigned page_off, - unsigned length) -{ - int ref, rc; - - ref = get_free_entries(1); - if (unlikely(ref < 0)) - return -ENOSPC; - - rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags, - page_off, length); - if (rc < 0) { - put_free_entry(ref); - return rc; - } - - return ref; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage); - -bool gnttab_subpage_grants_available(void) -{ - return gnttab_interface->update_subpage_entry != NULL; -} -EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available); - -static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid, - int flags, domid_t trans_domid, - grant_ref_t trans_gref) -{ - gnttab_shared.v2[ref].transitive.trans_domid = trans_domid; - gnttab_shared.v2[ref].transitive.gref = trans_gref; - gnttab_shared.v2[ref].hdr.domid = domid; - wmb(); - gnttab_shared.v2[ref].hdr.flags = - GTF_permit_access | GTF_transitive | flags; -} - -int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid, - int flags, domid_t trans_domid, - grant_ref_t trans_gref) -{ - if (flags & (GTF_accept_transfer | GTF_reading | - GTF_writing | GTF_sub_page)) - return -EPERM; - - if (gnttab_interface->update_trans_entry == NULL) - return -ENOSYS; - - gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid, - trans_gref); - - return 0; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref); - -int gnttab_grant_foreign_access_trans(domid_t domid, int flags, - domid_t trans_domid, - grant_ref_t trans_gref) -{ - int ref, rc; - - ref = get_free_entries(1); - if (unlikely(ref < 0)) - return -ENOSPC; - - rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags, - trans_domid, trans_gref); - if (rc < 0) { - put_free_entry(ref); - return rc; - } - - return ref; -} -EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans); - -bool gnttab_trans_grants_available(void) -{ - return gnttab_interface->update_trans_entry 
!= NULL; -} -EXPORT_SYMBOL_GPL(gnttab_trans_grants_available); - static int gnttab_query_foreign_access_v1(grant_ref_t ref) { return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); } -static int gnttab_query_foreign_access_v2(grant_ref_t ref) -{ - return grstatus[ref] & (GTF_reading|GTF_writing); -} - int gnttab_query_foreign_access(grant_ref_t ref) { return gnttab_interface->query_foreign_access(ref); @@ -433,29 +276,6 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) return 1; } -static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly) -{ - gnttab_shared.v2[ref].hdr.flags = 0; - mb(); - if (grstatus[ref] & (GTF_reading|GTF_writing)) { - return 0; - } else { - /* The read of grstatus needs to have acquire - semantics. On x86, reads already have - that, and we just need to protect against - compiler reorderings. On other - architectures we may need a full - barrier. */ -#ifdef CONFIG_X86 - barrier(); -#else - mb(); -#endif - } - - return 1; -} - static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) { return gnttab_interface->end_foreign_access_ref(ref, readonly); @@ -616,37 +436,6 @@ static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref) return frame; } -static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref) -{ - unsigned long frame; - u16 flags; - u16 *pflags; - - pflags = &gnttab_shared.v2[ref].hdr.flags; - - /* - * If a transfer is not even yet started, try to reclaim the grant - * reference and return failure (== 0). - */ - while (!((flags = *pflags) & GTF_transfer_committed)) { - if (sync_cmpxchg(pflags, flags, 0) == flags) - return 0; - cpu_relax(); - } - - /* If a transfer is in progress then wait until it is completed. */ - while (!(flags & GTF_transfer_completed)) { - flags = *pflags; - cpu_relax(); - } - - rmb(); /* Read the frame number /after/ reading completion status. */ - frame = gnttab_shared.v2[ref].full_page.frame; - BUG_ON(frame == 0); - - return frame; -} - unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) { return gnttab_interface->end_foreign_transfer_ref(ref); @@ -962,12 +751,6 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, } EXPORT_SYMBOL_GPL(gnttab_unmap_refs); -static unsigned nr_status_frames(unsigned nr_grant_frames) -{ - BUG_ON(grefs_per_grant_frame == 0); - return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP; -} - static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) { int rc; @@ -985,55 +768,6 @@ static void gnttab_unmap_frames_v1(void) arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); } -static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes) -{ - uint64_t *sframes; - unsigned int nr_sframes; - struct gnttab_get_status_frames getframes; - int rc; - - nr_sframes = nr_status_frames(nr_gframes); - - /* No need for kzalloc as it is initialized in following hypercall - * GNTTABOP_get_status_frames. 
- */ - sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC); - if (!sframes) - return -ENOMEM; - - getframes.dom = DOMID_SELF; - getframes.nr_frames = nr_sframes; - set_xen_guest_handle(getframes.frame_list, sframes); - - rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames, - &getframes, 1); - if (rc == -ENOSYS) { - kfree(sframes); - return -ENOSYS; - } - - BUG_ON(rc || getframes.status); - - rc = arch_gnttab_map_status(sframes, nr_sframes, - nr_status_frames(gnttab_max_grant_frames()), - &grstatus); - BUG_ON(rc); - kfree(sframes); - - rc = arch_gnttab_map_shared(frames, nr_gframes, - gnttab_max_grant_frames(), - &gnttab_shared.addr); - BUG_ON(rc); - - return 0; -} - -static void gnttab_unmap_frames_v2(void) -{ - arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames); - arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames)); -} - static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { struct gnttab_setup_table setup; @@ -1101,43 +835,13 @@ static struct gnttab_ops gnttab_v1_ops = { .query_foreign_access = gnttab_query_foreign_access_v1, }; -static struct gnttab_ops gnttab_v2_ops = { - .map_frames = gnttab_map_frames_v2, - .unmap_frames = gnttab_unmap_frames_v2, - .update_entry = gnttab_update_entry_v2, - .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, - .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, - .query_foreign_access = gnttab_query_foreign_access_v2, - .update_subpage_entry = gnttab_update_subpage_entry_v2, - .update_trans_entry = gnttab_update_trans_entry_v2, -}; - static void gnttab_request_version(void) { - int rc; - struct gnttab_set_version gsv; + /* Only version 1 is used, which will always be available. */ + grant_table_version = 1; + grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1); + gnttab_interface = &gnttab_v1_ops; - gsv.version = 1; - - rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); - if (rc == 0 && gsv.version == 2) { - grant_table_version = 2; - grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2); - gnttab_interface = &gnttab_v2_ops; - } else if (grant_table_version == 2) { - /* - * If we've already used version 2 features, - * but then suddenly discover that they're not - * available (e.g. migrating to an older - * version of Xen), almost unbounded badness - * can happen. - */ - panic("we need grant tables version 2, but only version 1 is available"); - } else { - grant_table_version = 1; - grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1); - gnttab_interface = &gnttab_v1_ops; - } pr_info("Grant tables using version %d layout\n", grant_table_version); } @@ -1225,8 +929,7 @@ int gnttab_init(void) } } - ret = arch_gnttab_init(max_nr_grant_frames, - nr_status_frames(max_nr_grant_frames)); + ret = arch_gnttab_init(max_nr_grant_frames); if (ret < 0) goto ini_nomem; diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 5f1e1f3cd186..f8bb36f9d9ce 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -103,16 +103,11 @@ static void do_suspend(void) shutting_down = SHUTDOWN_SUSPEND; -#ifdef CONFIG_PREEMPT - /* If the kernel is preemptible, we need to freeze all the processes - to prevent them from being in the middle of a pagetable update - during suspend. */ err = freeze_processes(); if (err) { pr_err("%s: freeze failed %d\n", __func__, err); goto out; } -#endif err = dpm_suspend_start(PMSG_FREEZE); if (err) { @@ -157,10 +152,8 @@ out_resume: dpm_resume_end(si.cancelled ? 
PMSG_THAW : PMSG_RESTORE); out_thaw: -#ifdef CONFIG_PREEMPT thaw_processes(); out: -#endif shutting_down = SHUTDOWN_INVALID; } #endif /* CONFIG_HIBERNATE_CALLBACKS */ diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index d57a173685f3..259ba2661543 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -579,7 +579,7 @@ static void pcistub_remove(struct pci_dev *dev) } } -static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = { +static const struct pci_device_id pcistub_ids[] = { { .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index 4a7e6e0a5f4c..c214daab4829 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -174,6 +174,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev) "version mismatch (%s/%s) with pcifront - " "halting " DRV_NAME, magic, XEN_PCI_MAGIC); + err = -EFAULT; goto out; } |
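
The final xen-pciback xenbus.c hunk assigns err = -EFAULT before jumping to the shared exit label, so a magic/version mismatch no longer falls through and reports success. A short sketch of that error-path pattern, under the assumption of a hypothetical check_magic() helper and MY_MAGIC constant standing in for xen_pcibk_attach() and XEN_PCI_MAGIC:

	#include <linux/errno.h>
	#include <linux/string.h>

	#define MY_MAGIC "7"			/* hypothetical stand-in constant */

	static int check_magic(const char *magic)
	{
		int err = 0;

		if (strcmp(magic, MY_MAGIC) != 0) {
			/*
			 * Without this assignment the function would reach
			 * the cleanup label still holding err == 0 and
			 * report success despite the mismatch -- the bug
			 * class the hunk above fixes.
			 */
			err = -EFAULT;
			goto out;
		}

		/* ... further setup that may also set err and goto out ... */
	out:
		return err;
	}

The general rule illustrated: every goto to a common exit label must leave the error variable in a deliberate state, not whatever an earlier successful step left behind.
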